minix/servers/inet/sr.c

/* this file contains the interface of the network software with the file
* system.
*
* Copyright 1995 Philip Homburg
*
* The valid messages and their parameters are:
*
* Requests:
*
* m_type DEVICE USER_ENDPT COUNT
* --------------------------------------------------
* | DEV_OPEN | minor dev | proc nr | mode |
* |-------------+-----------+-----------+----------+
* | DEV_CLOSE | minor dev | proc nr | |
* |-------------+-----------+-----------+----------+
*
* m_type DEVICE USER_ENDPT COUNT IO_GRANT
* ---------------------------------------------------------------
* | DEV_READ_S | minor dev | proc nr | count | grant ID |
* |-------------+-----------+-----------+-----------+-----------|
* | DEV_WRITE_S | minor dev | proc nr | count | grant ID |
* |-------------+-----------+-----------+-----------+-----------|
* | DEV_IOCTL_S | minor dev | proc nr | command | grant ID |
* |-------------+-----------+-----------+-----------+-----------|
* | DEV_SELECT | minor dev | ops | | |
* |-------------+-----------+-----------+-----------+-----------|
*
* m_type
* --------------|
* | DEV_STATUS |
* |-------------|
*
* m_type DEVICE USER_ENDPT COUNT
* --------------------------------------------------|
* | CANCEL | minor dev | proc nr | mode |
* |-------------+-----------+-----------+-----------|
*
* Replies:
*
* m_type REP_ENDPT REP_STATUS REP_IO_GRANT
* -------------------------------------------------------|
* | TASK_REPLY | proc nr | status | grant ID |
* |---------------+-----------+-----------+--------------|
*
* m_type REP_ENDPT REP_STATUS REP_IO_GRANT
* ----------------+-----------+--------------------------|
* | DEV_REVIVE | proc nr | | grant ID |
* |---------------+-----------+-----------+--------------|
* | DEV_IO_READY | minor dev | sel ops | |
* |---------------+-----------+-----------+--------------|
* | DEV_NO_STATUS | | | |
* |---------------+-----------+-----------+--------------|
*/
#include "inet.h"
#include <sys/svrctl.h>
#include <minix/callnr.h>
#include "mq.h"
#include "qp.h"
#include "proto.h"
#include "generic/type.h"
#include "generic/assert.h"
#include "generic/buf.h"
#include "generic/event.h"
#include "generic/sr.h"
#include "sr_int.h"
THIS_FILE
sr_fd_t sr_fd_table[FD_NR];
static mq_t *repl_queue, *repl_queue_tail;
static struct vscp_vec s_cp_req[SCPVEC_NR];
static int sr_open(message *m);
static void sr_close(message *m);
static int sr_rwio(mq_t *m);
static int sr_restart_read(sr_fd_t *fdp);
static int sr_restart_write(sr_fd_t *fdp);
static int sr_restart_ioctl(sr_fd_t *fdp);
static int sr_cancel(message *m);
static int sr_select(message *m);
static void sr_status(message *m);
static void sr_reply_(mq_t *m, int reply, int is_revive);
static sr_fd_t *sr_getchannel(int minor);
static acc_t *sr_get_userdata(int fd, size_t offset, size_t count, int
for_ioctl);
static int sr_put_userdata(int fd, size_t offset, acc_t *data, int
for_ioctl);
static void sr_select_res(int fd, unsigned ops);
static int sr_repl_queue(int proc, int ref, int operation);
static int walk_queue(sr_fd_t *sr_fd, mq_t **q_head_ptr, mq_t
**q_tail_ptr, int type, int proc_nr, int ref, int first_flag);
static void sr_event(event_t *evp, ev_arg_t arg);
static int cp_u2b(endpoint_t proc, cp_grant_id_t gid, vir_bytes offset,
acc_t **var_acc_ptr, int size);
static int cp_b2u(acc_t *acc_ptr, endpoint_t proc, cp_grant_id_t gid,
vir_bytes offset);
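
/* Initialize the interface: mark every slot in sr_fd_table free, set up its
 * ioctl, read and write event structures, and clear the revive reply queue.
 */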
void sr_init()
2005-04-21 16:53:53 +02:00
{
int i;
for (i=0; i<FD_NR; i++)
{
sr_fd_table[i].srf_flags= SFF_FREE;
ev_init(&sr_fd_table[i].srf_ioctl_ev);
ev_init(&sr_fd_table[i].srf_read_ev);
ev_init(&sr_fd_table[i].srf_write_ev);
}
repl_queue= NULL;
}
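
/* Handle one request message from the file system: check a CANCEL against the
 * queue of pending revive replies first, then dispatch the message to the
 * appropriate handler and send a reply and/or free the message as needed.
 */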
void sr_rec(m)
2005-04-21 16:53:53 +02:00
mq_t *m;
{
int result;
int send_reply = 0, free_mess = 0;
if (repl_queue)
{
if (m->mq_mess.m_type == CANCEL)
{
result= sr_repl_queue(m->mq_mess.USER_ENDPT,
(int)m->mq_mess.IO_GRANT, 0);
if (result)
{
mq_free(m);
return; /* canceled request in queue */
}
}
#if 0
else
sr_repl_queue(ANY, 0, 0);
#endif
}
switch (m->mq_mess.m_type)
{
case DEV_OPEN:
result= sr_open(&m->mq_mess);
send_reply= 1;
free_mess= 1;
break;
case DEV_CLOSE:
sr_close(&m->mq_mess);
result= OK;
send_reply= 1;
free_mess= 1;
break;
case DEV_READ_S:
case DEV_WRITE_S:
case DEV_IOCTL_S:
result= sr_rwio(m);
assert(result == OK || result == SUSPEND);
send_reply= (result == SUSPEND);
free_mess= 0;
break;
case CANCEL:
result= sr_cancel(&m->mq_mess);
assert(result == OK || result == EINTR);
send_reply= (result == EINTR);
free_mess= 1;
m->mq_mess.m_type= 0;
break;
case DEV_SELECT:
result= sr_select(&m->mq_mess);
send_reply= 1;
free_mess= 1;
break;
case DEV_STATUS:
sr_status(&m->mq_mess);
result= OK; /* Satisfy lint. */
send_reply= 0;
free_mess= 1;
break;
default:
ip_panic(("unknown message, from %d, type %d",
m->mq_mess.m_source, m->mq_mess.m_type));
}
if (send_reply)
{
sr_reply_(m, result, FALSE /* !is_revive */);
}
if (free_mess)
mq_free(m);
}
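
/* Register a minor device: record its port and its open, close, read, write,
 * ioctl, cancel and select callbacks in the corresponding table slot.
 */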
void sr_add_minor(minor, port, openf, closef, readf, writef,
ioctlf, cancelf, selectf)
int minor;
int port;
sr_open_t openf;
sr_close_t closef;
sr_read_t readf;
sr_write_t writef;
sr_ioctl_t ioctlf;
sr_cancel_t cancelf;
sr_select_t selectf;
{
sr_fd_t *sr_fd;
assert (minor>=0 && minor<FD_NR);
sr_fd= &sr_fd_table[minor];
assert(!(sr_fd->srf_flags & SFF_INUSE));
sr_fd->srf_flags= SFF_INUSE | SFF_MINOR;
sr_fd->srf_port= port;
sr_fd->srf_open= openf;
sr_fd->srf_close= closef;
sr_fd->srf_write= writef;
sr_fd->srf_read= readf;
sr_fd->srf_ioctl= ioctlf;
sr_fd->srf_cancel= cancelf;
sr_fd->srf_select= selectf;
}
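
/* Open a channel: clone the minor's template entry into a free slot, call the
 * protocol's open function, and return the index of the new slot or an error.
 */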
static int sr_open(m)
2005-04-21 16:53:53 +02:00
message *m;
{
sr_fd_t *sr_fd;
int minor= m->DEVICE;
int i, fd;
if (minor<0 || minor>=FD_NR)
{
DBLOCK(1, printf("replying EINVAL\n"));
return EINVAL;
}
if (!(sr_fd_table[minor].srf_flags & SFF_MINOR))
{
DBLOCK(1, printf("replying ENXIO\n"));
return ENXIO;
}
for (i=0; i<FD_NR && (sr_fd_table[i].srf_flags & SFF_INUSE); i++);
if (i>=FD_NR)
{
DBLOCK(1, printf("replying ENFILE\n"));
return ENFILE;
}
sr_fd= &sr_fd_table[i];
*sr_fd= sr_fd_table[minor];
sr_fd->srf_flags= SFF_INUSE;
fd= (*sr_fd->srf_open)(sr_fd->srf_port, i, sr_get_userdata,
sr_put_userdata, 0 /* no put_pkt */, sr_select_res);
if (fd<0)
{
sr_fd->srf_flags= SFF_FREE;
DBLOCK(1, printf("replying %d\n", fd));
return fd;
}
sr_fd->srf_fd= fd;
return i;
}
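
/* Close a channel: the slot must not be a minor template and must have no I/O
 * in progress; call the protocol's close function and free the slot.
 */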
static void sr_close(m)
2005-04-21 16:53:53 +02:00
message *m;
{
sr_fd_t *sr_fd;
sr_fd= sr_getchannel(m->DEVICE);
assert (sr_fd);
if (sr_fd->srf_flags & SFF_BUSY)
ip_panic(("close on busy channel"));
assert (!(sr_fd->srf_flags & SFF_MINOR));
(*sr_fd->srf_close)(sr_fd->srf_fd);
sr_fd->srf_flags= SFF_FREE;
}
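
/* Start or queue a DEV_READ_S, DEV_WRITE_S or DEV_IOCTL_S request. If a
 * request of the same kind is already in progress the message is appended to
 * its queue and SUSPEND is returned; otherwise the protocol callback is called
 * and the message is kept only if the operation suspends.
 */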
static int sr_rwio(m)
2005-04-21 16:53:53 +02:00
mq_t *m;
{
sr_fd_t *sr_fd;
mq_t **q_head_ptr, **q_tail_ptr;
int ip_flag, susp_flag, first_flag;
int r;
ioreq_t request;
size_t size;
sr_fd= sr_getchannel(m->mq_mess.DEVICE);
assert (sr_fd);
switch(m->mq_mess.m_type)
{
case DEV_READ_S:
q_head_ptr= &sr_fd->srf_read_q;
q_tail_ptr= &sr_fd->srf_read_q_tail;
ip_flag= SFF_READ_IP;
susp_flag= SFF_READ_SUSP;
first_flag= SFF_READ_FIRST;
break;
case DEV_WRITE_S:
q_head_ptr= &sr_fd->srf_write_q;
q_tail_ptr= &sr_fd->srf_write_q_tail;
ip_flag= SFF_WRITE_IP;
susp_flag= SFF_WRITE_SUSP;
first_flag= SFF_WRITE_FIRST;
break;
case DEV_IOCTL_S:
q_head_ptr= &sr_fd->srf_ioctl_q;
q_tail_ptr= &sr_fd->srf_ioctl_q_tail;
ip_flag= SFF_IOCTL_IP;
susp_flag= SFF_IOCTL_SUSP;
first_flag= SFF_IOCTL_FIRST;
break;
default:
ip_panic(("illegal case entry"));
}
if (sr_fd->srf_flags & ip_flag)
{
assert(sr_fd->srf_flags & susp_flag);
assert(*q_head_ptr);
(*q_tail_ptr)->mq_next= m;
*q_tail_ptr= m;
return SUSPEND;
}
assert(!*q_head_ptr);
*q_tail_ptr= *q_head_ptr= m;
sr_fd->srf_flags |= ip_flag;
assert(!(sr_fd->srf_flags & first_flag));
sr_fd->srf_flags |= first_flag;
switch(m->mq_mess.m_type)
{
case DEV_READ_S:
r= (*sr_fd->srf_read)(sr_fd->srf_fd,
m->mq_mess.COUNT);
break;
case DEV_WRITE_S:
r= (*sr_fd->srf_write)(sr_fd->srf_fd,
m->mq_mess.COUNT);
break;
case DEV_IOCTL_S:
request= m->mq_mess.REQUEST;
size= (request >> 16) & _IOCPARM_MASK;
if (size>MAX_IOCTL_S)
{
DBLOCK(1, printf("replying EINVAL\n"));
r= sr_put_userdata(sr_fd-sr_fd_table, EINVAL,
NULL, 1);
assert(r == OK);
assert(sr_fd->srf_flags & first_flag);
sr_fd->srf_flags &= ~first_flag;
return OK;
}
r= (*sr_fd->srf_ioctl)(sr_fd->srf_fd, request);
break;
default:
ip_panic(("illegal case entry"));
}
assert(sr_fd->srf_flags & first_flag);
sr_fd->srf_flags &= ~first_flag;
assert(r == OK || r == SUSPEND ||
(printf("r= %d\n", r), 0));
if (r == SUSPEND)
sr_fd->srf_flags |= susp_flag;
else
mq_free(m);
return r;
}
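
/* Restart the read request at the head of the read queue, unless a read is
 * already in progress; sr_restart_write and sr_restart_ioctl below do the
 * same for their respective queues.
 */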
static int sr_restart_read(sr_fd)
sr_fd_t *sr_fd;
{
mq_t *mp;
int r;
mp= sr_fd->srf_read_q;
assert(mp);
if (sr_fd->srf_flags & SFF_READ_IP)
{
assert(sr_fd->srf_flags & SFF_READ_SUSP);
return SUSPEND;
}
sr_fd->srf_flags |= SFF_READ_IP;
r= (*sr_fd->srf_read)(sr_fd->srf_fd,
mp->mq_mess.COUNT);
assert(r == OK || r == SUSPEND ||
(printf("r= %d\n", r), 0));
if (r == SUSPEND)
sr_fd->srf_flags |= SFF_READ_SUSP;
return r;
}
static int sr_restart_write(sr_fd)
sr_fd_t *sr_fd;
{
mq_t *mp;
int r;
mp= sr_fd->srf_write_q;
assert(mp);
if (sr_fd->srf_flags & SFF_WRITE_IP)
{
assert(sr_fd->srf_flags & SFF_WRITE_SUSP);
return SUSPEND;
}
sr_fd->srf_flags |= SFF_WRITE_IP;
r= (*sr_fd->srf_write)(sr_fd->srf_fd,
mp->mq_mess.COUNT);
assert(r == OK || r == SUSPEND ||
(printf("r= %d\n", r), 0));
if (r == SUSPEND)
sr_fd->srf_flags |= SFF_WRITE_SUSP;
return r;
}
static int sr_restart_ioctl(sr_fd)
sr_fd_t *sr_fd;
{
mq_t *mp;
int r;
mp= sr_fd->srf_ioctl_q;
assert(mp);
if (sr_fd->srf_flags & SFF_IOCTL_IP)
{
assert(sr_fd->srf_flags & SFF_IOCTL_SUSP);
return SUSPEND;
}
sr_fd->srf_flags |= SFF_IOCTL_IP;
r= (*sr_fd->srf_ioctl)(sr_fd->srf_fd,
mp->mq_mess.COUNT);
assert(r == OK || r == SUSPEND ||
(printf("r= %d\n", r), 0));
if (r == SUSPEND)
sr_fd->srf_flags |= SFF_IOCTL_SUSP;
return r;
}
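
/* Cancel a pending request: search the ioctl, read and write queues for the
 * request identified by the caller's endpoint and grant; panic if none found.
 */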
static int sr_cancel(m)
message *m;
{
sr_fd_t *sr_fd;
int result;
int proc_nr, ref;
result=EINTR;
proc_nr= m->USER_ENDPT;
ref= (int)m->IO_GRANT;
sr_fd= sr_getchannel(m->DEVICE);
assert (sr_fd);
result= walk_queue(sr_fd, &sr_fd->srf_ioctl_q,
&sr_fd->srf_ioctl_q_tail, SR_CANCEL_IOCTL,
proc_nr, ref, SFF_IOCTL_FIRST);
if (result != EAGAIN)
return result;
result= walk_queue(sr_fd, &sr_fd->srf_read_q,
&sr_fd->srf_read_q_tail, SR_CANCEL_READ,
proc_nr, ref, SFF_READ_FIRST);
if (result != EAGAIN)
return result;
result= walk_queue(sr_fd, &sr_fd->srf_write_q,
&sr_fd->srf_write_q_tail, SR_CANCEL_WRITE,
proc_nr, ref, SFF_WRITE_FIRST);
if (result != EAGAIN)
return result;
ip_panic((
"request not found: from %d, type %d, MINOR= %d, PROC= %d, REF= %d",
m->m_source, m->m_type, m->DEVICE,
m->USER_ENDPT, (int) m->IO_GRANT));
return result;
}
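
/* Translate the select operations requested by the file system into
 * SR_SELECT_* flags, call the protocol's select function, and translate the
 * operations that are ready right away back into SEL_* bits.
 */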
static int sr_select(m)
message *m;
{
sr_fd_t *sr_fd;
int r;
unsigned m_ops, i_ops;
sr_fd= sr_getchannel(m->DEVICE);
assert (sr_fd);
sr_fd->srf_select_proc= m->m_source;
m_ops= m->USER_ENDPT;
i_ops= 0;
if (m_ops & SEL_RD) i_ops |= SR_SELECT_READ;
if (m_ops & SEL_WR) i_ops |= SR_SELECT_WRITE;
if (m_ops & SEL_ERR) i_ops |= SR_SELECT_EXCEPTION;
if (!(m_ops & SEL_NOTIFY)) i_ops |= SR_SELECT_POLL;
r= (*sr_fd->srf_select)(sr_fd->srf_fd, i_ops);
if (r < 0)
return r;
m_ops= 0;
if (r & SR_SELECT_READ) m_ops |= SEL_RD;
if (r & SR_SELECT_WRITE) m_ops |= SEL_WR;
if (r & SR_SELECT_EXCEPTION) m_ops |= SEL_ERR;
return m_ops;
}
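
/* Answer DEV_STATUS: deliver one queued revive reply if there is one,
 * otherwise report a pending select result for the calling process, or
 * DEV_NO_STATUS when there is nothing left to report.
 */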
static void sr_status(m)
message *m;
{
int fd, result;
unsigned m_ops;
sr_fd_t *sr_fd;
mq_t *mq;
mq= repl_queue;
if (mq != NULL)
{
repl_queue= mq->mq_next;
mq->mq_mess.m_type= DEV_REVIVE;
result= send(mq->mq_mess.m_source, &mq->mq_mess);
if (result != OK)
ip_panic(("unable to send"));
mq_free(mq);
return;
}
for (fd=0, sr_fd= sr_fd_table; fd<FD_NR; fd++, sr_fd++)
{
if ((sr_fd->srf_flags &
(SFF_SELECT_R|SFF_SELECT_W|SFF_SELECT_X)) == 0)
{
/* Nothing to report */
continue;
}
if (sr_fd->srf_select_proc != m->m_source)
{
/* Wrong process */
continue;
}
m_ops= 0;
if (sr_fd->srf_flags & SFF_SELECT_R) m_ops |= SEL_RD;
if (sr_fd->srf_flags & SFF_SELECT_W) m_ops |= SEL_WR;
if (sr_fd->srf_flags & SFF_SELECT_X) m_ops |= SEL_ERR;
sr_fd->srf_flags &= ~(SFF_SELECT_R|SFF_SELECT_W|SFF_SELECT_X);
m->m_type= DEV_IO_READY;
m->DEV_MINOR= fd;
m->DEV_SEL_OPS= m_ops;
result= send(m->m_source, m);
if (result != OK)
ip_panic(("unable to send"));
return;
}
m->m_type= DEV_NO_STATUS;
result= send(m->m_source, m);
if (result != OK)
ip_panic(("unable to send"));
}
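
/* Search one request queue for the request matching proc_nr and ref. The
 * request at the head of the queue is cancelled through the protocol callback
 * (OK); a request further down is simply unlinked (EINTR); EAGAIN means the
 * request was not found in this queue.
 */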
static int walk_queue(sr_fd, q_head_ptr, q_tail_ptr, type, proc_nr, ref,
first_flag)
sr_fd_t *sr_fd;
mq_t **q_head_ptr;
mq_t **q_tail_ptr;
int type;
int proc_nr;
int ref;
int first_flag;
{
mq_t *q_ptr_prv, *q_ptr;
int result;
for(q_ptr_prv= NULL, q_ptr= *q_head_ptr; q_ptr;
q_ptr_prv= q_ptr, q_ptr= q_ptr->mq_next)
{
if (q_ptr->mq_mess.USER_ENDPT != proc_nr)
2005-04-21 16:53:53 +02:00
continue;
if ((int)q_ptr->mq_mess.IO_GRANT != ref)
continue;
if (!q_ptr_prv)
{
assert(!(sr_fd->srf_flags & first_flag));
sr_fd->srf_flags |= first_flag;
result= (*sr_fd->srf_cancel)(sr_fd->srf_fd, type);
assert(result == OK);
*q_head_ptr= q_ptr->mq_next;
mq_free(q_ptr);
assert(sr_fd->srf_flags & first_flag);
sr_fd->srf_flags &= ~first_flag;
return OK;
}
q_ptr_prv->mq_next= q_ptr->mq_next;
mq_free(q_ptr);
if (!q_ptr_prv->mq_next)
*q_tail_ptr= q_ptr_prv;
return EINTR;
}
return EAGAIN;
}
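
/* Return the table entry for an open channel, checking that the minor refers
 * to an in-use slot that is not a minor template.
 */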
static sr_fd_t *sr_getchannel(minor)
int minor;
{
sr_fd_t *loc_fd;
compare(minor, >=, 0);
compare(minor, <, FD_NR);
loc_fd= &sr_fd_table[minor];
assert (!(loc_fd->srf_flags & SFF_MINOR) &&
(loc_fd->srf_flags & SFF_INUSE));
return loc_fd;
}
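
/* Build a reply carrying the caller's endpoint, status and grant. A revive
 * reply is queued on repl_queue and the file system is notified so that it
 * can collect it with DEV_STATUS; a normal reply is sent directly.
 */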
static void sr_reply_(mq, status, is_revive)
mq_t *mq;
int status;
int is_revive;
{
int result, proc, ref;
message reply, *mp;
proc= mq->mq_mess.USER_ENDPT;
ref= (int)mq->mq_mess.IO_GRANT;
if (is_revive)
mp= &mq->mq_mess;
else
mp= &reply;
mp->m_type= TASK_REPLY;
mp->REP_ENDPT= proc;
mp->REP_STATUS= status;
mp->REP_IO_GRANT= ref;
if (is_revive)
{
notify(mq->mq_mess.m_source);
result= ELOCKED;
}
else
{
result= send(mq->mq_mess.m_source, mp);
}
if (result == ELOCKED && is_revive)
{
mq->mq_next= NULL;
if (repl_queue)
repl_queue_tail->mq_next= mq;
else
repl_queue= mq;
repl_queue_tail= mq;
return;
}
if (result != OK)
ip_panic(("unable to send"));
if (is_revive)
mq_free(mq);
}
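
/* Called by the protocol code to fetch user data for a write or ioctl. A zero
 * count marks completion: the request at the head of the queue is replied to
 * (offset carries the status) and a suspended successor, if any, is scheduled
 * for restart. Otherwise the data is copied in from the caller's grant.
 */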
static acc_t *sr_get_userdata (fd, offset, count, for_ioctl)
int fd;
size_t offset;
size_t count;
int for_ioctl;
{
sr_fd_t *loc_fd;
mq_t **head_ptr, *m, *mq;
int ip_flag, susp_flag, first_flag;
int result, suspended, is_revive;
acc_t *acc;
event_t *evp;
ev_arg_t arg;
loc_fd= &sr_fd_table[fd];
if (for_ioctl)
{
head_ptr= &loc_fd->srf_ioctl_q;
evp= &loc_fd->srf_ioctl_ev;
ip_flag= SFF_IOCTL_IP;
susp_flag= SFF_IOCTL_SUSP;
first_flag= SFF_IOCTL_FIRST;
}
else
{
head_ptr= &loc_fd->srf_write_q;
evp= &loc_fd->srf_write_ev;
ip_flag= SFF_WRITE_IP;
susp_flag= SFF_WRITE_SUSP;
first_flag= SFF_WRITE_FIRST;
}
assert (loc_fd->srf_flags & ip_flag);
if (!count)
{
m= *head_ptr;
mq= m->mq_next;
*head_ptr= mq;
result= (int)offset;
is_revive= !(loc_fd->srf_flags & first_flag);
sr_reply_(m, result, is_revive);
suspended= (loc_fd->srf_flags & susp_flag);
loc_fd->srf_flags &= ~(ip_flag|susp_flag);
if (suspended)
{
if (mq)
{
arg.ev_ptr= loc_fd;
ev_enqueue(evp, sr_event, arg);
}
}
return NULL;
}
result= cp_u2b ((*head_ptr)->mq_mess.m_source,
(int)(*head_ptr)->mq_mess.IO_GRANT, offset, &acc, count);
return result<0 ? NULL : acc;
}
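
/* Counterpart of sr_get_userdata for reads and ioctl results: a NULL data
 * pointer marks completion, otherwise the buffer chain is copied out to the
 * caller's grant.
 */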
static int sr_put_userdata (fd, offset, data, for_ioctl)
int fd;
size_t offset;
acc_t *data;
int for_ioctl;
{
sr_fd_t *loc_fd;
mq_t **head_ptr, *m, *mq;
int ip_flag, susp_flag, first_flag;
int result, suspended, is_revive;
event_t *evp;
ev_arg_t arg;
loc_fd= &sr_fd_table[fd];
if (for_ioctl)
{
head_ptr= &loc_fd->srf_ioctl_q;
evp= &loc_fd->srf_ioctl_ev;
ip_flag= SFF_IOCTL_IP;
susp_flag= SFF_IOCTL_SUSP;
first_flag= SFF_IOCTL_FIRST;
}
else
{
head_ptr= &loc_fd->srf_read_q;
evp= &loc_fd->srf_read_ev;
ip_flag= SFF_READ_IP;
susp_flag= SFF_READ_SUSP;
first_flag= SFF_READ_FIRST;
}
assert (loc_fd->srf_flags & ip_flag);
if (!data)
{
m= *head_ptr;
mq= m->mq_next;
*head_ptr= mq;
result= (int)offset;
is_revive= !(loc_fd->srf_flags & first_flag);
sr_reply_(m, result, is_revive);
suspended= (loc_fd->srf_flags & susp_flag);
loc_fd->srf_flags &= ~(ip_flag|susp_flag);
if (suspended)
{
if (mq)
{
arg.ev_ptr= loc_fd;
ev_enqueue(evp, sr_event, arg);
}
}
return OK;
}
return cp_b2u (data, (*head_ptr)->mq_mess.m_source,
(int)(*head_ptr)->mq_mess.IO_GRANT, offset);
}
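
/* Called by the protocol code when a select condition becomes true: record
 * the ready operations and notify the selecting process.
 */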
static void sr_select_res(int fd, unsigned ops)
{
sr_fd_t *sr_fd;
sr_fd= &sr_fd_table[fd];
if (ops & SR_SELECT_READ) sr_fd->srf_flags |= SFF_SELECT_R;
if (ops & SR_SELECT_WRITE) sr_fd->srf_flags |= SFF_SELECT_W;
if (ops & SR_SELECT_EXCEPTION) sr_fd->srf_flags |= SFF_SELECT_X;
notify(sr_fd->srf_select_proc);
}
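
/* Event handler that restarts the requests queued behind a completed read,
 * write or ioctl, until the queue is empty or a restarted request suspends.
 */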
static void sr_event(evp, arg)
event_t *evp;
ev_arg_t arg;
{
sr_fd_t *sr_fd;
int r;
sr_fd= arg.ev_ptr;
if (evp == &sr_fd->srf_write_ev)
{
while(sr_fd->srf_write_q)
{
r= sr_restart_write(sr_fd);
if (r == SUSPEND)
return;
}
return;
}
if (evp == &sr_fd->srf_read_ev)
{
while(sr_fd->srf_read_q)
{
r= sr_restart_read(sr_fd);
if (r == SUSPEND)
return;
}
return;
}
if (evp == &sr_fd->srf_ioctl_ev)
{
while(sr_fd->srf_ioctl_q)
{
r= sr_restart_ioctl(sr_fd);
if (r == SUSPEND)
return;
}
return;
}
ip_panic(("sr_event: unknown event\n"));
}
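
/* Copy 'size' bytes from the caller's grant into a newly allocated buffer
 * chain: a single fragment is copied with sys_safecopyfrom, longer chains are
 * gathered into s_cp_req and copied with sys_vsafecopy.
 */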
static int cp_u2b(proc, gid, offset, var_acc_ptr, size)
endpoint_t proc;
cp_grant_id_t gid;
vir_bytes offset;
acc_t **var_acc_ptr;
int size;
{
acc_t *acc;
int i, r;
acc= bf_memreq(size);
*var_acc_ptr= acc;
i=0;
while (acc)
{
size= (vir_bytes)acc->acc_length;
s_cp_req[i].v_from= proc;
s_cp_req[i].v_to= SELF;
s_cp_req[i].v_gid= gid;
s_cp_req[i].v_offset= offset;
s_cp_req[i].v_addr= (vir_bytes) ptr2acc_data(acc);
s_cp_req[i].v_bytes= size;
offset += size;
acc= acc->acc_next;
i++;
if (acc == NULL && i == 1)
{
r= sys_safecopyfrom(s_cp_req[0].v_from,
s_cp_req[0].v_gid, s_cp_req[0].v_offset,
s_cp_req[0].v_addr, s_cp_req[0].v_bytes, D);
if (r <0)
{
printf("sys_safecopyfrom failed: %d\n", r);
bf_afree(*var_acc_ptr);
*var_acc_ptr= 0;
return r;
}
i= 0;
continue;
}
if (i == SCPVEC_NR || acc == NULL)
{
r= sys_vsafecopy(s_cp_req, i);
if (r <0)
{
printf("cp_u2b: sys_vsafecopy failed: %d\n",
r);
bf_afree(*var_acc_ptr);
*var_acc_ptr= 0;
return r;
}
i= 0;
}
}
return OK;
}
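
/* Copy a buffer chain out to the caller's grant and free it: a single
 * fragment uses sys_safecopyto, larger chains are batched through
 * sys_vsafecopy.
 */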
static int cp_b2u(acc_ptr, proc, gid, offset)
acc_t *acc_ptr;
endpoint_t proc;
cp_grant_id_t gid;
vir_bytes offset;
{
acc_t *acc;
int i, r, size;
acc= acc_ptr;
i=0;
while (acc)
{
size= (vir_bytes)acc->acc_length;
if (size)
{
s_cp_req[i].v_from= SELF;
s_cp_req[i].v_to= proc;
s_cp_req[i].v_gid= gid;
s_cp_req[i].v_offset= offset;
s_cp_req[i].v_addr= (vir_bytes) ptr2acc_data(acc);
s_cp_req[i].v_bytes= size;
i++;
}
offset += size;
acc= acc->acc_next;
if (acc == NULL && i == 1)
{
r= sys_safecopyto(s_cp_req[0].v_to,
s_cp_req[0].v_gid, s_cp_req[0].v_offset,
s_cp_req[0].v_addr, s_cp_req[0].v_bytes, D);
if (r <0)
{
printf("sys_safecopyto failed: %d\n", r);
bf_afree(acc_ptr);
return r;
}
i= 0;
continue;
}
if (i == SCPVEC_NR || acc == NULL)
{
r= sys_vsafecopy(s_cp_req, i);
if (r <0)
{
printf("cp_b2u: sys_vsafecopy failed: %d\n",
r);
bf_afree(acc_ptr);
return r;
}
i= 0;
}
}
bf_afree(acc_ptr);
return OK;
}
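
/* Look for a queued revive reply that matches the given endpoint and grant.
 * If one is found it is sent immediately and 1 is returned so that the
 * pending CANCEL can be dropped; the operation argument is unused.
 */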
static int sr_repl_queue(proc, ref, operation)
int proc;
int ref;
int operation;
{
mq_t *m, *m_cancel, *m_tmp;
mq_t *new_queue;
int result;
m_cancel= NULL;
new_queue= NULL;
for (m= repl_queue; m;)
{
if (m->mq_mess.REP_ENDPT == proc &&
m->mq_mess.REP_IO_GRANT == ref)
2005-04-21 16:53:53 +02:00
{
assert(!m_cancel);
m_cancel= m;
m= m->mq_next;
continue;
}
m_tmp= m;
m= m->mq_next;
m_tmp->mq_next= new_queue;
new_queue= m_tmp;
}
repl_queue= new_queue;
if (m_cancel)
{
result= send(m_cancel->mq_mess.m_source, &m_cancel->mq_mess);
if (result != OK)
ip_panic(("unable to send: %d", result));
mq_free(m_cancel);
return 1;
}
return 0;
}
/*
* $PchId: sr.c,v 1.17 2005/06/28 14:26:16 philip Exp $
*/