/* This file implements kernel debugging functionality that is not included
 * in the standard kernel. Available functionality includes timing of lock
 * functions and sanity checking of the scheduling queues.
 */

#include "kernel/kernel.h"

#include <minix/callnr.h>
#include <minix/sysutil.h>
#include <minix/u64.h>
#include <limits.h>
#include <string.h>
#include <assert.h>

/* Iteration bound for queue walks: one slot per process and task. */
#define MAX_LOOP (NR_PROCS + NR_TASKS)

/* Sanity check of one CPU's scheduling queues: consistent head/tail
 * pointers, and every runnable process enqueued exactly once, on the
 * queue matching its priority.
 */
int runqueues_ok_cpu(unsigned cpu)
{
	int q, l = 0;
	register struct proc *xp;
	struct proc **rdy_head, **rdy_tail;

	rdy_head = get_cpu_var(cpu, run_q_head);
	rdy_tail = get_cpu_var(cpu, run_q_tail);

	/* Clear the 'found' bit on every process table slot. */
	for (xp = BEG_PROC_ADDR; xp < END_PROC_ADDR; ++xp) {
		xp->p_found = 0;
		if (l++ > MAX_LOOP) panic("check error");
	}

	/* Walk every run queue and validate its structure. */
	for (q=l=0; q < NR_SCHED_QUEUES; q++) {
		if (rdy_head[q] && !rdy_tail[q]) {
			printf("head but no tail in %d\n", q);
			return 0;
		}
		if (!rdy_head[q] && rdy_tail[q]) {
			printf("tail but no head in %d\n", q);
			return 0;
		}
		if (rdy_tail[q] && rdy_tail[q]->p_nextready) {
			printf("tail and tail->next not null in %d\n", q);
			return 0;
		}
		for(xp = rdy_head[q]; xp; xp = xp->p_nextready) {
			const vir_bytes vxp = (vir_bytes) xp;
			vir_bytes dxp;
			if(vxp < (vir_bytes) BEG_PROC_ADDR ||
					vxp >= (vir_bytes) END_PROC_ADDR) {
				printf("xp out of range\n");
				return 0;
			}
			dxp = vxp - (vir_bytes) BEG_PROC_ADDR;
			if(dxp % sizeof(struct proc)) {
				printf("xp not a real pointer\n");
				return 0;
			}
			if(!proc_ptr_ok(xp)) {
				printf("xp bogus pointer\n");
				return 0;
			}
			if (RTS_ISSET(xp, RTS_SLOT_FREE)) {
				printf("scheduling error: dead proc q %d %d\n",
					q, xp->p_endpoint);
				return 0;
			}
			if (!proc_is_runnable(xp)) {
				printf("scheduling error: unready on runq %d proc %d\n",
					q, xp->p_nr);
				return 0;
			}
			if (xp->p_priority != q) {
				printf("scheduling error: wrong priority q %d proc %d ep %d name %s\n",
					q, xp->p_nr, xp->p_endpoint, xp->p_name);
				return 0;
			}
			if (xp->p_found) {
				printf("scheduling error: double sched q %d proc %d\n",
					q, xp->p_nr);
				return 0;
			}
			xp->p_found = 1;
			if (!xp->p_nextready && rdy_tail[q] != xp) {
				printf("sched err: last element not tail q %d proc %d\n",
					q, xp->p_nr);
				return 0;
			}
			if (l++ > MAX_LOOP) {
				printf("loop in schedule queue?\n");
				return 0;
			}
		}
	}

	/* Make sure every runnable process was seen on some queue. */
	for (xp = BEG_PROC_ADDR; xp < END_PROC_ADDR; ++xp) {
		if(!proc_ptr_ok(xp)) {
			printf("xp bogus pointer in proc table\n");
			return 0;
		}
		if (isemptyp(xp))
			continue;
		if(proc_is_runnable(xp) && !xp->p_found) {
			printf("sched error: ready proc %d not on queue\n", xp->p_nr);
			return 0;
		}
	}

	/* All is ok. */
	return 1;
}

#ifdef CONFIG_SMP
static int runqueues_ok_all(void)
{
	unsigned c;

	for (c = 0 ; c < ncpus; c++) {
		if (!runqueues_ok_cpu(c))
			return 0;
	}
	return 1;
}

int runqueues_ok(void)
{
	return runqueues_ok_all();
}

#else

int runqueues_ok(void)
{
	return runqueues_ok_cpu(0);
}

#endif
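
/* Illustrative sketch only: one way a debug build might use runqueues_ok()
 * as a scheduling invariant check around queue manipulation. The
 * CHECK_RUNQUEUES macro below is hypothetical and not part of this file's
 * or the kernel's API.
 */
#if 0
#define CHECK_RUNQUEUES do {			\
	if (!runqueues_ok())			\
		panic("runqueues corrupted");	\
} while(0)
#endif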

char *
rtsflagstr(const u32_t flags)
{
	static char str[100];
	str[0] = '\0';

#define FLAG(n) if(flags & n) { strlcat(str, #n " ", sizeof(str)); }

	FLAG(RTS_SLOT_FREE);
	FLAG(RTS_PROC_STOP);
	FLAG(RTS_SENDING);
	FLAG(RTS_RECEIVING);
	FLAG(RTS_SIGNALED);
	FLAG(RTS_SIG_PENDING);
	FLAG(RTS_P_STOP);
	FLAG(RTS_NO_PRIV);
	FLAG(RTS_NO_ENDPOINT);
	FLAG(RTS_VMINHIBIT);
	FLAG(RTS_PAGEFAULT);
	FLAG(RTS_VMREQUEST);
	FLAG(RTS_VMREQTARGET);
	FLAG(RTS_PREEMPTED);
	FLAG(RTS_NO_QUANTUM);

	return str;
}
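
/* A minimal sketch of what one FLAG invocation above expands to, using the
 * stringizing operator (#n) and literal concatenation:
 *
 *	if(flags & RTS_SENDING) {
 *		strlcat(str, "RTS_SENDING ", sizeof(str));
 *	}
 *
 * so a process that is both sending and signaled yields the string
 * "RTS_SENDING RTS_SIGNALED " (assuming only those two bits are set).
 */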

char *
miscflagstr(const u32_t flags)
{
	static char str[100];
	str[0] = '\0';

	FLAG(MF_REPLY_PEND);
	FLAG(MF_DELIVERMSG);
	FLAG(MF_KCALL_RESUME);

	return str;
}

char *
schedulerstr(struct proc *scheduler)
{
	if (scheduler != NULL)
	{
		return scheduler->p_name;
	}

	return "KERNEL";
}

static void
print_proc_name(struct proc *pp)
{
	char *name = pp->p_name;
	endpoint_t ep = pp->p_endpoint;

	if(name) {
		printf("%s(%d)", name, ep);
	}
	else {
		printf("%d", ep);
	}
}

static void
print_endpoint(endpoint_t ep)
{
	int proc_nr;
	struct proc *pp = NULL;

	switch(ep) {
	case ANY:
		printf("ANY");
		break;
	case SELF:
		printf("SELF");
		break;
	case NONE:
		printf("NONE");
		break;
	default:
		if(!isokendpt(ep, &proc_nr)) {
			printf("??? %d\n", ep);
		}
		else {
			pp = proc_addr(proc_nr);
			if(isemptyp(pp)) {
				printf("??? empty slot %d\n", proc_nr);
			}
			else {
				print_proc_name(pp);
			}
		}
		break;
	}
}

static void
print_sigmgr(struct proc *pp)
{
	endpoint_t sig_mgr, bak_sig_mgr;
	sig_mgr = priv(pp) ? priv(pp)->s_sig_mgr : NONE;
	bak_sig_mgr = priv(pp) ? priv(pp)->s_bak_sig_mgr : NONE;
	if(sig_mgr == NONE) { printf("no sigmgr"); return; }
	printf("sigmgr ");
	print_endpoint(sig_mgr);
	if(bak_sig_mgr != NONE) {
		printf(" / ");
		print_endpoint(bak_sig_mgr);
	}
}

void print_proc(struct proc *pp)
{
	endpoint_t dep;

	printf("%d: %s %d prio %d time %d/%d cycles 0x%x%08x cpu %2d "
			"pdbr 0x%lx rts %s misc %s sched %s ",
		proc_nr(pp), pp->p_name, pp->p_endpoint,
		pp->p_priority, pp->p_user_time,
		pp->p_sys_time, ex64hi(pp->p_cycles),
		ex64lo(pp->p_cycles), pp->p_cpu,
#if defined(__i386__)
		pp->p_seg.p_cr3,
#elif defined(__arm__)
		pp->p_seg.p_ttbr,
#endif
		rtsflagstr(pp->p_rts_flags), miscflagstr(pp->p_misc_flags),
		schedulerstr(pp->p_scheduler));

	print_sigmgr(pp);

	dep = P_BLOCKEDON(pp);
	if(dep != NONE) {
		printf(" blocked on: ");
		print_endpoint(dep);
	}
	printf("\n");
}
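
/* For orientation, a print_proc() line looks roughly like the sample below
 * (all values invented here for illustration):
 *
 *	5: vfs 6 prio 7 time 12/34 cycles 0x000003e8 cpu  0 pdbr 0x1b6000
 *	rts RTS_RECEIVING  misc  sched KERNEL sigmgr pm(2)
 */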

static void print_proc_depends(struct proc *pp, const int level)
{
	struct proc *depproc = NULL;
	endpoint_t dep;
#define COL { int i; for(i = 0; i < level; i++) printf("> "); }

	if(level >= NR_PROCS) {
		printf("loop??\n");
		return;
	}

	COL

	print_proc(pp);

	COL
	proc_stacktrace(pp);

	dep = P_BLOCKEDON(pp);
	if(dep != NONE && dep != ANY) {
		int procno;
		if(isokendpt(dep, &procno)) {
			depproc = proc_addr(procno);
			if(isemptyp(depproc))
				depproc = NULL;
		}
		if (depproc)
			print_proc_depends(depproc, level+1);
	}
}

void print_proc_recursive(struct proc *pp)
{
	print_proc_depends(pp, 0);
}
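
/* print_proc_recursive() thus follows the chain of processes that 'pp' is
 * (transitively) blocked on, indenting each level with "> ". A hypothetical
 * dump might look like:
 *
 *	app(340) ... blocked on: vfs(6)
 *	> vfs(6) ... blocked on: pm(2)
 *	> > pm(2) ...
 */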

#if DEBUG_DUMPIPC
static const char *mtypename(int mtype, int *possible_callname)
{
	char *callname = NULL, *errname = NULL;
	/* Use the generated tables to recognize message types.
	 *
	 * We try to match both error numbers and call numbers, as the
	 * reader can probably decide from context what's going on.
	 *
	 * Whenever it might be a call number we tell the caller, so the
	 * call message fields can be decoded if known.
	 */
	switch(mtype) {
#define IDENT(x) case x: callname = #x; *possible_callname = 1; break;
#include "kernel/extracted-mtype.h"
#undef IDENT
	}
	switch(mtype) {
#define IDENT(x) case x: errname = #x; break;
#include "kernel/extracted-errno.h"
#undef IDENT
	}

	/* no match */
	if(!errname && !callname)
		return NULL;

	/* 2 matches */
	if(errname && callname) {
		static char typename[100];
		strcpy(typename, errname);
		strcat(typename, " / ");
		strcat(typename, callname);
		return typename;
	}

	if(errname) return errname;

	assert(callname);
	return callname;
}
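
/* The IDENT trick above is a classic X-macro: the generated headers are
 * plain lists of IDENT(...) invocations, and each #include expands them
 * under the current definition of IDENT. A hypothetical excerpt of what
 * kernel/extracted-mtype.h presumably contains (entries invented here for
 * illustration):
 *
 *	IDENT(VFS_OPEN)
 *	IDENT(PM_EXIT)
 *
 * Redefining IDENT before each #include reuses one list to build both the
 * call-name and error-name switch tables.
 */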

static void printproc(struct proc *rp)
{
	if (rp)
		printf(" %s(%d)", rp->p_name, rp - proc);
	else
		printf(" kernel");
}

static void printparam(const char *name, const void *data, size_t size)
{
	printf(" %s=", name);
	switch (size) {
		case sizeof(char):	printf("%d", *(char *) data); break;
		case sizeof(short):	printf("%d", *(short *) data); break;
		case sizeof(int):	printf("%d", *(int *) data); break;
		default:		printf("(%u bytes)", size); break;
	}
}

#ifdef DEBUG_DUMPIPC_NAMES
static int namematch(char **names, int nnames, char *name)
{
	int i;
	for(i = 0; i < nnames; i++)
		if(!strcmp(names[i], name))
			return 1;
	return 0;
}
#endif
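
/* DEBUG_DUMPIPC_NAMES is expected to expand to a brace-enclosed list of
 * process names, since printmsg() below assigns it to a char * array.
 * A hypothetical configuration (names chosen for illustration):
 *
 *	#define DEBUG_DUMPIPC_NAMES { "vfs", "pm" }
 *
 * which restricts the IPC dump to messages to or from those processes.
 */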

static void printmsg(message *msg, struct proc *src, struct proc *dst,
	char operation, int printparams)
{
	const char *name;
	int mtype = msg->m_type, mightbecall = 0;

#ifdef DEBUG_DUMPIPC_NAMES
  {
	char *names[] = DEBUG_DUMPIPC_NAMES;
	int nnames = sizeof(names)/sizeof(names[0]);

	/* Skip messages that are neither to nor from a process named in
	 * DEBUG_DUMPIPC_NAMES, if that list is defined; src and dst may
	 * be NULL to indicate the kernel.
	 */
	if(!(src && namematch(names, nnames, src->p_name)) &&
	   !(dst && namematch(names, nnames, dst->p_name))) {
		return;
	}
  }
#endif

	/* source, destination and message type */
	printf("%c", operation);
	printproc(src);
	printproc(dst);
	name = mtypename(mtype, &mightbecall);
	if (name) {
		printf(" %s(%d/0x%x)", name, mtype, mtype);
	} else {
		printf(" %d/0x%x", mtype, mtype);
	}

	if (mightbecall && printparams) {
#define IDENT(x, y) if (mtype == x) printparam(#y, &msg->y, sizeof(msg->y));
#include "kernel/extracted-mfield.h"
#undef IDENT
	}
	printf("\n");
}
#endif

#if DEBUG_IPCSTATS
#define IPCPROCS (NR_PROCS+1)	/* number of slots we need */
#define KERNELIPC NR_PROCS	/* slot number to use for kernel calls */
static int messages[IPCPROCS][IPCPROCS];

#define PRINTSLOTS 20
static struct {
	int src, dst, messages;
} winners[PRINTSLOTS];
static int total, goodslots;

static void printstats(int ticks)
{
	int i;
	for(i = 0; i < goodslots; i++) {
#define name(s) (s == KERNELIPC ? "kernel" : proc_addr(s)->p_name)
#define persec(n) (system_hz*(n)/ticks)
		char *n1 = name(winners[i].src),
			*n2 = name(winners[i].dst);
		printf("%2d. %8s -> %8s %9d/s\n",
			i, n1, n2, persec(winners[i].messages));
	}
	printf("total %d/s\n", persec(total));
}

static void sortstats(void)
{
	/* Rank the top message sender/receiver pairs for printstats(). */
	int src_slot, dst_slot;
	total = goodslots = 0;
	for(src_slot = 0; src_slot < IPCPROCS; src_slot++) {
		for(dst_slot = 0; dst_slot < IPCPROCS; dst_slot++) {
			int w = PRINTSLOTS, rem,
				n = messages[src_slot][dst_slot];
			total += n;
			while(w > 0 && n > winners[w-1].messages)
				w--;
			if(w >= PRINTSLOTS) continue;

			/* This combination has beaten the current winners
			 * and should be inserted at position 'w.'
			 */
			rem = PRINTSLOTS-w-1;
			assert(rem >= 0);
			assert(rem < PRINTSLOTS);
			if(rem > 0) {
				assert(w+1 <= PRINTSLOTS-1);
				assert(w >= 0);
				memmove(&winners[w+1], &winners[w],
					rem*sizeof(winners[0]));
			}
			winners[w].src = src_slot;
			winners[w].dst = dst_slot;
			winners[w].messages = n;
			if(goodslots < PRINTSLOTS) goodslots++;
		}
	}
}

#define proc2slot(p, s) { \
	if(p) { s = p->p_nr; } \
	else { s = KERNELIPC; } \
	assert(s >= 0 && s < IPCPROCS); \
}

static void statmsg(message *msg, struct proc *srcp, struct proc *dstp)
{
	int src, dst, now, secs, dt;
	static int lastprint;

	/* Stat message. The slot range is asserted by proc2slot itself. */
	proc2slot(srcp, src);
	proc2slot(dstp, dst);
	messages[src][dst]++;

	/* Print something? */
	now = get_monotonic();
	dt = now - lastprint;
	secs = dt/system_hz;
	if(secs >= 30) {
		memset(winners, 0, sizeof(winners));
		sortstats();
		printstats(dt);
		memset(messages, 0, sizeof(messages));
		lastprint = now;
	}
}
#endif
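
/* Taken together, statmsg() implements a simple windowed profile: counts
 * accumulate in messages[][] and, once at least 30 seconds of ticks have
 * elapsed since the last report, the top sender/receiver pairs are ranked
 * by sortstats() and printed, after which the counters are reset for the
 * next window.
 */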

#if DEBUG_IPC_HOOK
void hook_ipc_msgkcall(message *msg, struct proc *proc)
{
#if DEBUG_DUMPIPC
	printmsg(msg, proc, NULL, 'k', 1);
#endif
}

void hook_ipc_msgkresult(message *msg, struct proc *proc)
{
#if DEBUG_DUMPIPC
	printmsg(msg, NULL, proc, 'k', 0);
#endif
#if DEBUG_IPCSTATS
	statmsg(msg, proc, NULL);
#endif
}

void hook_ipc_msgrecv(message *msg, struct proc *src, struct proc *dst)
{
#if DEBUG_DUMPIPC
	printmsg(msg, src, dst, 'r', 0);
#endif
#if DEBUG_IPCSTATS
	statmsg(msg, src, dst);
#endif
}

void hook_ipc_msgsend(message *msg, struct proc *src, struct proc *dst)
{
#if DEBUG_DUMPIPC
	printmsg(msg, src, dst, 's', 1);
#endif
}

void hook_ipc_clear(struct proc *p)
{
#if DEBUG_IPCSTATS
	int slot, i;
	assert(p);
	proc2slot(p, slot);
	for(i = 0; i < IPCPROCS; i++)
		messages[slot][i] = messages[i][slot] = 0;
#endif
}
#endif
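
/* To activate the hooks above, the DEBUG_* switches are presumably enabled
 * where they are declared (kernel/debug.h in this tree, though that is an
 * assumption here), e.g.:
 *
 *	#define DEBUG_IPC_HOOK	1
 *	#define DEBUG_DUMPIPC	1
 *
 * after which every send ('s'), receive ('r') and kernel call ('k') is
 * printed on the console by the printmsg() calls in the hooks.
 */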