Some more 64bit function eradication.
. Replace 64bit functions with operators in arch_clock.c . Replace 64bit functions with operators in proc.c . Replace 64bit functions with operators in vbox.c . Replace 64bit functions with operators in driver.c . Eradicates is_zero64, make_zero64, neg64 Change-Id: Ie4e1242a73534f114725271b2e2365b2004cb7b9
This commit is contained in:
parent
7c62cdaaa7
commit
06154a34a4
9 changed files with 59 additions and 66 deletions
|
@ -77,8 +77,8 @@ static int driver_open(int which)
|
||||||
if(!size_known) {
|
if(!size_known) {
|
||||||
disk_size = part.size;
|
disk_size = part.size;
|
||||||
size_known = 1;
|
size_known = 1;
|
||||||
sectors = div64u(disk_size, SECTOR_SIZE);
|
sectors = (unsigned long)(disk_size / SECTOR_SIZE);
|
||||||
if(cmp64(mul64u(sectors, SECTOR_SIZE), disk_size)) {
|
if ((u64_t)sectors * SECTOR_SIZE != disk_size) {
|
||||||
printf("Filter: partition too large\n");
|
printf("Filter: partition too large\n");
|
||||||
|
|
||||||
return RET_REDO;
|
return RET_REDO;
|
||||||
|
@ -88,7 +88,7 @@ static int driver_open(int which)
|
||||||
disk_size, sectors);
|
disk_size, sectors);
|
||||||
#endif
|
#endif
|
||||||
} else {
|
} else {
|
||||||
if(cmp64(disk_size, part.size)) {
|
if (disk_size != part.size) {
|
||||||
printf("Filter: partition size mismatch "
|
printf("Filter: partition size mismatch "
|
||||||
"(0x%"PRIx64" != 0x%"PRIx64")\n",
|
"(0x%"PRIx64" != 0x%"PRIx64")\n",
|
||||||
part.size, disk_size);
|
part.size, disk_size);
|
||||||
|
@ -954,7 +954,7 @@ int read_write(u64_t pos, char *bufa, char *bufb, size_t *sizep, int request)
|
||||||
* report the driver for acting strangely!
|
* report the driver for acting strangely!
|
||||||
*/
|
*/
|
||||||
if (m1.BDEV_STATUS > (ssize_t) *sizep ||
|
if (m1.BDEV_STATUS > (ssize_t) *sizep ||
|
||||||
cmp64(add64u(pos, m1.BDEV_STATUS), disk_size) < 0)
|
(pos + (unsigned int) m1.BDEV_STATUS < disk_size))
|
||||||
return bad_driver(DRIVER_MAIN, BD_PROTO, EFAULT);
|
return bad_driver(DRIVER_MAIN, BD_PROTO, EFAULT);
|
||||||
|
|
||||||
/* Return the actual size. */
|
/* Return the actual size. */
|
||||||
|
@ -976,8 +976,8 @@ int read_write(u64_t pos, char *bufa, char *bufb, size_t *sizep, int request)
|
||||||
|
|
||||||
/* As above */
|
/* As above */
|
||||||
if (m2.BDEV_STATUS > (ssize_t) *sizep ||
|
if (m2.BDEV_STATUS > (ssize_t) *sizep ||
|
||||||
cmp64(add64u(pos, m2.BDEV_STATUS),
|
(pos + (unsigned int) m2.BDEV_STATUS <
|
||||||
disk_size) < 0)
|
disk_size))
|
||||||
return bad_driver(DRIVER_BACKUP, BD_PROTO,
|
return bad_driver(DRIVER_BACKUP, BD_PROTO,
|
||||||
EFAULT);
|
EFAULT);
|
||||||
|
|
||||||
|
|
|
@ -161,7 +161,7 @@ static void vbox_update_time(void)
|
||||||
sizeof(*req)) == VMMDEV_ERR_OK) {
|
sizeof(*req)) == VMMDEV_ERR_OK) {
|
||||||
time(&otime); /* old time */
|
time(&otime); /* old time */
|
||||||
|
|
||||||
ntime = div64u(req->time, 1000); /* new time */
|
ntime = (unsigned long)(req->time / 1000); /* new time */
|
||||||
|
|
||||||
/* Make time go forward, if the difference exceeds the drift
|
/* Make time go forward, if the difference exceeds the drift
|
||||||
* threshold. Never make time go backward.
|
* threshold. Never make time go backward.
|
||||||
|
|
|
@ -9,10 +9,6 @@
|
||||||
|
|
||||||
#include <limits.h>
|
#include <limits.h>
|
||||||
|
|
||||||
#define is_zero64(i) ((i) == 0)
|
|
||||||
#define make_zero64(i) ((i) = 0)
|
|
||||||
#define neg64(i) ((i) = -(i))
|
|
||||||
|
|
||||||
static inline u64_t add64(u64_t i, u64_t j)
|
static inline u64_t add64(u64_t i, u64_t j)
|
||||||
{
|
{
|
||||||
return i + j;
|
return i + j;
|
||||||
|
|
|
@ -52,8 +52,8 @@ void cycles_accounting_init(void)
|
||||||
{
|
{
|
||||||
read_tsc_64(get_cpu_var_ptr(cpu, tsc_ctr_switch));
|
read_tsc_64(get_cpu_var_ptr(cpu, tsc_ctr_switch));
|
||||||
|
|
||||||
make_zero64(get_cpu_var(cpu, cpu_last_tsc));
|
get_cpu_var(cpu, cpu_last_tsc) = 0;
|
||||||
make_zero64(get_cpu_var(cpu, cpu_last_idle));
|
get_cpu_var(cpu, cpu_last_idle) = 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
void context_stop(struct proc * p)
|
void context_stop(struct proc * p)
|
||||||
|
|
|
@ -119,11 +119,11 @@ static void estimate_cpu_freq(void)
|
||||||
/* remove the probe */
|
/* remove the probe */
|
||||||
rm_irq_handler(&calib_cpu);
|
rm_irq_handler(&calib_cpu);
|
||||||
|
|
||||||
tsc_delta = sub64(tsc1, tsc0);
|
tsc_delta = tsc1 - tsc0;
|
||||||
|
|
||||||
cpu_freq = mul64(div64u64(tsc_delta, PROBE_TICKS - 1), make64(system_hz, 0));
|
cpu_freq = (tsc_delta / (PROBE_TICKS - 1)) * system_hz;
|
||||||
cpu_set_freq(cpuid, cpu_freq);
|
cpu_set_freq(cpuid, cpu_freq);
|
||||||
cpu_info[cpuid].freq = div64u(cpu_freq, 1000000);
|
cpu_info[cpuid].freq = (unsigned long)(cpu_freq / 1000000);
|
||||||
BOOT_VERBOSE(cpu_print_freq(cpuid));
|
BOOT_VERBOSE(cpu_print_freq(cpuid));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -133,10 +133,9 @@ int init_local_timer(unsigned freq)
|
||||||
/* if we know the address, lapic is enabled and we should use it */
|
/* if we know the address, lapic is enabled and we should use it */
|
||||||
if (lapic_addr) {
|
if (lapic_addr) {
|
||||||
unsigned cpu = cpuid;
|
unsigned cpu = cpuid;
|
||||||
tsc_per_ms[cpu] = div64u(cpu_get_freq(cpu), 1000);
|
tsc_per_ms[cpu] = (unsigned long)(cpu_get_freq(cpu) / 1000);
|
||||||
lapic_set_timer_one_shot(1000000/system_hz);
|
lapic_set_timer_one_shot(1000000 / system_hz);
|
||||||
} else
|
} else {
|
||||||
{
|
|
||||||
BOOT_VERBOSE(printf("Initiating legacy i8253 timer\n"));
|
BOOT_VERBOSE(printf("Initiating legacy i8253 timer\n"));
|
||||||
#else
|
#else
|
||||||
{
|
{
|
||||||
|
@ -144,7 +143,7 @@ int init_local_timer(unsigned freq)
|
||||||
init_8253A_timer(freq);
|
init_8253A_timer(freq);
|
||||||
estimate_cpu_freq();
|
estimate_cpu_freq();
|
||||||
/* always only 1 cpu in the system */
|
/* always only 1 cpu in the system */
|
||||||
tsc_per_ms[0] = div64u(cpu_get_freq(0), 1000);
|
tsc_per_ms[0] = (unsigned long)(cpu_get_freq(0) / 1000);
|
||||||
}
|
}
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
|
@ -199,8 +198,8 @@ void cycles_accounting_init(void)
|
||||||
|
|
||||||
read_tsc_64(get_cpu_var_ptr(cpu, tsc_ctr_switch));
|
read_tsc_64(get_cpu_var_ptr(cpu, tsc_ctr_switch));
|
||||||
|
|
||||||
make_zero64(get_cpu_var(cpu, cpu_last_tsc));
|
get_cpu_var(cpu, cpu_last_tsc) = 0;
|
||||||
make_zero64(get_cpu_var(cpu, cpu_last_idle));
|
get_cpu_var(cpu, cpu_last_idle) = 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
void context_stop(struct proc * p)
|
void context_stop(struct proc * p)
|
||||||
|
@ -223,9 +222,9 @@ void context_stop(struct proc * p)
|
||||||
u64_t tmp;
|
u64_t tmp;
|
||||||
|
|
||||||
read_tsc_64(&tsc);
|
read_tsc_64(&tsc);
|
||||||
tmp = sub64(tsc, *__tsc_ctr_switch);
|
tmp = tsc - *__tsc_ctr_switch;
|
||||||
kernel_ticks[cpu] = add64(kernel_ticks[cpu], tmp);
|
kernel_ticks[cpu] = kernel_ticks[cpu] + tmp;
|
||||||
p->p_cycles = add64(p->p_cycles, tmp);
|
p->p_cycles = p->p_cycles + tmp;
|
||||||
must_bkl_unlock = 1;
|
must_bkl_unlock = 1;
|
||||||
} else {
|
} else {
|
||||||
u64_t bkl_tsc;
|
u64_t bkl_tsc;
|
||||||
|
@ -239,11 +238,11 @@ void context_stop(struct proc * p)
|
||||||
|
|
||||||
read_tsc_64(&tsc);
|
read_tsc_64(&tsc);
|
||||||
|
|
||||||
bkl_ticks[cpu] = add64(bkl_ticks[cpu], sub64(tsc, bkl_tsc));
|
bkl_ticks[cpu] = bkl_ticks[cpu] + tsc - bkl_tsc;
|
||||||
bkl_tries[cpu]++;
|
bkl_tries[cpu]++;
|
||||||
bkl_succ[cpu] += !(!(succ == 0));
|
bkl_succ[cpu] += !(!(succ == 0));
|
||||||
|
|
||||||
p->p_cycles = add64(p->p_cycles, sub64(tsc, *__tsc_ctr_switch));
|
p->p_cycles = p->p_cycles + tsc - *__tsc_ctr_switch;
|
||||||
|
|
||||||
#ifdef CONFIG_SMP
|
#ifdef CONFIG_SMP
|
||||||
/*
|
/*
|
||||||
|
@ -261,20 +260,20 @@ void context_stop(struct proc * p)
|
||||||
}
|
}
|
||||||
#else
|
#else
|
||||||
read_tsc_64(&tsc);
|
read_tsc_64(&tsc);
|
||||||
p->p_cycles = add64(p->p_cycles, sub64(tsc, *__tsc_ctr_switch));
|
p->p_cycles = p->p_cycles + tsc - *__tsc_ctr_switch;
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
tsc_delta = sub64(tsc, *__tsc_ctr_switch);
|
tsc_delta = tsc - *__tsc_ctr_switch;
|
||||||
|
|
||||||
if(kbill_ipc) {
|
if (kbill_ipc) {
|
||||||
kbill_ipc->p_kipc_cycles =
|
kbill_ipc->p_kipc_cycles =
|
||||||
add64(kbill_ipc->p_kipc_cycles, tsc_delta);
|
kbill_ipc->p_kipc_cycles + tsc_delta;
|
||||||
kbill_ipc = NULL;
|
kbill_ipc = NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
if(kbill_kcall) {
|
if (kbill_kcall) {
|
||||||
kbill_kcall->p_kcall_cycles =
|
kbill_kcall->p_kcall_cycles =
|
||||||
add64(kbill_kcall->p_kcall_cycles, tsc_delta);
|
kbill_kcall->p_kcall_cycles + tsc_delta;
|
||||||
kbill_kcall = NULL;
|
kbill_kcall = NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -285,15 +284,15 @@ void context_stop(struct proc * p)
|
||||||
*/
|
*/
|
||||||
if (p->p_endpoint >= 0) {
|
if (p->p_endpoint >= 0) {
|
||||||
#if DEBUG_RACE
|
#if DEBUG_RACE
|
||||||
make_zero64(p->p_cpu_time_left);
|
p->p_cpu_time_left = 0;
|
||||||
#else
|
#else
|
||||||
/* if (tsc_delta < p->p_cpu_time_left) in 64bit */
|
/* if (tsc_delta < p->p_cpu_time_left) in 64bit */
|
||||||
if (ex64hi(tsc_delta) < ex64hi(p->p_cpu_time_left) ||
|
if (ex64hi(tsc_delta) < ex64hi(p->p_cpu_time_left) ||
|
||||||
(ex64hi(tsc_delta) == ex64hi(p->p_cpu_time_left) &&
|
(ex64hi(tsc_delta) == ex64hi(p->p_cpu_time_left) &&
|
||||||
ex64lo(tsc_delta) < ex64lo(p->p_cpu_time_left)))
|
ex64lo(tsc_delta) < ex64lo(p->p_cpu_time_left)))
|
||||||
p->p_cpu_time_left = sub64(p->p_cpu_time_left, tsc_delta);
|
p->p_cpu_time_left = p->p_cpu_time_left - tsc_delta;
|
||||||
else {
|
else {
|
||||||
make_zero64(p->p_cpu_time_left);
|
p->p_cpu_time_left = 0;
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
}
|
}
|
||||||
|
@ -329,12 +328,12 @@ void context_stop_idle(void)
|
||||||
|
|
||||||
u64_t ms_2_cpu_time(unsigned ms)
|
u64_t ms_2_cpu_time(unsigned ms)
|
||||||
{
|
{
|
||||||
return mul64u(tsc_per_ms[cpuid], ms);
|
return (u64_t)tsc_per_ms[cpuid] * ms;
|
||||||
}
|
}
|
||||||
|
|
||||||
unsigned cpu_time_2_ms(u64_t cpu_time)
|
unsigned cpu_time_2_ms(u64_t cpu_time)
|
||||||
{
|
{
|
||||||
return div64u(cpu_time, tsc_per_ms[cpuid]);
|
return (unsigned long)(cpu_time / tsc_per_ms[cpuid]);
|
||||||
}
|
}
|
||||||
|
|
||||||
short cpu_load(void)
|
short cpu_load(void)
|
||||||
|
@ -357,13 +356,13 @@ short cpu_load(void)
|
||||||
current_idle = &idle->p_cycles; /* ptr to idle proc */
|
current_idle = &idle->p_cycles; /* ptr to idle proc */
|
||||||
|
|
||||||
/* calculate load since last cpu_load invocation */
|
/* calculate load since last cpu_load invocation */
|
||||||
if (!is_zero64(*last_tsc)) {
|
if (*last_tsc) {
|
||||||
tsc_delta = sub64(current_tsc, *last_tsc);
|
tsc_delta = current_tsc - *last_tsc;
|
||||||
idle_delta = sub64(*current_idle, *last_idle);
|
idle_delta = *current_idle - *last_idle;
|
||||||
|
|
||||||
busy = sub64(tsc_delta, idle_delta);
|
busy = tsc_delta - idle_delta;
|
||||||
busy = mul64(busy, make64(100, 0));
|
busy = busy * 100;
|
||||||
load = ex64lo(div64(busy, tsc_delta));
|
load = ex64lo(busy / tsc_delta);
|
||||||
|
|
||||||
if (load > 100)
|
if (load > 100)
|
||||||
load = 100;
|
load = 100;
|
||||||
|
|
|
@ -197,8 +197,7 @@ static void amd_watchdog_init(const unsigned cpu)
|
||||||
val = 1 << 20 | 1 << 17 | 1 << 16 | 0x76;
|
val = 1 << 20 | 1 << 17 | 1 << 16 | 0x76;
|
||||||
ia32_msr_write(AMD_MSR_EVENT_SEL0, 0, val);
|
ia32_msr_write(AMD_MSR_EVENT_SEL0, 0, val);
|
||||||
|
|
||||||
cpuf = cpu_get_freq(cpu);
|
cpuf = -cpu_get_freq(cpu);
|
||||||
neg64(cpuf);
|
|
||||||
watchdog->resetval = watchdog->watchdog_resetval = cpuf;
|
watchdog->resetval = watchdog->watchdog_resetval = cpuf;
|
||||||
|
|
||||||
ia32_msr_write(AMD_MSR_EVENT_CTR0,
|
ia32_msr_write(AMD_MSR_EVENT_CTR0,
|
||||||
|
@ -224,9 +223,8 @@ static int amd_watchdog_profile_init(const unsigned freq)
|
||||||
|
|
||||||
/* FIXME works only if all CPUs have the same freq */
|
/* FIXME works only if all CPUs have the same freq */
|
||||||
cpuf = cpu_get_freq(cpuid);
|
cpuf = cpu_get_freq(cpuid);
|
||||||
cpuf = div64u64(cpuf, freq);
|
cpuf = -div64u64(cpuf, freq);
|
||||||
|
|
||||||
neg64(cpuf);
|
|
||||||
watchdog->profile_resetval = cpuf;
|
watchdog->profile_resetval = cpuf;
|
||||||
|
|
||||||
return OK;
|
return OK;
|
||||||
|
|
|
@ -161,7 +161,7 @@ void kmain(kinfo_t *local_cbi)
|
||||||
DEBUGEXTRA(("initializing %s... ", ip->proc_name));
|
DEBUGEXTRA(("initializing %s... ", ip->proc_name));
|
||||||
rp = proc_addr(ip->proc_nr); /* get process pointer */
|
rp = proc_addr(ip->proc_nr); /* get process pointer */
|
||||||
ip->endpoint = rp->p_endpoint; /* ipc endpoint */
|
ip->endpoint = rp->p_endpoint; /* ipc endpoint */
|
||||||
make_zero64(rp->p_cpu_time_left);
|
rp->p_cpu_time_left = 0;
|
||||||
if(i < NR_TASKS) /* name (tasks only) */
|
if(i < NR_TASKS) /* name (tasks only) */
|
||||||
strlcpy(rp->p_name, ip->proc_name, sizeof(rp->p_name));
|
strlcpy(rp->p_name, ip->proc_name, sizeof(rp->p_name));
|
||||||
|
|
||||||
|
|
|
@ -252,7 +252,7 @@ not_runnable_pick_new:
|
||||||
if (proc_is_preempted(p)) {
|
if (proc_is_preempted(p)) {
|
||||||
p->p_rts_flags &= ~RTS_PREEMPTED;
|
p->p_rts_flags &= ~RTS_PREEMPTED;
|
||||||
if (proc_is_runnable(p)) {
|
if (proc_is_runnable(p)) {
|
||||||
if (!is_zero64(p->p_cpu_time_left))
|
if (p->p_cpu_time_left)
|
||||||
enqueue_head(p);
|
enqueue_head(p);
|
||||||
else
|
else
|
||||||
enqueue(p);
|
enqueue(p);
|
||||||
|
@ -348,7 +348,7 @@ check_misc_flags:
|
||||||
* as we are sure that a possible out-of-quantum message to the
|
* as we are sure that a possible out-of-quantum message to the
|
||||||
* scheduler will not collide with the regular ipc
|
* scheduler will not collide with the regular ipc
|
||||||
*/
|
*/
|
||||||
if (is_zero64(p->p_cpu_time_left))
|
if (!p->p_cpu_time_left)
|
||||||
proc_no_time(p);
|
proc_no_time(p);
|
||||||
/*
|
/*
|
||||||
* After handling the misc flags the selected process might not be
|
* After handling the misc flags the selected process might not be
|
||||||
|
@ -365,12 +365,12 @@ check_misc_flags:
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
p = arch_finish_switch_to_user();
|
p = arch_finish_switch_to_user();
|
||||||
assert(!is_zero64(p->p_cpu_time_left));
|
assert(p->p_cpu_time_left);
|
||||||
|
|
||||||
context_stop(proc_addr(KERNEL));
|
context_stop(proc_addr(KERNEL));
|
||||||
|
|
||||||
/* If the process isn't the owner of FPU, enable the FPU exception */
|
/* If the process isn't the owner of FPU, enable the FPU exception */
|
||||||
if(get_cpulocal_var(fpu_owner) != p)
|
if (get_cpulocal_var(fpu_owner) != p)
|
||||||
enable_fpu_exception();
|
enable_fpu_exception();
|
||||||
else
|
else
|
||||||
disable_fpu_exception();
|
disable_fpu_exception();
|
||||||
|
@ -1606,7 +1606,7 @@ static void enqueue_head(struct proc *rp)
|
||||||
* the process was runnable without its quantum expired when dequeued. A
|
* the process was runnable without its quantum expired when dequeued. A
|
||||||
* process with no time left should have been handled else and differently
|
* process with no time left should have been handled else and differently
|
||||||
*/
|
*/
|
||||||
assert(!is_zero64(rp->p_cpu_time_left));
|
assert(rp->p_cpu_time_left);
|
||||||
|
|
||||||
assert(q >= 0);
|
assert(q >= 0);
|
||||||
|
|
||||||
|
@ -1689,12 +1689,12 @@ void dequeue(struct proc *rp)
|
||||||
/* this is not all that accurate on virtual machines, especially with
|
/* this is not all that accurate on virtual machines, especially with
|
||||||
IO bound processes that only spend a short amount of time in the queue
|
IO bound processes that only spend a short amount of time in the queue
|
||||||
at a time. */
|
at a time. */
|
||||||
if (!is_zero64(rp->p_accounting.enter_queue)) {
|
if (rp->p_accounting.enter_queue) {
|
||||||
read_tsc_64(&tsc);
|
read_tsc_64(&tsc);
|
||||||
tsc_delta = sub64(tsc, rp->p_accounting.enter_queue);
|
tsc_delta = tsc - rp->p_accounting.enter_queue;
|
||||||
rp->p_accounting.time_in_queue = add64(rp->p_accounting.time_in_queue,
|
rp->p_accounting.time_in_queue = rp->p_accounting.time_in_queue +
|
||||||
tsc_delta);
|
tsc_delta;
|
||||||
make_zero64(rp->p_accounting.enter_queue);
|
rp->p_accounting.enter_queue = 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@ -1843,8 +1843,8 @@ void reset_proc_accounting(struct proc *p)
|
||||||
p->p_accounting.ipc_sync = 0;
|
p->p_accounting.ipc_sync = 0;
|
||||||
p->p_accounting.ipc_async = 0;
|
p->p_accounting.ipc_async = 0;
|
||||||
p->p_accounting.dequeues = 0;
|
p->p_accounting.dequeues = 0;
|
||||||
make_zero64(p->p_accounting.time_in_queue);
|
p->p_accounting.time_in_queue = 0;
|
||||||
make_zero64(p->p_accounting.enter_queue);
|
p->p_accounting.enter_queue = 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
void copr_not_available_handler(void)
|
void copr_not_available_handler(void)
|
||||||
|
|
|
@ -86,10 +86,10 @@ int do_fork(struct proc * caller, message * m_ptr)
|
||||||
RTS_SET(rpc, RTS_NO_QUANTUM);
|
RTS_SET(rpc, RTS_NO_QUANTUM);
|
||||||
reset_proc_accounting(rpc);
|
reset_proc_accounting(rpc);
|
||||||
|
|
||||||
make_zero64(rpc->p_cpu_time_left);
|
rpc->p_cpu_time_left = 0;
|
||||||
make_zero64(rpc->p_cycles);
|
rpc->p_cycles = 0;
|
||||||
make_zero64(rpc->p_kcall_cycles);
|
rpc->p_kcall_cycles = 0;
|
||||||
make_zero64(rpc->p_kipc_cycles);
|
rpc->p_kipc_cycles = 0;
|
||||||
rpc->p_signal_received = 0;
|
rpc->p_signal_received = 0;
|
||||||
|
|
||||||
/* If the parent is a privileged process, take away the privileges from the
|
/* If the parent is a privileged process, take away the privileges from the
|
||||||
|
|
Loading…
Reference in a new issue