SMP - fixed usage of stale TLB entries
- when the kernel copies from userspace, it must be sure that the TLB entries are not stale and thus the referenced memory is correct - every time we change a process' address space we set p_stale_tlb bits for all CPUs. - Whenever a CPU finds its bit set when it wants to access the process' memory, it refreshes the TLB - it is more conservative than it needs to be but it has lower overhead than checking precisely
This commit is contained in:
parent
0a55e63413
commit
8fa95abae4
5 changed files with 49 additions and 2 deletions
|
@ -17,4 +17,18 @@
|
||||||
#define SET_BIT(map,bit) ( MAP_CHUNK(map,bit) |= (1 << CHUNK_OFFSET(bit) ))
|
#define SET_BIT(map,bit) ( MAP_CHUNK(map,bit) |= (1 << CHUNK_OFFSET(bit) ))
|
||||||
#define UNSET_BIT(map,bit) ( MAP_CHUNK(map,bit) &= ~(1 << CHUNK_OFFSET(bit) ))
|
#define UNSET_BIT(map,bit) ( MAP_CHUNK(map,bit) &= ~(1 << CHUNK_OFFSET(bit) ))
|
||||||
|
|
||||||
|
#if defined(CONFIG_SMP) && defined(__GNUC__)
|
||||||
|
#ifndef __ASSEMBLY__
|
||||||
|
static inline bits_fill(bitchunk_t * chunks, unsigned bits)
|
||||||
|
{
|
||||||
|
unsigned c, cnt;
|
||||||
|
|
||||||
|
cnt = BITMAP_CHUNKS(bits);
|
||||||
|
for (c = 0; c < cnt; c++)
|
||||||
|
bit_fill(chunks[c]);
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
#endif
|
||||||
|
|
||||||
|
|
||||||
#endif /* _BITMAP_H */
|
#endif /* _BITMAP_H */
|
||||||
|
|
|
@ -138,8 +138,8 @@ PRIVATE phys_bytes createpde(
|
||||||
/*===========================================================================*
|
/*===========================================================================*
|
||||||
* lin_lin_copy *
|
* lin_lin_copy *
|
||||||
*===========================================================================*/
|
*===========================================================================*/
|
||||||
PRIVATE int lin_lin_copy(const struct proc *srcproc, vir_bytes srclinaddr,
|
PRIVATE int lin_lin_copy(struct proc *srcproc, vir_bytes srclinaddr,
|
||||||
const struct proc *dstproc, vir_bytes dstlinaddr, vir_bytes bytes)
|
struct proc *dstproc, vir_bytes dstlinaddr, vir_bytes bytes)
|
||||||
{
|
{
|
||||||
u32_t addr;
|
u32_t addr;
|
||||||
proc_nr_t procslot;
|
proc_nr_t procslot;
|
||||||
|
@ -167,6 +167,19 @@ PRIVATE int lin_lin_copy(const struct proc *srcproc, vir_bytes srclinaddr,
|
||||||
vir_bytes chunk = bytes;
|
vir_bytes chunk = bytes;
|
||||||
int changed = 0;
|
int changed = 0;
|
||||||
|
|
||||||
|
#ifdef CONFIG_SMP
|
||||||
|
unsigned cpu = cpuid;
|
||||||
|
|
||||||
|
if (GET_BIT(srcproc->p_stale_tlb, cpu)) {
|
||||||
|
changed = 1;
|
||||||
|
UNSET_BIT(srcproc->p_stale_tlb, cpu);
|
||||||
|
}
|
||||||
|
if (GET_BIT(dstproc->p_stale_tlb, cpu)) {
|
||||||
|
changed = 1;
|
||||||
|
UNSET_BIT(dstproc->p_stale_tlb, cpu);
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
/* Set up 4MB ranges. */
|
/* Set up 4MB ranges. */
|
||||||
srcptr = createpde(srcproc, srclinaddr, &chunk, 0, &changed);
|
srcptr = createpde(srcproc, srclinaddr, &chunk, 0, &changed);
|
||||||
dstptr = createpde(dstproc, dstlinaddr, &chunk, 1, &changed);
|
dstptr = createpde(dstproc, dstlinaddr, &chunk, 1, &changed);
|
||||||
|
|
|
@ -38,6 +38,11 @@ struct proc {
|
||||||
bitchunk_t p_cpu_mask[BITMAP_CHUNKS(CONFIG_MAX_CPUS)]; /* what CPUs is the
|
bitchunk_t p_cpu_mask[BITMAP_CHUNKS(CONFIG_MAX_CPUS)]; /* what CPUs is the
|
||||||
process allowed to
|
process allowed to
|
||||||
run on */
|
run on */
|
||||||
|
bitchunk_t p_stale_tlb[BITMAP_CHUNKS(CONFIG_MAX_CPUS)]; /* On which cpu are
|
||||||
|
possibly stale entries from this process and has
|
||||||
|
to be refreshed the next time the kernel touches this
|
||||||
|
processes memory
|
||||||
|
*/
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
/* Accounting statistics that get passed to the process' scheduler */
|
/* Accounting statistics that get passed to the process' scheduler */
|
||||||
|
|
|
@ -127,6 +127,11 @@ PUBLIC int do_update(struct proc * caller, message * m_ptr)
|
||||||
printf("do_update: curr ptproc %d\n", get_cpulocal_var(ptproc)->p_endpoint);
|
printf("do_update: curr ptproc %d\n", get_cpulocal_var(ptproc)->p_endpoint);
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
#ifdef CONFIG_SMP
|
||||||
|
bits_fill(src_rp->p_stale_tlb, CONFIG_MAX_CPUS);
|
||||||
|
bits_fill(dst_rp->p_stale_tlb, CONFIG_MAX_CPUS);
|
||||||
|
#endif
|
||||||
|
|
||||||
return OK;
|
return OK;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -159,6 +159,16 @@ PUBLIC int do_vmctl(struct proc * caller, message * m_ptr)
|
||||||
* cpu
|
* cpu
|
||||||
*/
|
*/
|
||||||
RTS_UNSET(p, RTS_VMINHIBIT);
|
RTS_UNSET(p, RTS_VMINHIBIT);
|
||||||
|
#ifdef CONFIG_SMP
|
||||||
|
/*
|
||||||
|
* We don't know whether kernel has the changed mapping
|
||||||
|
* installed to access userspace memory. And if so, on what CPU.
|
||||||
|
* Moreover, we don't know what mapping has changed and how and
|
||||||
|
* therefore we must invalidate all mappings we have anywhere.
|
||||||
|
* Next time we map memory, we map it fresh.
|
||||||
|
*/
|
||||||
|
bits_fill(p->p_stale_tlb, CONFIG_MAX_CPUS);
|
||||||
|
#endif
|
||||||
return OK;
|
return OK;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
Loading…
Reference in a new issue