From e4fa9802cba4cc51d81c745c96e847658943a14e Mon Sep 17 00:00:00 2001
From: Lionel Sambuc
Date: Sun, 10 Feb 2013 20:20:14 +0100
Subject: [PATCH] ARM: Enable caches

First round, some more optimizations are possible and should be activated.

Change-Id: I3b7dee7c82fbffd823a08bec1c5d5ebcf769f92f
---
 kernel/arch/earm/arch_system.c   |  1 -
 kernel/arch/earm/memory.c        | 17 +++++-----
 kernel/arch/earm/pg_utils.c      | 40 ++++++++++++-----------
 kernel/arch/i386/pg_utils.c      |  5 +--
 servers/vm/arch/i386/pagetable.c | 54 ++++++++++++++++++--------------
 servers/vm/arch/i386/pagetable.h |  1 -
 servers/vm/region.c              | 15 ++++++---
 servers/vm/utility.c             | 10 ++++--
 8 files changed, 83 insertions(+), 60 deletions(-)

diff --git a/kernel/arch/earm/arch_system.c b/kernel/arch/earm/arch_system.c
index 8d171e484..49f13fb97 100644
--- a/kernel/arch/earm/arch_system.c
+++ b/kernel/arch/earm/arch_system.c
@@ -204,7 +204,6 @@ void __switch_address_space(struct proc *p, struct proc **__ptproc)
 	if (new_ttbr == orig_ttbr)
 		return;
 
-	refresh_tlb();
 	write_ttbr0(new_ttbr);
 
 	*__ptproc = p;
diff --git a/kernel/arch/earm/memory.c b/kernel/arch/earm/memory.c
index 9608868a8..f31a1bf4e 100644
--- a/kernel/arch/earm/memory.c
+++ b/kernel/arch/earm/memory.c
@@ -100,9 +100,11 @@ static phys_bytes createpde(
 		pdeval = pr->p_seg.p_ttbr_v[ARM_VM_PDE(linaddr)];
 	} else {
 		/* Requested address is physical. Make up the PDE entry. */
-		pdeval = (linaddr & ARM_VM_SECTION_MASK) |
-			ARM_VM_SECTION |
-			ARM_VM_SECTION_DOMAIN | ARM_VM_SECTION_USER;
+		pdeval = (linaddr & ARM_VM_SECTION_MASK)
+		    | ARM_VM_SECTION
+		    | ARM_VM_SECTION_DOMAIN
+		    | ARM_VM_SECTION_WT
+		    | ARM_VM_SECTION_USER;
 	}
 
 	/* Write the pde value that we need into a pde that the kernel
@@ -189,7 +191,6 @@ static int lin_lin_copy(struct proc *srcproc, vir_bytes srclinaddr,
 		dstptr = createpde(dstproc, dstlinaddr, &chunk, 1, &changed);
 		if(changed) {
 			reload_ttbr0();
-			refresh_tlb();
 		}
 		/* Copy pages. */
 		PHYS_COPY_CATCH(srcptr, dstptr, chunk, addr);
@@ -305,12 +306,13 @@ int vm_lookup(const struct proc *proc, const vir_bytes virtual,
 		return EFAULT;
 	}
 
-	/* We don't expect to ever see this. */
+	/* We don't expect to ever see this.
+	 * LSC Impossible with the previous test.
 	if(pde_v & ARM_VM_BIGPAGE) {
 		*physical = pde_v & ARM_VM_SECTION_MASK;
 		if(ptent) *ptent = pde_v;
 		*physical += virtual & ARM_VM_OFFSET_MASK_1MB;
-	} else {
+	} else */ {
 		/* Retrieve page table entry. */
 		pt = (u32_t *) (pde_v & ARM_VM_PDE_MASK);
 		assert(!((u32_t) pt % ARM_PAGETABLE_SIZE));
@@ -500,7 +502,6 @@ int vm_memset(struct proc* caller, endpoint_t who, phys_bytes ph, int c,
 
 		if (new_ttbr) {
 			reload_ttbr0();
-			refresh_tlb();
 		}
 		/* If a page fault happens, pfa is non-null */
 		if ((pfa = phys_memset(ptr, pattern, chunk))) {
@@ -787,5 +788,5 @@ int arch_enable_paging(struct proc * caller)
 void release_address_space(struct proc *pr)
 {
 	pr->p_seg.p_ttbr_v = NULL;
-	refresh_tlb();
+	barrier();
 }
diff --git a/kernel/arch/earm/pg_utils.c b/kernel/arch/earm/pg_utils.c
index 2908a7a80..17584db8a 100644
--- a/kernel/arch/earm/pg_utils.c
+++ b/kernel/arch/earm/pg_utils.c
@@ -156,11 +156,12 @@ void pg_identity(kinfo_t *cbi)
 	assert(cbi->mem_high_phys);
 
 	/* Set up an identity mapping page directory */
-	for(i = 0; i < ARM_VM_DIR_ENTRIES; i++) {
-		u32_t flags = ARM_VM_SECTION |
-			ARM_VM_SECTION_DOMAIN | ARM_VM_SECTION_USER;
-		phys = i * ARM_BIG_PAGE_SIZE;
-		pagedir[i] = phys | flags;
+	for(i = 0; i < ARM_VM_DIR_ENTRIES; i++) {
+		u32_t flags = ARM_VM_SECTION
+		    | ARM_VM_SECTION_USER
+		    | ARM_VM_SECTION_DOMAIN;
+		phys = i * ARM_BIG_PAGE_SIZE;
+		pagedir[i] = phys | flags;
 	}
 }
 
@@ -169,14 +170,14 @@ int pg_mapkernel(void)
 	int pde;
 	u32_t mapped = 0, kern_phys = kern_phys_start;
 
-	assert(!(kern_vir_start % ARM_BIG_PAGE_SIZE));
-	assert(!(kern_phys_start % ARM_BIG_PAGE_SIZE));
-	pde = kern_vir_start / ARM_BIG_PAGE_SIZE; /* start pde */
+	assert(!(kern_vir_start % ARM_BIG_PAGE_SIZE));
+	assert(!(kern_phys_start % ARM_BIG_PAGE_SIZE));
+	pde = kern_vir_start / ARM_BIG_PAGE_SIZE; /* start pde */
 	while(mapped < kern_kernlen) {
-		pagedir[pde] = (kern_phys & ARM_VM_PDE_MASK) |
-			ARM_VM_SECTION |
-			ARM_VM_SECTION_DOMAIN | ARM_VM_SECTION_WB |
-			ARM_VM_SECTION_SHAREABLE | ARM_VM_SECTION_SUPER;
+		pagedir[pde] = (kern_phys & ARM_VM_PDE_MASK) | ARM_VM_SECTION
+		    | ARM_VM_SECTION_SUPER
+		    | ARM_VM_SECTION_DOMAIN
+		    | ARM_VM_SECTION_WT;
 		mapped += ARM_BIG_PAGE_SIZE;
 		kern_phys += ARM_BIG_PAGE_SIZE;
 		pde++;
@@ -196,7 +197,10 @@ void vm_enable_paging(void)
 	sctlr = read_sctlr();
 
 	/* Enable MMU */
-	sctlr |= (SCTLR_M);
+	sctlr |= SCTLR_M;
+
+	/* AFE set to zero (default reset value): not using simplified model. */
+	/* TRE set to zero (default reset value): TEX[2:0] are used, plus C and B bits.*/
 
 	/* Enable instruction and data cache */
 	sctlr |= SCTLR_C;
@@ -207,7 +211,6 @@ phys_bytes pg_load()
 {
 	phys_bytes phpagedir = vir2phys(pagedir);
-	refresh_tlb();
 	write_ttbr0(phpagedir);
 	return phpagedir;
 }
 
@@ -258,14 +261,15 @@ void pg_map(phys_bytes phys, vir_bytes vaddr, vir_bytes vaddr_end,
 		phys_bytes ph;
 		pt = alloc_pagetable(&ph);
 		pagedir[pde] = (ph & ARM_VM_PDE_MASK)
-			| ARM_VM_PAGEDIR | ARM_VM_PDE_DOMAIN;
+		    | ARM_VM_PAGEDIR
+		    | ARM_VM_PDE_DOMAIN;
 		mapped_pde = pde;
 	}
 	assert(pt);
 	pt[pte] = (source & ARM_VM_PTE_MASK)
-		| ARM_VM_PAGETABLE
-		| ARM_VM_PTE_WB | ARM_VM_PTE_SHAREABLE
-		| ARM_VM_PTE_USER;
+	    | ARM_VM_PAGETABLE
+	    | ARM_VM_PTE_WT
+	    | ARM_VM_PTE_USER;
 	vaddr += ARM_PAGE_SIZE;
 	if(phys != PG_ALLOCATEME)
 		phys += ARM_PAGE_SIZE;
diff --git a/kernel/arch/i386/pg_utils.c b/kernel/arch/i386/pg_utils.c
index 444df1f90..83eb35e6a 100644
--- a/kernel/arch/i386/pg_utils.c
+++ b/kernel/arch/i386/pg_utils.c
@@ -173,8 +173,9 @@ void pg_identity(kinfo_t *cbi)
 
 	/* Set up an identity mapping page directory */
 	for(i = 0; i < I386_VM_DIR_ENTRIES; i++) {
-		u32_t flags = I386_VM_PRESENT | I386_VM_BIGPAGE |
-			I386_VM_USER | I386_VM_WRITE;
+		u32_t flags = I386_VM_PRESENT | I386_VM_BIGPAGE
+		    | I386_VM_USER
+		    | I386_VM_WRITE;
 		phys = i * I386_BIG_PAGE_SIZE;
 		if((cbi->mem_high_phys & I386_VM_ADDR_MASK_4MB) <=
 			(phys & I386_VM_ADDR_MASK_4MB)) {
diff --git a/servers/vm/arch/i386/pagetable.c b/servers/vm/arch/i386/pagetable.c
index 0ab0eee2c..7187afda0 100644
--- a/servers/vm/arch/i386/pagetable.c
+++ b/servers/vm/arch/i386/pagetable.c
@@ -421,7 +421,7 @@ void *vm_allocpages(phys_bytes *phys, int reason, int pages)
 	if((r=pt_writemap(vmprocess, pt, loc, *phys, VM_PAGE_SIZE*pages,
 		ARCH_VM_PTE_PRESENT | ARCH_VM_PTE_USER | ARCH_VM_PTE_RW
 #if defined(__arm__)
-		| ARM_VM_PTE_WB | ARM_VM_PTE_SHAREABLE
+		| ARM_VM_PTE_WT
 #endif
 		, 0)) != OK) {
 		free_mem(newpage, pages);
@@ -468,7 +468,7 @@ void vm_pagelock(void *vir, int lockflag)
 #if defined(__arm__)
 	else
 		flags |= ARCH_VM_PTE_RO;
-	flags |= ARM_VM_PTE_WB | ARM_VM_PTE_SHAREABLE;
+	flags |= ARM_VM_PTE_WB | ARM_VM_PTE_SHAREABLE; // LSC FIXME
 #endif
 
 	/* Update flags. */
@@ -512,6 +512,12 @@ int vm_addrok(void *vir, int writeflag)
 			printf("addr not ok: pde %d present but pde unwritable\n", pde);
 			return 0;
 		}
+#elif defined(__arm__)
+		if(writeflag &&
+			(pt->pt_dir[pde] & ARCH_VM_PTE_RO)) {
+			printf("addr not ok: pde %d present but pde unwritable\n", pde);
+			return 0;
+		}
 #endif
 
 	if(!(pt->pt_pt[pde][pte] & ARCH_VM_PTE_PRESENT)) {
@@ -524,12 +530,13 @@ int vm_addrok(void *vir, int writeflag)
 	if(writeflag &&
 	 !(pt->pt_pt[pde][pte] & ARCH_VM_PTE_RW)) {
 		printf("addr not ok: pde %d / pte %d present but unwritable\n",
-#elif defined(__arm__)
-	if(!writeflag &&
-	 !(pt->pt_pt[pde][pte] & ARCH_VM_PTE_RO)) {
-		printf("addr not ok: pde %d / pte %d present but writable\n",
-#endif
 			pde, pte);
+#elif defined(__arm__)
+	if(writeflag &&
+	 (pt->pt_pt[pde][pte] & ARCH_VM_PTE_RO)) {
+		printf("addr not ok: pde %d / pte %d present but unwritable\n",
+			pde, pte);
+#endif
 		return 0;
 	}
 
@@ -571,7 +578,7 @@ static int pt_ptalloc(pt_t *pt, int pde, u32_t flags)
 		| ARCH_VM_PDE_PRESENT | ARCH_VM_PTE_USER | ARCH_VM_PTE_RW;
 #elif defined(__arm__)
 	pt->pt_dir[pde] = (pt_phys & ARCH_VM_PDE_MASK)
-		| ARCH_VM_PDE_PRESENT | ARM_VM_PDE_DOMAIN;
+		| ARCH_VM_PDE_PRESENT | ARM_VM_PDE_DOMAIN; //LSC FIXME
 #endif
 
 	return OK;
@@ -766,8 +773,8 @@ int pt_ptmap(struct vmproc *src_vmp, struct vmproc *dst_vmp)
 		ARCH_VM_PTE_PRESENT | ARCH_VM_PTE_USER | ARCH_VM_PTE_RW,
 #elif defined(__arm__)
 	if((r=pt_writemap(dst_vmp, &dst_vmp->vm_pt, viraddr, physaddr, ARCH_PAGEDIR_SIZE,
-		ARCH_VM_PTE_PRESENT | ARCH_VM_PTE_USER | ARCH_VM_PTE_RW |
-		ARM_VM_PTE_WB | ARM_VM_PTE_SHAREABLE,
+		ARCH_VM_PTE_PRESENT | ARCH_VM_PTE_USER |
+		ARM_VM_PTE_WB | ARM_VM_PTE_SHAREABLE, //LSC FIXME
 #endif
 		WMF_OVERWRITE)) != OK) {
 		return r;
@@ -793,8 +800,7 @@ int pt_ptmap(struct vmproc *src_vmp, struct vmproc *dst_vmp)
 	if((r=pt_writemap(dst_vmp, &dst_vmp->vm_pt, viraddr, physaddr, VM_PAGE_SIZE,
 		ARCH_VM_PTE_PRESENT | ARCH_VM_PTE_USER | ARCH_VM_PTE_RW
 #ifdef __arm__
-		| ARCH_VM_PTE_PRESENT | ARCH_VM_PTE_USER | ARCH_VM_PTE_RW |
-		ARM_VM_PTE_WB | ARM_VM_PTE_SHAREABLE
+		| ARM_VM_PTE_WB
 #endif
 		, WMF_OVERWRITE)) != OK) {
 		return r;
@@ -1202,9 +1208,6 @@ void pt_init(void)
 			kern_mappings[index].flags |= PTF_NOCACHE;
 #elif defined(__arm__)
 			kern_mappings[index].flags |= ARM_VM_PTE_DEVICE;
-		else
-			kern_mappings[index].flags |=
-				ARM_VM_PTE_WB | ARM_VM_PTE_SHAREABLE;
 #endif
 		if(flags & VMMF_USER)
 			kern_mappings[index].flags |= ARCH_VM_PTE_USER;
@@ -1267,8 +1270,9 @@ void pt_init(void)
 			pdm->val = (ph & ARCH_VM_ADDR_MASK) |
 				ARCH_VM_PDE_PRESENT | ARCH_VM_PTE_RW;
 #elif defined(__arm__)
-			pdm->val = (ph & ARCH_VM_PDE_MASK) |
-				ARCH_VM_PDE_PRESENT | ARM_VM_PDE_DOMAIN;
+			pdm->val = (ph & ARCH_VM_PDE_MASK)
+				| ARCH_VM_PDE_PRESENT
+				| ARM_VM_PDE_DOMAIN; //LSC FIXME
 #endif
 		}
 	}
@@ -1392,9 +1396,10 @@ int pt_bind(pt_t *pt, struct vmproc *who)
 		int i;
 		for (i = 0; i < pages_per_pagedir; i++) {
 			pdm->page_directories[pdeslot*pages_per_pagedir+i] =
-				(phys+i*VM_PAGE_SIZE) |
-				ARCH_VM_PTE_PRESENT | ARCH_VM_PTE_RW |
-				ARCH_VM_PTE_USER;
+				(phys+i*VM_PAGE_SIZE)
+				| ARCH_VM_PTE_PRESENT
+				| ARCH_VM_PTE_RW
+				| ARCH_VM_PTE_USER; //LSC FIXME
 		}
 	}
 #endif
@@ -1454,10 +1459,11 @@ int pt_mapkernel(pt_t *pt)
 		pt->pt_dir[kern_pde] = addr | ARCH_VM_PDE_PRESENT |
 			ARCH_VM_BIGPAGE | ARCH_VM_PTE_RW | global_bit;
 #elif defined(__arm__)
-		pt->pt_dir[kern_pde] = (addr & ARCH_VM_PDE_MASK) |
-			ARM_VM_SECTION |
-			ARM_VM_SECTION_DOMAIN | ARM_VM_SECTION_WB |
-			ARM_VM_SECTION_SHAREABLE | ARM_VM_SECTION_SUPER;
+		pt->pt_dir[kern_pde] = (addr & ARCH_VM_PDE_MASK)
+		    | ARM_VM_SECTION
+		    | ARM_VM_SECTION_DOMAIN
+		    | ARM_VM_SECTION_WB
+		    | ARM_VM_SECTION_SUPER;
 #endif
 		kern_pde++;
 		mapped += ARCH_BIG_PAGE_SIZE;
diff --git a/servers/vm/arch/i386/pagetable.h b/servers/vm/arch/i386/pagetable.h
index 251802b78..d40039d4b 100644
--- a/servers/vm/arch/i386/pagetable.h
+++ b/servers/vm/arch/i386/pagetable.h
@@ -13,7 +13,6 @@
 #define PTF_PRESENT	I386_VM_PRESENT
 #define PTF_USER	I386_VM_USER
 #define PTF_GLOBAL	I386_VM_GLOBAL
-#define PTF_MAPALLOC	I386_VM_PTAVAIL1	/* Page allocated by pt code. */
 #define PTF_NOCACHE	(I386_VM_PWT | I386_VM_PCD)
 
 #define ARCH_VM_DIR_ENTRIES I386_VM_DIR_ENTRIES
diff --git a/servers/vm/region.c b/servers/vm/region.c
index 16f76a20c..8d55fe3ae 100644
--- a/servers/vm/region.c
+++ b/servers/vm/region.c
@@ -342,7 +342,7 @@ void blockstats(void)
 static int map_ph_writept(struct vmproc *vmp, struct vir_region *vr,
 	struct phys_region *pr)
 {
-	int rw;
+	int flags = PTF_PRESENT | PTF_USER;
 	struct phys_block *pb = pr->ph;
 
 	assert(vr);
@@ -354,12 +354,19 @@ static int map_ph_writept(struct vmproc *vmp, struct vir_region *vr,
 	assert(pb->refcount > 0);
 
 	if(pr_writable(vr, pr))
-		rw = PTF_WRITE;
+		flags |= PTF_WRITE;
 	else
-		rw = PTF_READ;
+		flags |= PTF_READ;
+
+#if defined(__arm__)
+	if (pb->phys >= 0x80000000 && pb->phys < (0xc0000000 - VM_PAGE_SIZE)) {
+		// LSC Do this only for actual RAM
+		flags |= ARM_VM_PTE_WT;
+	}
+#endif
 
 	if(pt_writemap(vmp, &vmp->vm_pt, vr->vaddr + pr->offset,
-		pb->phys, VM_PAGE_SIZE, PTF_PRESENT | PTF_USER | rw,
+		pb->phys, VM_PAGE_SIZE, flags,
 #if SANITYCHECKS
 		!pr->written ? 0 :
 #endif
diff --git a/servers/vm/utility.c b/servers/vm/utility.c
index b7d19c111..26f2d9ef7 100644
--- a/servers/vm/utility.c
+++ b/servers/vm/utility.c
@@ -300,8 +300,14 @@ int _brk(void *addr)
 		if(newpage == NO_MEM) return -1;
 		mem = CLICK2ABS(newpage);
 		if(pt_writemap(vmprocess, &vmprocess->vm_pt,
-			v, mem, VM_PAGE_SIZE,
-			ARCH_VM_PTE_PRESENT | ARCH_VM_PTE_USER | ARCH_VM_PTE_RW, 0) != OK) {
+			v, mem, VM_PAGE_SIZE,
+			ARCH_VM_PTE_PRESENT
+			| ARCH_VM_PTE_USER
+			| ARCH_VM_PTE_RW
+#if defined(__arm__)
+			| ARM_VM_PTE_WB
+#endif
+			, 0) != OK) {
 			free_mem(newpage, 1);
 			return -1;
 		}
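
Note: the sketch below is illustrative and not part of the change. It restates, for the ARMv7-A short-descriptor format with AFE=0 and TRE=0 (the configuration the new comments in vm_enable_paging() describe), how a cacheable 1MB section entry and the relevant SCTLR enable bits are put together. All names in it are invented for the example; they are not the ARM_VM_ or SCTLR_ macros from the MINIX headers.

/* Illustrative sketch only: ARMv7-A short-descriptor section entry and
 * SCTLR bits, with invented names (not the MINIX ARM_VM_ / SCTLR_ macros). */
#include <stdint.h>

#define SECTION_TYPE     (2u << 0)   /* first-level descriptor type: 1MB section */
#define SECTION_B        (1u << 2)   /* B (bufferable) bit */
#define SECTION_C        (1u << 3)   /* C (cacheable) bit */
#define SECTION_DOMAIN0  (0u << 5)   /* domain field [8:5], domain 0 */
#define SECTION_AP_RW    (3u << 10)  /* AP[1:0] = 0b11: read/write at PL0 and PL1 */
#define SECTION_S        (1u << 16)  /* S (shareable) bit */

/* With TRE=0 and TEX[2:0]=0b000: C=1,B=0 selects write-through and
 * C=1,B=1 selects write-back (no write-allocate). */
#define SECTION_WT       SECTION_C
#define SECTION_WB       (SECTION_C | SECTION_B)

/* Compose a section entry for a 1MB-aligned physical address. */
static inline uint32_t section_entry(uint32_t phys, uint32_t cache_bits)
{
	return (phys & 0xFFF00000u)   /* section base address, bits [31:20] */
	    | SECTION_TYPE
	    | SECTION_DOMAIN0
	    | SECTION_AP_RW
	    | cache_bits;             /* SECTION_WT or SECTION_WB */
}

/* SCTLR bits involved in enabling the MMU and the caches; the hunk in
 * vm_enable_paging() above sets the MMU and cache enable bits. */
#define SCTLR_M_BIT  (1u << 0)    /* MMU enable */
#define SCTLR_C_BIT  (1u << 2)    /* data/unified cache enable */
#define SCTLR_I_BIT  (1u << 12)   /* instruction cache enable */

Because TEX[2:0] stays zero, only C and B select the memory type, which is why this first round mostly switches mappings to the write-through variants (ARM_VM_SECTION_WT / ARM_VM_PTE_WT) and drops the shareable bit; a few paths keep write-back or shareable for now (pt_mapkernel(), _brk(), vm_pagelock(), pt_ptmap()), and the LSC FIXME markers flag the spots to revisit.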