ARM: Enable caches
First round; some more optimizations are possible and should be activated.

Change-Id: I3b7dee7c82fbffd823a08bec1c5d5ebcf769f92f
commit e4fa9802cb
parent b36292e232
8 changed files with 83 additions and 60 deletions
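For orientation, here is a minimal sketch (in C, not part of the commit) of what the cache enable in vm_enable_paging() amounts to, condensed from the hunks below. read_sctlr(), SCTLR_M and SCTLR_C appear in the diff; SCTLR_I and write_sctlr() are assumed to exist in the surrounding MINIX ARM sources and are not shown in the visible context.

/*
 * Sketch only -- condensed from the vm_enable_paging() hunk below.
 * SCTLR_I and write_sctlr() are assumptions, not taken from the diff.
 */
static void enable_mmu_and_caches_sketch(void)
{
	u32_t sctlr = read_sctlr();

	sctlr |= SCTLR_M;	/* enable the MMU */

	/* AFE and TRE stay at their reset value of 0, so the memory type
	 * of each mapping is taken from TEX[2:0] plus the C and B bits. */

	sctlr |= SCTLR_C;	/* enable the data cache */
	sctlr |= SCTLR_I;	/* enable the instruction cache (assumed) */

	write_sctlr(sctlr);	/* assumed counterpart of read_sctlr() */
}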
@@ -204,7 +204,6 @@ void __switch_address_space(struct proc *p, struct proc **__ptproc)
 	if (new_ttbr == orig_ttbr)
 		return;
 
-	refresh_tlb();
 	write_ttbr0(new_ttbr);
 
 	*__ptproc = p;
@@ -100,9 +100,11 @@ static phys_bytes createpde(
 		pdeval = pr->p_seg.p_ttbr_v[ARM_VM_PDE(linaddr)];
 	} else {
 		/* Requested address is physical. Make up the PDE entry. */
-		pdeval = (linaddr & ARM_VM_SECTION_MASK) |
-			ARM_VM_SECTION |
-			ARM_VM_SECTION_DOMAIN | ARM_VM_SECTION_USER;
+		pdeval = (linaddr & ARM_VM_SECTION_MASK)
+			| ARM_VM_SECTION
+			| ARM_VM_SECTION_DOMAIN
+			| ARM_VM_SECTION_WT
+			| ARM_VM_SECTION_USER;
 	}
 
 	/* Write the pde value that we need into a pde that the kernel
@@ -189,7 +191,6 @@ static int lin_lin_copy(struct proc *srcproc, vir_bytes srclinaddr,
 		dstptr = createpde(dstproc, dstlinaddr, &chunk, 1, &changed);
 		if(changed) {
 			reload_ttbr0();
-			refresh_tlb();
 		}
 		/* Copy pages. */
 		PHYS_COPY_CATCH(srcptr, dstptr, chunk, addr);
@@ -305,12 +306,13 @@ int vm_lookup(const struct proc *proc, const vir_bytes virtual,
 		return EFAULT;
 	}
 
-	/* We don't expect to ever see this. */
+	/* We don't expect to ever see this.
+	 * LSC Impossible with the previous test.
 	if(pde_v & ARM_VM_BIGPAGE) {
 		*physical = pde_v & ARM_VM_SECTION_MASK;
 		if(ptent) *ptent = pde_v;
 		*physical += virtual & ARM_VM_OFFSET_MASK_1MB;
-	} else {
+	} else */ {
 		/* Retrieve page table entry. */
 		pt = (u32_t *) (pde_v & ARM_VM_PDE_MASK);
 		assert(!((u32_t) pt % ARM_PAGETABLE_SIZE));
@@ -500,7 +502,6 @@ int vm_memset(struct proc* caller, endpoint_t who, phys_bytes ph, int c,
 
 	if (new_ttbr) {
 		reload_ttbr0();
-		refresh_tlb();
 	}
 	/* If a page fault happens, pfa is non-null */
 	if ((pfa = phys_memset(ptr, pattern, chunk))) {
@@ -787,5 +788,5 @@ int arch_enable_paging(struct proc * caller)
 void release_address_space(struct proc *pr)
 {
 	pr->p_seg.p_ttbr_v = NULL;
-	refresh_tlb();
+	barrier();
 }

@@ -157,8 +157,9 @@ void pg_identity(kinfo_t *cbi)
 
 	/* Set up an identity mapping page directory */
 	for(i = 0; i < ARM_VM_DIR_ENTRIES; i++) {
-		u32_t flags = ARM_VM_SECTION |
-			ARM_VM_SECTION_DOMAIN | ARM_VM_SECTION_USER;
+		u32_t flags = ARM_VM_SECTION
+			| ARM_VM_SECTION_USER
+			| ARM_VM_SECTION_DOMAIN;
 		phys = i * ARM_BIG_PAGE_SIZE;
 		pagedir[i] = phys | flags;
 	}
@@ -173,10 +174,10 @@ int pg_mapkernel(void)
 	assert(!(kern_phys_start % ARM_BIG_PAGE_SIZE));
 	pde = kern_vir_start / ARM_BIG_PAGE_SIZE; /* start pde */
 	while(mapped < kern_kernlen) {
-		pagedir[pde] = (kern_phys & ARM_VM_PDE_MASK) |
-			ARM_VM_SECTION |
-			ARM_VM_SECTION_DOMAIN | ARM_VM_SECTION_WB |
-			ARM_VM_SECTION_SHAREABLE | ARM_VM_SECTION_SUPER;
+		pagedir[pde] = (kern_phys & ARM_VM_PDE_MASK) | ARM_VM_SECTION
+			| ARM_VM_SECTION_SUPER
+			| ARM_VM_SECTION_DOMAIN
+			| ARM_VM_SECTION_WT;
 		mapped += ARM_BIG_PAGE_SIZE;
 		kern_phys += ARM_BIG_PAGE_SIZE;
 		pde++;
@@ -196,7 +197,10 @@ void vm_enable_paging(void)
 	sctlr = read_sctlr();
 
 	/* Enable MMU */
-	sctlr |= (SCTLR_M);
+	sctlr |= SCTLR_M;
 
+	/* AFE set to zero (default reset value): not using simplified model. */
+	/* TRE set to zero (default reset value): TEX[2:0] are used, plus C and B bits.*/
+
 	/* Enable instruction and data cache */
 	sctlr |= SCTLR_C;
@@ -207,7 +211,6 @@ void vm_enable_paging(void)
 phys_bytes pg_load()
 {
 	phys_bytes phpagedir = vir2phys(pagedir);
-	refresh_tlb();
 	write_ttbr0(phpagedir);
 	return phpagedir;
 }
@@ -258,13 +261,14 @@ void pg_map(phys_bytes phys, vir_bytes vaddr, vir_bytes vaddr_end,
 			phys_bytes ph;
 			pt = alloc_pagetable(&ph);
 			pagedir[pde] = (ph & ARM_VM_PDE_MASK)
-				| ARM_VM_PAGEDIR | ARM_VM_PDE_DOMAIN;
+				| ARM_VM_PAGEDIR
+				| ARM_VM_PDE_DOMAIN;
 			mapped_pde = pde;
 		}
 		assert(pt);
 		pt[pte] = (source & ARM_VM_PTE_MASK)
 			| ARM_VM_PAGETABLE
-			| ARM_VM_PTE_WB | ARM_VM_PTE_SHAREABLE
+			| ARM_VM_PTE_WT
 			| ARM_VM_PTE_USER;
 		vaddr += ARM_PAGE_SIZE;
 		if(phys != PG_ALLOCATEME)

@@ -173,8 +173,9 @@ void pg_identity(kinfo_t *cbi)
 
 	/* Set up an identity mapping page directory */
 	for(i = 0; i < I386_VM_DIR_ENTRIES; i++) {
-		u32_t flags = I386_VM_PRESENT | I386_VM_BIGPAGE |
-			I386_VM_USER | I386_VM_WRITE;
+		u32_t flags = I386_VM_PRESENT | I386_VM_BIGPAGE
+			| I386_VM_USER
+			| I386_VM_WRITE;
 		phys = i * I386_BIG_PAGE_SIZE;
 		if((cbi->mem_high_phys & I386_VM_ADDR_MASK_4MB)
 			<= (phys & I386_VM_ADDR_MASK_4MB)) {

@@ -421,7 +421,7 @@ void *vm_allocpages(phys_bytes *phys, int reason, int pages)
 	if((r=pt_writemap(vmprocess, pt, loc, *phys, VM_PAGE_SIZE*pages,
 		ARCH_VM_PTE_PRESENT | ARCH_VM_PTE_USER | ARCH_VM_PTE_RW
 #if defined(__arm__)
-		| ARM_VM_PTE_WB | ARM_VM_PTE_SHAREABLE
+		| ARM_VM_PTE_WT
 #endif
 		, 0)) != OK) {
 		free_mem(newpage, pages);
@@ -468,7 +468,7 @@ void vm_pagelock(void *vir, int lockflag)
 #if defined(__arm__)
 	else
 		flags |= ARCH_VM_PTE_RO;
-	flags |= ARM_VM_PTE_WB | ARM_VM_PTE_SHAREABLE;
+	flags |= ARM_VM_PTE_WB | ARM_VM_PTE_SHAREABLE; // LSC FIXME
 #endif
 
 	/* Update flags. */
@@ -512,6 +512,12 @@ int vm_addrok(void *vir, int writeflag)
 			printf("addr not ok: pde %d present but pde unwritable\n", pde);
 			return 0;
 		}
+#elif defined(__arm__)
+		if(writeflag &&
+			(pt->pt_dir[pde] & ARCH_VM_PTE_RO)) {
+			printf("addr not ok: pde %d present but pde unwritable\n", pde);
+			return 0;
+		}
 
 #endif
 		if(!(pt->pt_pt[pde][pte] & ARCH_VM_PTE_PRESENT)) {
@@ -524,12 +530,13 @@ int vm_addrok(void *vir, int writeflag)
 		if(writeflag &&
 			!(pt->pt_pt[pde][pte] & ARCH_VM_PTE_RW)) {
 			printf("addr not ok: pde %d / pte %d present but unwritable\n",
-#elif defined(__arm__)
-		if(!writeflag &&
-			!(pt->pt_pt[pde][pte] & ARCH_VM_PTE_RO)) {
-			printf("addr not ok: pde %d / pte %d present but writable\n",
-#endif
 				pde, pte);
+#elif defined(__arm__)
+		if(writeflag &&
+			(pt->pt_pt[pde][pte] & ARCH_VM_PTE_RO)) {
+			printf("addr not ok: pde %d / pte %d present but unwritable\n",
+				pde, pte);
+#endif
 			return 0;
 		}
 
@@ -571,7 +578,7 @@ static int pt_ptalloc(pt_t *pt, int pde, u32_t flags)
 		| ARCH_VM_PDE_PRESENT | ARCH_VM_PTE_USER | ARCH_VM_PTE_RW;
 #elif defined(__arm__)
 	pt->pt_dir[pde] = (pt_phys & ARCH_VM_PDE_MASK)
-		| ARCH_VM_PDE_PRESENT | ARM_VM_PDE_DOMAIN;
+		| ARCH_VM_PDE_PRESENT | ARM_VM_PDE_DOMAIN; //LSC FIXME
 #endif
 
 	return OK;
@@ -766,8 +773,8 @@ int pt_ptmap(struct vmproc *src_vmp, struct vmproc *dst_vmp)
 		ARCH_VM_PTE_PRESENT | ARCH_VM_PTE_USER | ARCH_VM_PTE_RW,
 #elif defined(__arm__)
 	if((r=pt_writemap(dst_vmp, &dst_vmp->vm_pt, viraddr, physaddr, ARCH_PAGEDIR_SIZE,
-		ARCH_VM_PTE_PRESENT | ARCH_VM_PTE_USER | ARCH_VM_PTE_RW |
-		ARM_VM_PTE_WB | ARM_VM_PTE_SHAREABLE,
+		ARCH_VM_PTE_PRESENT | ARCH_VM_PTE_USER |
+		ARM_VM_PTE_WB | ARM_VM_PTE_SHAREABLE, //LSC FIXME
 #endif
 		WMF_OVERWRITE)) != OK) {
 		return r;
@@ -793,8 +800,7 @@ int pt_ptmap(struct vmproc *src_vmp, struct vmproc *dst_vmp)
 	if((r=pt_writemap(dst_vmp, &dst_vmp->vm_pt, viraddr, physaddr, VM_PAGE_SIZE,
 		ARCH_VM_PTE_PRESENT | ARCH_VM_PTE_USER | ARCH_VM_PTE_RW
 #ifdef __arm__
-		| ARCH_VM_PTE_PRESENT | ARCH_VM_PTE_USER | ARCH_VM_PTE_RW |
-		ARM_VM_PTE_WB | ARM_VM_PTE_SHAREABLE
+		| ARM_VM_PTE_WB
 #endif
 		,
 		WMF_OVERWRITE)) != OK) {
@@ -1202,9 +1208,6 @@ void pt_init(void)
 			kern_mappings[index].flags |= PTF_NOCACHE;
 #elif defined(__arm__)
 			kern_mappings[index].flags |= ARM_VM_PTE_DEVICE;
-		else
-			kern_mappings[index].flags |=
-				ARM_VM_PTE_WB | ARM_VM_PTE_SHAREABLE;
 #endif
 		if(flags & VMMF_USER)
 			kern_mappings[index].flags |= ARCH_VM_PTE_USER;
@@ -1267,8 +1270,9 @@ void pt_init(void)
 		pdm->val = (ph & ARCH_VM_ADDR_MASK) |
 			ARCH_VM_PDE_PRESENT | ARCH_VM_PTE_RW;
 #elif defined(__arm__)
-		pdm->val = (ph & ARCH_VM_PDE_MASK) |
-			ARCH_VM_PDE_PRESENT | ARM_VM_PDE_DOMAIN;
+		pdm->val = (ph & ARCH_VM_PDE_MASK)
+			| ARCH_VM_PDE_PRESENT
+			| ARM_VM_PDE_DOMAIN; //LSC FIXME
 #endif
 	}
 }
@@ -1392,9 +1396,10 @@ int pt_bind(pt_t *pt, struct vmproc *who)
 		int i;
 		for (i = 0; i < pages_per_pagedir; i++) {
 			pdm->page_directories[pdeslot*pages_per_pagedir+i] =
-				(phys+i*VM_PAGE_SIZE) |
-				ARCH_VM_PTE_PRESENT | ARCH_VM_PTE_RW |
-				ARCH_VM_PTE_USER;
+				(phys+i*VM_PAGE_SIZE)
+				| ARCH_VM_PTE_PRESENT
+				| ARCH_VM_PTE_RW
+				| ARCH_VM_PTE_USER; //LSC FIXME
 		}
 	}
 #endif
@@ -1454,10 +1459,11 @@ int pt_mapkernel(pt_t *pt)
 		pt->pt_dir[kern_pde] = addr | ARCH_VM_PDE_PRESENT |
 			ARCH_VM_BIGPAGE | ARCH_VM_PTE_RW | global_bit;
 #elif defined(__arm__)
-		pt->pt_dir[kern_pde] = (addr & ARCH_VM_PDE_MASK) |
-			ARM_VM_SECTION |
-			ARM_VM_SECTION_DOMAIN | ARM_VM_SECTION_WB |
-			ARM_VM_SECTION_SHAREABLE | ARM_VM_SECTION_SUPER;
+		pt->pt_dir[kern_pde] = (addr & ARCH_VM_PDE_MASK)
+			| ARM_VM_SECTION
+			| ARM_VM_SECTION_DOMAIN
+			| ARM_VM_SECTION_WB
+			| ARM_VM_SECTION_SUPER;
 #endif
 		kern_pde++;
 		mapped += ARCH_BIG_PAGE_SIZE;

@@ -13,7 +13,6 @@
 #define PTF_PRESENT	I386_VM_PRESENT
 #define PTF_USER	I386_VM_USER
 #define PTF_GLOBAL	I386_VM_GLOBAL
-#define PTF_MAPALLOC	I386_VM_PTAVAIL1 /* Page allocated by pt code. */
 #define PTF_NOCACHE	(I386_VM_PWT | I386_VM_PCD)
 
 #define ARCH_VM_DIR_ENTRIES	I386_VM_DIR_ENTRIES

@@ -342,7 +342,7 @@ void blockstats(void)
 static int map_ph_writept(struct vmproc *vmp, struct vir_region *vr,
 	struct phys_region *pr)
 {
-	int rw;
+	int flags = PTF_PRESENT | PTF_USER;
 	struct phys_block *pb = pr->ph;
 
 	assert(vr);
@@ -354,12 +354,19 @@ static int map_ph_writept(struct vmproc *vmp, struct vir_region *vr,
 	assert(pb->refcount > 0);
 
 	if(pr_writable(vr, pr))
-		rw = PTF_WRITE;
+		flags |= PTF_WRITE;
 	else
-		rw = PTF_READ;
+		flags |= PTF_READ;
 
+#if defined(__arm__)
+	if (pb->phys >= 0x80000000 && pb->phys < (0xc0000000 - VM_PAGE_SIZE)) {
+		// LSC Do this only for actual RAM
+		flags |= ARM_VM_PTE_WT;
+	}
+#endif
+
 	if(pt_writemap(vmp, &vmp->vm_pt, vr->vaddr + pr->offset,
-		pb->phys, VM_PAGE_SIZE, PTF_PRESENT | PTF_USER | rw,
+		pb->phys, VM_PAGE_SIZE, flags,
 #if SANITYCHECKS
 		!pr->written ? 0 :
 #endif
@@ -301,7 +301,13 @@ int _brk(void *addr)
 		mem = CLICK2ABS(newpage);
 		if(pt_writemap(vmprocess, &vmprocess->vm_pt,
 			v, mem, VM_PAGE_SIZE,
-			ARCH_VM_PTE_PRESENT | ARCH_VM_PTE_USER | ARCH_VM_PTE_RW, 0) != OK) {
+			ARCH_VM_PTE_PRESENT
+			| ARCH_VM_PTE_USER
+			| ARCH_VM_PTE_RW
+#if defined(__arm__)
+			| ARM_VM_PTE_WB
+#endif
+			, 0) != OK) {
 			free_mem(newpage, 1);
 			return -1;
 		}
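Several hunks above swap write-back, shareable attributes (ARM_VM_SECTION_WB / ARM_VM_PTE_WB plus SHAREABLE) for write-through ones (ARM_VM_SECTION_WT / ARM_VM_PTE_WT). As an illustration only -- the #defines below are hypothetical and are not the MINIX ARM_VM_* values -- this is how the two memory types are typically encoded for 1 MB sections in the ARMv7 short-descriptor format when SCTLR.TRE stays 0, as noted in the vm_enable_paging() hunk:

/*
 * Illustrative sketch of ARMv7 short-descriptor section attributes
 * (TEX[2:0]/C/B scheme, TRE = 0). Not the MINIX header definitions.
 */
#define SECTION_B	(1 << 2)	/* bufferable */
#define SECTION_C	(1 << 3)	/* cacheable */
#define SECTION_TEX(x)	((x) << 12)	/* TEX[2:0] for sections */
#define SECTION_S	(1 << 16)	/* shareable */

/* TEX=000, C=1, B=0: outer and inner write-through, no write-allocate */
#define SECTION_WT	(SECTION_TEX(0) | SECTION_C)

/* TEX=000, C=1, B=1: outer and inner write-back, no write-allocate */
#define SECTION_WB	(SECTION_TEX(0) | SECTION_C | SECTION_B)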