From 6405b1ab60b6116a3a97c478575edb350949b783 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Pawe=C5=82=20Jasiak?=
Date: Fri, 2 Oct 2020 11:26:26 +0200
Subject: [PATCH] Almost complete pmap module for AArch64 (#743)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

What's missing:

* pmap_growkernel
* empty page table for TTBR0 when no user-space is mapped
* referenced & modified bits handling
* testing

Co-authored-by: Krystian Bacławski
---
 include/aarch64/pmap.h |  12 +-
 include/aarch64/pte.h  |  15 ++
 include/aarch64/tlb.h  |  13 ++
 include/sys/pmap.h     |   1 -
 sys/aarch64/Makefile   |   1 +
 sys/aarch64/boot.c     |  26 ++-
 sys/aarch64/pmap.c     | 482 +++++++++++++++++++++++++++++++++++++----
 sys/aarch64/rpi3.c     |   3 +-
 sys/aarch64/tlb.c      |  27 +++
 sys/kern/vmem.c        |   1 +
 sys/mips/pmap.c        |   6 +-
 11 files changed, 528 insertions(+), 59 deletions(-)
 create mode 100644 include/aarch64/tlb.h
 create mode 100644 sys/aarch64/tlb.c

diff --git a/include/aarch64/pmap.h b/include/aarch64/pmap.h
index 151ca47805..57cd911322 100644
--- a/include/aarch64/pmap.h
+++ b/include/aarch64/pmap.h
@@ -1,11 +1,7 @@
 #ifndef _AARCH64_PMAP_H_
 #define _AARCH64_PMAP_H_
 
-#include <sys/types.h>
-
-typedef uint8_t asid_t;
-typedef uint64_t pte_t;
-typedef uint64_t pde_t;
+#include <aarch64/pte.h>
 
 typedef struct pmap pmap_t;
 
@@ -15,4 +11,10 @@ typedef struct pmap pmap_t;
 /* Number of page table entries. */
 #define PT_ENTRIES (PAGESIZE / (int)sizeof(pte_t))
 
+#define PMAP_KERNEL_BEGIN 0xffff000000000000L
+#define PMAP_KERNEL_END 0xffffffffffffffffL
+
+#define PMAP_USER_BEGIN 0x0000000000400000L
+#define PMAP_USER_END 0x0000800000000000L
+
 #endif /* !_AARCH64_PMAP_H_ */
diff --git a/include/aarch64/pte.h b/include/aarch64/pte.h
index 1be6f2c814..a7f08646d5 100644
--- a/include/aarch64/pte.h
+++ b/include/aarch64/pte.h
@@ -31,6 +31,16 @@
 #ifndef _MACHINE_PTE_H_
 #define _MACHINE_PTE_H_
 
+#include <sys/types.h>
+
+typedef uint8_t asid_t;
+typedef uint64_t pte_t;
+typedef uint64_t pde_t;
+
+#define PAGE_SHIFT 12
+#define ASID_SHIFT 48
+#define MAX_ASID 0xFF
+
 /* Block and Page attributes */
 #define ATTR_MASK_H UINT64_C(0xfff0000000000000)
 #define ATTR_MASK_L UINT64_C(0x0000000000000fff)
@@ -66,6 +76,11 @@
 #define ATTR_NORMAL_MEM_WB 2
 #define ATTR_NORMAL_MEM_WT 3
 
+#define ATTR_S2_S2AP(x) ((x) << 6)
+#define ATTR_S2_S2AP_MASK 3
+#define ATTR_S2_S2AP_READ 1
+#define ATTR_S2_S2AP_WRITE 2
+
 /* Level 0 table, 512GiB per entry */
 #define L0_SHIFT 39
 #define L0_SIZE (1ul << L0_SHIFT)
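A note on the geometry these constants encode: a 4 KiB granule (PAGE_SHIFT 12) with 512 eight-byte entries per table means each translation level resolves 9 bits of virtual address, starting at bit 39 (L0_SHIFT). Only L0_SHIFT is visible in this hunk, so the per-level index macros the rest of the patch relies on are assumed to follow the same pattern:

    /* Sketch (assumed): per-level VA index macros for a 4 KiB granule. */
    #define Ln_ENTRIES 512 /* = PAGESIZE / sizeof(pte_t) = PT_ENTRIES */
    #define L0_INDEX(va) (((va) >> 39) & (Ln_ENTRIES - 1)) /* 512 GiB/entry */
    #define L1_INDEX(va) (((va) >> 30) & (Ln_ENTRIES - 1)) /* 1 GiB/entry */
    #define L2_INDEX(va) (((va) >> 21) & (Ln_ENTRIES - 1)) /* 2 MiB/entry */
    #define L3_INDEX(va) (((va) >> 12) & (Ln_ENTRIES - 1)) /* 4 KiB/entry */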
diff --git a/include/aarch64/tlb.h b/include/aarch64/tlb.h
new file mode 100644
index 0000000000..bf5b145dfe
--- /dev/null
+++ b/include/aarch64/tlb.h
@@ -0,0 +1,13 @@
+#ifndef _AARCH64_TLB_H_
+#define _AARCH64_TLB_H_
+
+#ifndef _MACHDEP
+#error "Do not use this header file outside kernel machine dependent code!"
+#endif
+
+#include <aarch64/pte.h>
+
+void tlb_invalidate(pte_t pte, asid_t asid);
+void tlb_invalidate_asid(asid_t asid);
+
+#endif /* !_AARCH64_TLB_H_ */
diff --git a/include/sys/pmap.h b/include/sys/pmap.h
index 85daff6344..f0717853e6 100644
--- a/include/sys/pmap.h
+++ b/include/sys/pmap.h
@@ -35,7 +35,6 @@ vaddr_t pmap_end(pmap_t *pmap);
 
 void init_pmap(void);
 pmap_t *pmap_new(void);
-void pmap_reset(pmap_t *pmap);
 void pmap_delete(pmap_t *pmap);
 
 void pmap_enter(pmap_t *pmap, vaddr_t va, vm_page_t *pg, vm_prot_t prot,
diff --git a/sys/aarch64/Makefile b/sys/aarch64/Makefile
index 9f36a155cb..1f42b83aff 100644
--- a/sys/aarch64/Makefile
+++ b/sys/aarch64/Makefile
@@ -14,6 +14,7 @@ SOURCES = boot.c \
 	start.S \
 	switch.S \
 	thread.c \
+	tlb.c \
 	trap.c \
 	ucontext.c
diff --git a/sys/aarch64/boot.c b/sys/aarch64/boot.c
index 386b86196d..d00dd440ba 100644
--- a/sys/aarch64/boot.c
+++ b/sys/aarch64/boot.c
@@ -20,7 +20,7 @@
 /* Last physical address used by kernel for boot memory allocation. */
 __boot_data void *_bootmem_end;
 /* Kernel page directory entries. */
-alignas(PAGESIZE) pte_t _kernel_pmap_pde[PD_ENTRIES];
+paddr_t _kernel_pmap_pde;
 alignas(PAGESIZE) uint8_t _atags[PAGESIZE];
 
 extern char exception_vectors[];
@@ -140,9 +140,9 @@ __boot_text static void clear_bss(void) {
 #define DMAP_L2_SIZE roundup(DMAP_L2_ENTRIES * sizeof(pde_t), PAGESIZE)
 #define DMAP_L3_SIZE roundup(DMAP_L3_ENTRIES * sizeof(pte_t), PAGESIZE)
 
-__boot_text static void build_page_table(void) {
+__boot_text static paddr_t build_page_table(void) {
   /* l0 entry is 512GB */
-  volatile pde_t *l0 = (pde_t *)AARCH64_PHYSADDR(_kernel_pmap_pde);
+  volatile pde_t *l0 = bootmem_alloc(PAGESIZE);
   /* l1 entry is 1GB */
   volatile pde_t *l1 = bootmem_alloc(PAGESIZE);
   /* l2 entry is 2MB */
@@ -159,6 +159,13 @@ __boot_text static void build_page_table(void) {
   l1[L1_INDEX(va)] = (pde_t)l2 | L1_TABLE;
   l2[L2_INDEX(va)] = (pde_t)l3 | L2_TABLE;
 
+  /* TODO(pj) imitate pmap_growkernel from NetBSD */
+  l2[L2_INDEX(0)] = (pde_t)bootmem_alloc(PAGESIZE) | L2_TABLE;
+  for (int i = 0; i < 32; i++) {
+    l2[L2_INDEX(0xffff000000400000 + i * PAGESIZE * PT_ENTRIES)] =
+      (pde_t)bootmem_alloc(PAGESIZE) | L2_TABLE;
+  }
+
   const pte_t pte_default =
     L3_PAGE | ATTR_AF | ATTR_SH(ATTR_SH_IS) | ATTR_IDX(ATTR_NORMAL_MEM_WB);
 
@@ -191,15 +198,17 @@ __boot_text static void build_page_table(void) {
     l1d[i] = (pde_t)&l2d[i * PT_ENTRIES] | L1_TABLE;
 
   l0[L0_INDEX(DMAP_BASE)] = (pde_t)l1d | L0_TABLE;
+
+  return (paddr_t)l0;
 }
 
 /* Based on locore.S from FreeBSD. */
-__boot_text static void enable_mmu(void) {
+__boot_text static void enable_mmu(paddr_t pde) {
   __dsb("sy");
 
   WRITE_SPECIALREG(VBAR_EL1, exception_vectors);
-  WRITE_SPECIALREG(TTBR0_EL1, AARCH64_PHYSADDR(_kernel_pmap_pde));
-  WRITE_SPECIALREG(TTBR1_EL1, AARCH64_PHYSADDR(_kernel_pmap_pde));
+  WRITE_SPECIALREG(TTBR0_EL1, pde);
+  WRITE_SPECIALREG(TTBR1_EL1, pde);
   __isb();
 
   /* Clear the Monitor Debug System control register. */
@@ -251,6 +260,8 @@ __boot_text static void enable_mmu(void) {
    */
   WRITE_SPECIALREG(sctlr_el1, SCTLR_M | SCTLR_I | SCTLR_C);
   __isb();
+
+  _kernel_pmap_pde = pde;
 }
 
 __boot_text static void atags_copy(atag_tag_t *atags) {
@@ -274,8 +285,7 @@ __boot_text void *aarch64_init(atag_tag_t *atags) {
   _bootmem_end = (void *)align(AARCH64_PHYSADDR(__ebss), PAGESIZE);
 
   atags_copy(atags);
-  build_page_table();
-  enable_mmu();
+  enable_mmu(build_page_table());
 
   return _atags;
 }
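The TODO block in build_page_table pre-wires L3 tables because pmap_growkernel does not exist yet. Each L3 table covers PT_ENTRIES * PAGESIZE of virtual address space, so the loop's 32 allocations pre-populate a fixed window of kernel VA. A back-of-the-envelope check, assuming the usual 4 KiB page and 8-byte PTE:

    /* One L3 table: 512 entries * 4 KiB = 2 MiB of VA.
     * 32 tables: 64 MiB, i.e. the pre-wired window is
     * [0xffff000000400000, 0xffff000004400000) until pmap_growkernel lands. */
    _Static_assert(32ul * 512 * 4096 == 64ul * 1024 * 1024, "64 MiB pre-wired");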
diff --git a/sys/aarch64/pmap.c b/sys/aarch64/pmap.c
index 487debf3d8..43deb4cfbc 100644
--- a/sys/aarch64/pmap.c
+++ b/sys/aarch64/pmap.c
@@ -1,87 +1,451 @@
 #define KL_LOG KL_PMAP
 #include
 #include
-#include
+#include
+#include
+#include
+#include
+#include
 #include
+#include
 #include
-#include
+#include
+#include
+#include
+#include
 
-vaddr_t pmap_start(pmap_t *pmap) {
-  panic("Not implemented!");
+typedef struct pmap {
+  mtx_t mtx;               /* protects all fields in this structure */
+  asid_t asid;             /* address space identifier */
+  paddr_t pde;             /* directory page table physical address */
+  vm_pagelist_t pte_pages; /* pages we allocate in page table */
+  TAILQ_HEAD(, pv_entry) pv_list; /* all pages mapped by this physical map */
+} pmap_t;
+
+typedef struct pv_entry {
+  TAILQ_ENTRY(pv_entry) pmap_link; /* link on pmap::pv_list */
+  TAILQ_ENTRY(pv_entry) page_link; /* link on vm_page::pv_list */
+  pmap_t *pmap;                    /* page is mapped in this pmap */
+  vaddr_t va;                      /* under this address */
+} pv_entry_t;
+
+static POOL_DEFINE(P_PMAP, "pmap", sizeof(pmap_t));
+static POOL_DEFINE(P_PV, "pv_entry", sizeof(pv_entry_t));
+
+#define PA_MASK 0xfffffffff000
+#define ADDR_MASK 0x8ffffffff000
+#define DMAP_BASE 0xffffff8000000000 /* last 512GB */
+#define PHYS_TO_DMAP(x) ((intptr_t)(x) + DMAP_BASE)
+
+static const pte_t pte_default = L3_PAGE | ATTR_AF | ATTR_SH(ATTR_SH_IS);
+
+static const pte_t vm_prot_map[] = {
+  [VM_PROT_NONE] = ATTR_XN | pte_default,
+  [VM_PROT_READ] = ATTR_AP(ATTR_AP_RO) | ATTR_XN | pte_default,
+  [VM_PROT_WRITE] = ATTR_AP(ATTR_AP_RW) | ATTR_XN | pte_default,
+  [VM_PROT_READ | VM_PROT_WRITE] = ATTR_AP(ATTR_AP_RW) | ATTR_XN | pte_default,
+  [VM_PROT_EXEC] = pte_default,
+  [VM_PROT_READ | VM_PROT_EXEC] = ATTR_AP(ATTR_AP_RO) | pte_default,
+  [VM_PROT_WRITE | VM_PROT_EXEC] = ATTR_AP(ATTR_AP_RW) | pte_default,
+  [VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXEC] =
+    ATTR_AP(ATTR_AP_RW) | pte_default,
+};
+
+static pmap_t kernel_pmap;
+paddr_t _kernel_pmap_pde;
+static bitstr_t asid_used[bitstr_size(MAX_ASID)] = {0};
+static spin_t *asid_lock = &SPIN_INITIALIZER(0);
+
+#define PTE_FRAME_ADDR(pte) ((pte)&PA_MASK)
+#define PAGE_OFFSET(x) ((x) & (PAGESIZE - 1))
+#define PG_DMAP_ADDR(pg) ((void *)((intptr_t)(pg)->paddr + DMAP_BASE))
+
+/*
+ * Helper functions.
+ */
+static bool user_addr_p(vaddr_t addr) {
+  return (addr >= PMAP_USER_BEGIN) && (addr < PMAP_USER_END);
 }
 
-vaddr_t pmap_end(pmap_t *pmap) {
-  panic("Not implemented!");
+static bool kern_addr_p(vaddr_t addr) {
+  return (addr >= PMAP_KERNEL_BEGIN) && (addr < PMAP_KERNEL_END);
 }
 
-void pmap_reset(pmap_t *pmap) {
-  panic("Not implemented!");
+inline vaddr_t pmap_start(pmap_t *pmap) {
+  return pmap->asid ? PMAP_USER_BEGIN : PMAP_KERNEL_BEGIN;
 }
 
-void init_pmap(void) {
-  panic("Not implemented!");
+inline vaddr_t pmap_end(pmap_t *pmap) {
+  return pmap->asid ? PMAP_USER_END : PMAP_KERNEL_END;
 }
 
-pmap_t *pmap_new(void) {
-  panic("Not implemented!");
+inline bool pmap_address_p(pmap_t *pmap, vaddr_t va) {
+  return pmap_start(pmap) <= va && va < pmap_end(pmap);
 }
 
-void pmap_delete(pmap_t *pmap) {
-  panic("Not implemented!");
+inline bool pmap_contains_p(pmap_t *pmap, vaddr_t start, vaddr_t end) {
+  return pmap_start(pmap) <= start && end <= pmap_end(pmap);
 }
 
-void pmap_kenter(paddr_t va, paddr_t pa, vm_prot_t prot, unsigned flags) {
-  panic("Not implemented!");
+inline pmap_t *pmap_kernel(void) {
+  return &kernel_pmap;
 }
 
-void pmap_enter(pmap_t *pmap, vaddr_t va, vm_page_t *pg, vm_prot_t prot,
-                unsigned flags) {
-  panic("Not implemented!");
+inline pmap_t *pmap_user(void) {
+  return PCPU_GET(curpmap);
+}
+
+pmap_t *pmap_lookup(vaddr_t addr) {
+  if (kern_addr_p(addr))
+    return pmap_kernel();
+  if (user_addr_p(addr))
+    return pmap_user();
+  return NULL;
+}
+
+/*
+ * Address space identifiers management.
+ */
+
+static asid_t alloc_asid(void) {
+  int free = 0;
+  WITH_SPIN_LOCK (asid_lock) {
+    bit_ffc(asid_used, MAX_ASID, &free);
+    if (free < 0)
+      panic("Out of asids!");
+    bit_set(asid_used, free);
+  }
+  klog("alloc_asid() = %d", free);
+  return free;
+}
+
+static void free_asid(asid_t asid) {
+  klog("free_asid(%d)", asid);
+  SCOPED_SPIN_LOCK(asid_lock);
+  bit_clear(asid_used, (unsigned)asid);
+  tlb_invalidate_asid(asid);
+}
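An implicit convention hides here: init_pmap runs before any pmap_new, so the kernel pmap receives ASID 0 from the bitmap allocator and every user pmap gets a non-zero one. That is what lets pmap_start/pmap_end discriminate on pmap->asid, and why tlb.c (further below) handles ASID 0 by invalidating across all ASIDs. A sketch of the lifecycle under that assumption:

    init_pmap();               /* first alloc_asid() -> 0, for kernel_pmap */
    pmap_t *umap = pmap_new(); /* alloc_asid() -> some id in [1, MAX_ASID) */
    /* ... mappings come and go ... */
    pmap_delete(umap);         /* free_asid() also flushes that ASID's TLB entries */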
+
+/*
+ * Physical-to-virtual entries are managed for all pageable mappings.
+ */
+
+static void pv_add(pmap_t *pmap, vaddr_t va, vm_page_t *pg) {
+  pv_entry_t *pv = pool_alloc(P_PV, M_ZERO);
+  pv->pmap = pmap;
+  pv->va = va;
+  TAILQ_INSERT_TAIL(&pg->pv_list, pv, page_link);
+  TAILQ_INSERT_TAIL(&pmap->pv_list, pv, pmap_link);
+}
+
+static pv_entry_t *pv_find(pmap_t *pmap, vaddr_t va, vm_page_t *pg) {
+  pv_entry_t *pv;
+  TAILQ_FOREACH (pv, &pg->pv_list, page_link) {
+    if (pv->pmap == pmap && pv->va == va)
+      return pv;
+  }
+  return NULL;
+}
+
+static void pv_remove(pmap_t *pmap, vaddr_t va, vm_page_t *pg) {
+  pv_entry_t *pv = pv_find(pmap, va, pg);
+  assert(pv != NULL);
+  TAILQ_REMOVE(&pg->pv_list, pv, page_link);
+  TAILQ_REMOVE(&pmap->pv_list, pv, pmap_link);
+  pool_free(P_PV, pv);
+}
+
+/*
+ * Routines for accessing page table entries.
+ */
+
+static vm_page_t *pmap_pagealloc(void) {
+  vm_page_t *pg = vm_page_alloc(1);
+  pmap_zero_page(pg);
+  return pg;
+}
+
+static pte_t *pmap_lookup_pte(pmap_t *pmap, vaddr_t va) {
+  pde_t *pdep;
+  paddr_t pa = pmap->pde;
+
+  /* Level 0 */
+  pdep = (pde_t *)PHYS_TO_DMAP(pa) + L0_INDEX(va);
+  if (!(pa = PTE_FRAME_ADDR(*pdep)))
+    return NULL;
+
+  /* Level 1 */
+  pdep = (pde_t *)PHYS_TO_DMAP(pa) + L1_INDEX(va);
+  if (!(pa = PTE_FRAME_ADDR(*pdep)))
+    return NULL;
+
+  /* Level 2 */
+  pdep = (pde_t *)PHYS_TO_DMAP(pa) + L2_INDEX(va);
+  if (!(pa = PTE_FRAME_ADDR(*pdep)))
+    return NULL;
+
+  /* Level 3 */
+  return (pde_t *)PHYS_TO_DMAP(pa) + L3_INDEX(va);
+}
+
+static paddr_t pmap_alloc_pde(pmap_t *pmap, vaddr_t vaddr) {
+  vm_page_t *pg = pmap_pagealloc();
+
+  TAILQ_INSERT_TAIL(&pmap->pte_pages, pg, pageq);
+
+  klog("Page table for 0x%016lx allocated at 0x%016lx", vaddr, pg->paddr);
+
+  return pg->paddr;
+}
+
+static pte_t make_pte(paddr_t pa, vm_prot_t prot, unsigned flags) {
+  pte_t pte = pa | vm_prot_map[prot];
+  unsigned cacheflags = flags & PMAP_CACHE_MASK;
+  if (cacheflags == PMAP_NOCACHE)
+    return pte | ATTR_IDX(ATTR_NORMAL_MEM_NC);
+  if (cacheflags == PMAP_WRITE_THROUGH)
+    return pte | ATTR_IDX(ATTR_NORMAL_MEM_WT);
+  return pte | ATTR_IDX(ATTR_NORMAL_MEM_WB);
+}
+
+static void pmap_write_pte(pmap_t *pmap, pte_t *ptep, pte_t pte) {
+  *ptep = pte;
+  tlb_invalidate(pte, pmap->asid);
+}
+
+/*
+ * Return pointer to entry of va in level 3 of page table. Allocate space if
+ * needed.
+ */
+
+static pte_t *pmap_ensure_pte(pmap_t *pmap, vaddr_t va) {
+  pde_t *pdep;
+  paddr_t pa = pmap->pde;
+
+  /* Level 0 */
+  pdep = (pde_t *)PHYS_TO_DMAP(pa) + L0_INDEX(va);
+  if (!(pa = PTE_FRAME_ADDR(*pdep))) {
+    pa = pmap_alloc_pde(pmap, va);
+    *pdep = pa | L0_TABLE;
+  }
+
+  /* Level 1 */
+  pdep = (pde_t *)PHYS_TO_DMAP(pa) + L1_INDEX(va);
+  if (!(pa = PTE_FRAME_ADDR(*pdep))) {
+    pa = pmap_alloc_pde(pmap, va);
+    *pdep = pa | L1_TABLE;
+  }
+
+  /* Level 2 */
+  pdep = (pde_t *)PHYS_TO_DMAP(pa) + L2_INDEX(va);
+  if (!(pa = PTE_FRAME_ADDR(*pdep))) {
+    pa = pmap_alloc_pde(pmap, va);
+    *pdep = pa | L2_TABLE;
+  }
+
+  /* Level 3 */
+  return (pde_t *)PHYS_TO_DMAP(pa) + L3_INDEX(va);
+}
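Note that neither walk ever maps a table page at a temporary VA: every level is read through the direct map, by adding DMAP_BASE to the physical address held in the parent entry. Unrolled, the lookup above is equivalent to this sketch (NULL checks elided):

    pde_t *l0 = (pde_t *)PHYS_TO_DMAP(pmap->pde);
    pde_t *l1 = (pde_t *)PHYS_TO_DMAP(PTE_FRAME_ADDR(l0[L0_INDEX(va)]));
    pde_t *l2 = (pde_t *)PHYS_TO_DMAP(PTE_FRAME_ADDR(l1[L1_INDEX(va)]));
    pte_t *l3 = (pte_t *)PHYS_TO_DMAP(PTE_FRAME_ADDR(l2[L2_INDEX(va)]));
    pte_t pte = l3[L3_INDEX(va)];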
+
+void pmap_activate(pmap_t *umap) {
+  SCOPED_NO_PREEMPTION();
+
+  PCPU_SET(curpmap, umap);
+
+  uint64_t tcr = READ_SPECIALREG(TCR_EL1);
+
+  if (umap == NULL) {
+    WRITE_SPECIALREG(TCR_EL1, tcr | TCR_EPD0);
+  } else {
+    uint64_t ttbr0 = ((uint64_t)umap->asid << ASID_SHIFT) | umap->pde;
+    WRITE_SPECIALREG(TTBR0_EL1, ttbr0);
+    WRITE_SPECIALREG(TCR_EL1, tcr & ~TCR_EPD0);
+  }
+}
+
+/*
+ * Wired memory interface.
+ */
+
+void pmap_kenter(vaddr_t va, paddr_t pa, vm_prot_t prot, unsigned flags) {
+  pmap_t *pmap = pmap_kernel();
+
+  assert(page_aligned_p(pa) && page_aligned_p(va));
+  assert(pmap_address_p(pmap, va));
+  assert(pa != 0);
+
+  klog("Enter unmanaged mapping from %p to %p", va, pa);
+
+  pte_t pte = make_pte(pa, prot, flags);
+
+  WITH_MTX_LOCK (&pmap->mtx) {
+    pte_t *ptep = pmap_ensure_pte(pmap, va);
+    pmap_write_pte(pmap, ptep, pte);
+  }
 }
 
 void pmap_kremove(vaddr_t va, size_t size) {
-  panic("Not implemented!");
+  pmap_t *pmap = pmap_kernel();
+
+  assert(page_aligned_p(va) && page_aligned_p(size));
+  assert(pmap_contains_p(pmap, va, va + size));
+
+  klog("%s: remove unmanaged mapping for %p - %p range", __func__, va,
+       va + size - 1);
+
+  WITH_MTX_LOCK (&pmap->mtx) {
+    for (size_t off = 0; off < size; off += PAGESIZE) {
+      pte_t *ptep = pmap_lookup_pte(pmap, va + off);
+      assert(ptep != NULL);
+      pmap_write_pte(pmap, ptep, 0);
+    }
+  }
 }
+
+bool pmap_kextract(vaddr_t va, paddr_t *pap) {
+  return pmap_extract(pmap_kernel(), va, pap);
+}
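pmap_kenter is the entry point device drivers will use for wired kernel mappings with explicit cacheability. A hypothetical call, wiring one uncached page of RPi3 MMIO; both addresses are illustrative, not taken from this patch:

    /* Sketch: map one page of GPIO registers uncached (addresses made up). */
    vaddr_t va = 0xffff000004000000; /* free kernel VA with a pre-wired L2 slot */
    pmap_kenter(va, 0x3f200000, VM_PROT_READ | VM_PROT_WRITE, PMAP_NOCACHE);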
+
+/*
+ * Pageable (user & kernel) memory interface.
+ */
+
+static bool pmap_extract_nolock(pmap_t *pmap, vaddr_t va, paddr_t *pap) {
+  if (!pmap_address_p(pmap, va))
+    return false;
+
+  pte_t *ptep = pmap_lookup_pte(pmap, va);
+  if (ptep == NULL)
+    return false;
+  paddr_t pa = PTE_FRAME_ADDR(*ptep);
+  if (pa == 0)
+    return false;
+  *pap = pa | PAGE_OFFSET(va);
+  return true;
+}
+
+void pmap_enter(pmap_t *pmap, vaddr_t va, vm_page_t *pg, vm_prot_t prot,
+                unsigned flags) {
+  paddr_t pa = pg->paddr;
+
+  assert(page_aligned_p(va));
+  assert(pmap_address_p(pmap, va));
+
+  klog("Enter virtual mapping %p for frame %p", va, pa);
+
+  bool kern_mapping = (pmap == pmap_kernel());
+
+  /* TODO(pj) Mark user pages as non-referenced & non-modified. */
+  pte_t pte = make_pte(pa, prot, flags);
+
+  WITH_MTX_LOCK (&pmap->mtx) {
+    pv_entry_t *pv = pv_find(pmap, va, pg);
+    if (pv == NULL)
+      pv_add(pmap, va, pg);
+    if (kern_mapping)
+      pg->flags |= PG_MODIFIED | PG_REFERENCED;
+    else
+      pg->flags &= ~(PG_MODIFIED | PG_REFERENCED);
+    pte_t *ptep = pmap_ensure_pte(pmap, va);
+    pmap_write_pte(pmap, ptep, pte);
+  }
 }
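Because pmap_enter records one pv_entry per (pmap, va) mapping, a frame shared between address spaces can later be unmapped given only its vm_page_t; that is what pmap_page_remove (below) relies on. A hypothetical flow, with both user pmaps invented for illustration:

    vm_page_t *pg = vm_page_alloc(1);
    pmap_enter(umap1, 0x400000, pg, VM_PROT_READ, 0); /* pv entry #1 */
    pmap_enter(umap2, 0x500000, pg, VM_PROT_READ, 0); /* pv entry #2 */
    pmap_page_remove(pg); /* tears down both mappings via pg->pv_list */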
 
 void pmap_remove(pmap_t *pmap, vaddr_t start, vaddr_t end) {
-  panic("Not implemented!");
+  assert(page_aligned_p(start) && page_aligned_p(end) && start < end);
+  assert(pmap_contains_p(pmap, start, end));
+
+  klog("Remove page mapping for address range %p-%p", start, end);
+
+  WITH_MTX_LOCK (&pmap->mtx) {
+    for (vaddr_t va = start; va < end; va += PAGESIZE) {
+      pte_t *ptep = pmap_lookup_pte(pmap, va);
+      if (ptep == NULL)
+        continue;
+      paddr_t pa = PTE_FRAME_ADDR(*ptep);
+      if (pa == 0)
+        continue;
+      vm_page_t *pg = vm_page_find(pa);
+      pv_remove(pmap, va, pg);
+      pmap_write_pte(pmap, ptep, 0);
+    }
+  }
 }
 
 void pmap_protect(pmap_t *pmap, vaddr_t start, vaddr_t end, vm_prot_t prot) {
-  panic("Not implemented!");
-}
+  assert(page_aligned_p(start) && page_aligned_p(end) && start < end);
+  assert(pmap_contains_p(pmap, start, end));
 
-bool pmap_kextract(vaddr_t va, paddr_t *pap) {
-  panic("Not implemented!");
+  klog("Change protection bits to %x for address range %p-%p", prot, start,
+       end);
+
+  WITH_MTX_LOCK (&pmap->mtx) {
+    for (vaddr_t va = start; va < end; va += PAGESIZE) {
+      pte_t *ptep = pmap_lookup_pte(pmap, va);
+      if (ptep == NULL)
+        continue;
+      pte_t pte = vm_prot_map[prot] | (*ptep & (~ATTR_AP_MASK & ~ATTR_XN));
+      pmap_write_pte(pmap, ptep, pte);
+    }
+  }
 }
 
 bool pmap_extract(pmap_t *pmap, vaddr_t va, paddr_t *pap) {
-  panic("Not implemented!");
+  SCOPED_MTX_LOCK(&pmap->mtx);
+  return pmap_extract_nolock(pmap, va, pap);
 }
 
 void pmap_page_remove(vm_page_t *pg) {
-  panic("Not implemented!");
+  while (!TAILQ_EMPTY(&pg->pv_list)) {
+    pv_entry_t *pv = TAILQ_FIRST(&pg->pv_list);
+    pmap_t *pmap = pv->pmap;
+    vaddr_t va = pv->va;
+    TAILQ_REMOVE(&pg->pv_list, pv, page_link);
+    TAILQ_REMOVE(&pmap->pv_list, pv, pmap_link);
+    pte_t *ptep = pmap_lookup_pte(pmap, va);
+    assert(ptep != NULL);
+    pmap_write_pte(pmap, ptep, 0);
+    pool_free(P_PV, pv);
+  }
 }
 
 void pmap_zero_page(vm_page_t *pg) {
-  panic("Not implemented!");
+  bzero(PG_DMAP_ADDR(pg), PAGESIZE);
 }
 
 void pmap_copy_page(vm_page_t *src, vm_page_t *dst) {
-  panic("Not implemented!");
+  memcpy(PG_DMAP_ADDR(dst), PG_DMAP_ADDR(src), PAGESIZE);
+}
+
+static void pmap_modify_flags(vm_page_t *pg, pte_t set, pte_t clr) {
+  pv_entry_t *pv;
+  TAILQ_FOREACH (pv, &pg->pv_list, page_link) {
+    pmap_t *pmap = pv->pmap;
+    vaddr_t va = pv->va;
+    WITH_MTX_LOCK (&pmap->mtx) {
+      pte_t *ptep = pmap_lookup_pte(pmap, va);
+      assert(ptep != NULL);
+      pte_t pte = *ptep;
+      pte |= set;
+      pte &= ~clr;
+      *ptep = pte;
+      tlb_invalidate(pte, pmap->asid);
+    }
+  }
 }
 
 bool pmap_clear_referenced(vm_page_t *pg) {
   bool prev = pmap_is_referenced(pg);
   pg->flags &= ~PG_REFERENCED;
-  panic("Not implemented!");
+  pmap_modify_flags(pg, 0, ATTR_AF);
   return prev;
 }
 
 bool pmap_clear_modified(vm_page_t *pg) {
   bool prev = pmap_is_modified(pg);
   pg->flags &= ~PG_MODIFIED;
-  panic("Not implemented!");
+  pmap_modify_flags(pg, 0, ATTR_DBM);
   return prev;
 }
@@ -95,26 +459,62 @@ bool pmap_is_modified(vm_page_t *pg) {
 
 void pmap_set_referenced(vm_page_t *pg) {
   pg->flags |= PG_REFERENCED;
-  panic("Not implemented!");
+  pmap_modify_flags(pg, ATTR_AF, 0);
 }
 
 void pmap_set_modified(vm_page_t *pg) {
   pg->flags |= PG_MODIFIED;
-  panic("Not implemented!");
+  pmap_modify_flags(pg, ATTR_DBM, 0);
 }
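pmap_modify_flags keeps the descriptors in step with pg->flags: ATTR_AF is the AArch64 access flag and ATTR_DBM the dirty-bit-modifier bit. The commit message lists referenced/modified handling as unfinished, so this sketch shows only the intended pairing, not verified behaviour:

    pmap_set_referenced(pg); /* PG_REFERENCED set; ATTR_AF set in every PTE */
    pmap_clear_modified(pg); /* PG_MODIFIED cleared; ATTR_DBM cleared */
    bool dirty = pmap_is_modified(pg); /* consults pg->flags only */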
 
-void pmap_activate(pmap_t *pmap) {
-  panic("Not implemented!");
+/*
+ * Physical map management routines.
+ */
+
+static void pmap_setup(pmap_t *pmap) {
+  pmap->asid = alloc_asid();
+  mtx_init(&pmap->mtx, 0);
+  TAILQ_INIT(&pmap->pte_pages);
+  TAILQ_INIT(&pmap->pv_list);
 }
 
-pmap_t *pmap_kernel(void) {
-  panic("Not implemented!");
+void init_pmap(void) {
+  pmap_setup(&kernel_pmap);
+  kernel_pmap.pde = _kernel_pmap_pde;
 }
 
-pmap_t *pmap_user(void) {
-  panic("Not implemented!");
+pmap_t *pmap_new(void) {
+  pmap_t *pmap = pool_alloc(P_PMAP, M_ZERO);
+  pmap_setup(pmap);
+
+  vm_page_t *pg = pmap_pagealloc();
+  TAILQ_INSERT_TAIL(&pmap->pte_pages, pg, pageq);
+  pmap->pde = pg->paddr;
+  klog("Page directory table allocated at %p", pmap->pde);
+
+  return pmap;
 }
 
-pmap_t *pmap_lookup(vaddr_t addr) {
-  panic("Not implemented!");
+void pmap_delete(pmap_t *pmap) {
+  assert(pmap != pmap_kernel());
+
+  while (!TAILQ_EMPTY(&pmap->pv_list)) {
+    pv_entry_t *pv = TAILQ_FIRST(&pmap->pv_list);
+    vm_page_t *pg;
+    paddr_t pa;
+    pmap_extract_nolock(pmap, pv->va, &pa);
+    pg = vm_page_find(pa);
+    TAILQ_REMOVE(&pg->pv_list, pv, page_link);
+    TAILQ_REMOVE(&pmap->pv_list, pv, pmap_link);
+    pool_free(P_PV, pv);
+  }
+
+  while (!TAILQ_EMPTY(&pmap->pte_pages)) {
+    vm_page_t *pg = TAILQ_FIRST(&pmap->pte_pages);
+    TAILQ_REMOVE(&pmap->pte_pages, pg, pageq);
+    vm_page_free(pg);
+  }
+
+  free_asid(pmap->asid);
+  pool_free(P_PMAP, pmap);
 }
diff --git a/sys/aarch64/rpi3.c b/sys/aarch64/rpi3.c
index 09a1607d3f..b5e080e816 100644
--- a/sys/aarch64/rpi3.c
+++ b/sys/aarch64/rpi3.c
@@ -63,7 +63,8 @@ void *board_stack(atag_tag_t *atags) {
 }
 
 static void rpi3_physmem(void) {
-  paddr_t ram_start = 0;
+  /* XXX: workaround - pmap_enter cannot map the physical page at address 0 */
+  paddr_t ram_start = PAGESIZE;
   paddr_t ram_end = kenv_get_ulong("memsize");
   paddr_t kern_start = (paddr_t)__boot;
   paddr_t kern_end = (paddr_t)_bootmem_end;
diff --git a/sys/aarch64/tlb.c b/sys/aarch64/tlb.c
new file mode 100644
index 0000000000..9ec56e2386
--- /dev/null
+++ b/sys/aarch64/tlb.c
@@ -0,0 +1,27 @@
+#include <aarch64/tlb.h>
+
+#define ASID_TO_PTE(x) ((uint64_t)(x) << ASID_SHIFT)
+
+#define __tlbi(x, r) __asm__ volatile("TLBI " x ", %0" : : "r"(r))
+#define __dsb(x) __asm__ volatile("DSB " x)
+#define __isb() __asm__ volatile("ISB")
+
+void tlb_invalidate(pte_t pte, asid_t asid) {
+  __dsb("ishst");
+
+  if (asid > 0) {
+    __tlbi("vae1is", ASID_TO_PTE(asid) | (pte >> PAGE_SHIFT));
+  } else {
+    __tlbi("vaae1is", pte >> PAGE_SHIFT);
+  }
+
+  __dsb("ish");
+  __isb();
+}
+
+void tlb_invalidate_asid(asid_t asid) {
+  __dsb("ishst");
+  __tlbi("aside1is", ASID_TO_PTE(asid));
+  __dsb("ish");
+  __isb();
+}
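All invalidations above are broadcast ("inner shareable"), so they take effect on every core. The calling convention set by pmap_write_pte is worth noting: tlb_invalidate receives the PTE value, shifts out the low attribute bits with >> PAGE_SHIFT, and pairs the result with the ASID; ASID 0 falls back to TLBI VAAE1IS, which matches the page in all address spaces. Usage sketch:

    *ptep = new_pte;
    tlb_invalidate(new_pte, pmap->asid); /* DSB ishst; TLBI ...IS; DSB ish; ISB */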
diff --git a/sys/kern/vmem.c b/sys/kern/vmem.c
index c4fe74635a..6ddb3e556c 100644
--- a/sys/kern/vmem.c
+++ b/sys/kern/vmem.c
@@ -207,6 +207,7 @@ static void vmem_check_sanity(vmem_t *vm) {
 
 vmem_t *vmem_create(const char *name, vmem_size_t quantum) {
   vmem_t *vm = pool_alloc(P_VMEM, M_ZERO);
+  assert(vm != NULL);
 
   vm->vm_quantum = quantum;
   assert(quantum > 0);
diff --git a/sys/mips/pmap.c b/sys/mips/pmap.c
index e0f30b8cf0..9db7d24538 100644
--- a/sys/mips/pmap.c
+++ b/sys/mips/pmap.c
@@ -31,7 +31,7 @@ typedef struct pv_entry {
 static POOL_DEFINE(P_PMAP, "pmap", sizeof(pmap_t));
 static POOL_DEFINE(P_PV, "pv_entry", sizeof(pv_entry_t));
 
-static pte_t vm_prot_map[] = {
+static const pte_t vm_prot_map[] = {
   [VM_PROT_NONE] = 0,
   [VM_PROT_READ] = PTE_VALID | PTE_NO_EXEC,
   [VM_PROT_WRITE] = PTE_VALID | PTE_DIRTY | PTE_NO_READ | PTE_NO_EXEC,
@@ -112,7 +112,7 @@ pmap_t *pmap_lookup(vaddr_t addr) {
  */
 static asid_t alloc_asid(void) {
-  int free;
+  int free = 0;
   WITH_SPIN_LOCK (asid_lock) {
     bit_ffc(asid_used, MAX_ASID, &free);
     if (free < 0)
@@ -274,7 +274,7 @@ void pmap_kremove(vaddr_t va, size_t size) {
   klog("%s: remove unmanaged mapping for %p - %p range", __func__, va,
        va + size - 1);
 
-  WITH_MTX_LOCK (&kernel_pmap.mtx) {
+  WITH_MTX_LOCK (&pmap->mtx) {
     for (size_t off = 0; off < size; off += PAGESIZE)
       pmap_pte_write(pmap, va + off, PTE_GLOBAL, 0);
   }
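The alloc_asid change mirrors the aarch64 allocator above: per <bitstring.h>, bit_ffc stores the index of the first clear bit through its third argument, or -1 when every bit is set. Initializing free to 0 presumably silences a maybe-uninitialized warning across the WITH_SPIN_LOCK block; the value is always overwritten before use:

    int free = 0;
    bit_ffc(asid_used, MAX_ASID, &free); /* first clear bit, or -1 if none */
    if (free < 0)
      panic("Out of asids!");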