boot/efi/riscv64: use generic mmu functions

Change-Id: I671f41b9c163c522ffa7ffcb2509f279c4c9496a
Reviewed-on: https://review.haiku-os.org/c/haiku/+/5701
Tested-by: Commit checker robot <no-reply+buildbot@haiku-os.org>
Reviewed-by: Fredrik Holmqvist <fredrik.holmqvist@gmail.com>
This commit is contained in:
David Karoly 2022-09-30 13:25:33 +02:00 committed by waddlesplash
parent c3f23e8222
commit a3fb982730
3 changed files with 154 additions and 161 deletions

View File

@ -1,6 +1,7 @@
SubDir HAIKU_TOP src system boot platform efi arch riscv64 ;
SubDirHdrs $(HAIKU_TOP) src system boot platform efi ;
SubDirHdrs $(SUBDIR) $(DOTDOT) $(DOTDOT) ;
SubDirHdrs $(SUBDIR) $(DOTDOT) generic ;
UseLibraryHeaders [ FDirName libfdt ] ;
@ -9,23 +10,31 @@ UsePrivateHeaders [ FDirName kernel boot platform efi ] ;
local platform ;
for platform in [ MultiBootSubDirSetup efi ] {
on $(platform) {
on $(platform) {
local arch_src =
crt0-efi-$(TARGET_ARCH).S
entry.S
relocation_func.cpp
arch_dtb.cpp
arch_mmu.cpp
arch_smp.cpp
arch_start.cpp
arch_timer.cpp
arch_traps.cpp
arch_traps_asm.S
;
local arch_src =
crt0-efi-$(TARGET_ARCH).S
entry.S
relocation_func.cpp
arch_dtb.cpp
arch_mmu.cpp
arch_smp.cpp
arch_start.cpp
arch_timer.cpp
arch_traps.cpp
arch_traps_asm.S
;
BootMergeObject boot_platform_efi_riscv64.o :
$(arch_src)
;
local generic_src =
generic_mmu.cpp
;
BootMergeObject boot_platform_efi_riscv64.o :
$(arch_src)
$(generic_src)
;
SEARCH on [ FGristFiles $(generic_src) ]
= [ FDirName $(SUBDIR) $(DOTDOT) generic ] ;
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright 2019-2020 Haiku, Inc. All rights reserved.
* Copyright 2019-2022 Haiku, Inc. All rights reserved.
* Released under the terms of the MIT License.
*/
@ -14,10 +14,27 @@
#include <efi/boot-services.h>
#include <string.h>
#include "mmu.h"
#include "efi_platform.h"
#include "generic_mmu.h"
#include "mmu.h"
//#define TRACE_MMU
#ifdef TRACE_MMU
# define TRACE(x...) dprintf(x)
#else
# define TRACE(x...) ;
#endif
//#define TRACE_MEMORY_MAP
// Ignore memory above 512GB
#define PHYSICAL_MEMORY_LOW 0x00000000
#define PHYSICAL_MEMORY_HIGH 0x8000000000ull
#define RESERVED_MEMORY_BASE 0x80000000
phys_addr_t sPageTable = 0;
@ -153,7 +170,7 @@ LookupPte(addr_t virtAdr, bool alloc)
static void
Map(addr_t virtAdr, phys_addr_t physAdr, uint64 flags)
{
// dprintf("Map(%#" B_PRIxADDR ", %#" B_PRIxADDR ")\n", virtAdr, physAdr);
// TRACE("Map(%#" B_PRIxADDR ", %#" B_PRIxADDR ")\n", virtAdr, physAdr);
Pte* pte = LookupPte(virtAdr, true);
if (pte == NULL) panic("can't allocate page table");
@ -165,7 +182,7 @@ Map(addr_t virtAdr, phys_addr_t physAdr, uint64 flags)
static void
MapRange(addr_t virtAdr, phys_addr_t physAdr, size_t size, uint64 flags)
{
dprintf("MapRange(%#" B_PRIxADDR " - %#" B_PRIxADDR ", %#" B_PRIxADDR " - %#" B_PRIxADDR ", %#"
TRACE("MapRange(%#" B_PRIxADDR " - %#" B_PRIxADDR ", %#" B_PRIxADDR " - %#" B_PRIxADDR ", %#"
B_PRIxADDR ")\n", virtAdr, virtAdr + (size - 1), physAdr, physAdr + (size - 1), size);
for (size_t i = 0; i < size; i += B_PAGE_SIZE)
Map(virtAdr + i, physAdr + i, flags);
@ -174,6 +191,21 @@ MapRange(addr_t virtAdr, phys_addr_t physAdr, size_t size, uint64 flags)
}
// Record a virtual address range that must survive the hand-off into the
// kernel's address space, by appending it to the kernel args' list of
// "virtual ranges to keep". Panics on failure, since a lost range would
// leave the kernel relying on memory that is no longer mapped.
static void
insert_virtual_range_to_keep(uint64 start, uint64 size)
{
status_t status = insert_address_range(
gKernelArgs.arch_args.virtual_ranges_to_keep,
&gKernelArgs.arch_args.num_virtual_ranges_to_keep,
MAX_VIRTUAL_RANGES_TO_KEEP, start, size);
// B_ENTRY_NOT_FOUND is taken to mean the fixed-size table is full
// (mirrors the old inline num >= MAX_VIRTUAL_RANGES_TO_KEEP check)
// -- NOTE(review): confirm against insert_address_range() in generic_mmu.
if (status == B_ENTRY_NOT_FOUND)
panic("too many virtual ranges to keep");
else if (status != B_OK)
panic("failed to add virtual range to keep");
}
static void
MapAddrRange(addr_range& range, uint64 flags)
{
@ -186,13 +218,7 @@ MapAddrRange(addr_range& range, uint64 flags)
range.start = get_next_virtual_address(range.size);
MapRange(range.start, physAdr, range.size, flags);
if (gKernelArgs.arch_args.num_virtual_ranges_to_keep
>= MAX_VIRTUAL_RANGES_TO_KEEP)
panic("too many virtual ranges to keep");
gKernelArgs.arch_args.virtual_ranges_to_keep[
gKernelArgs.arch_args.num_virtual_ranges_to_keep++] = range;
insert_virtual_range_to_keep(range.start, range.size);
}
@ -239,100 +265,6 @@ GetPhysMemRange(addr_range& range)
}
// Build gKernelArgs' physical memory range list from the EFI memory map,
// in two passes: first insert every usable range, then subtract every
// reserved/unknown range that may overlap. Also assigns identity virtual
// addresses to EFI runtime-services regions so SetVirtualAddressMap()
// can be called later.
// NOTE(review): entries are accessed as &memory_map[i]; this assumes
// descriptor_size == sizeof(efi_memory_descriptor), which the UEFI spec
// does not guarantee -- confirm (compare the byte-stride walk used in
// fix_memory_map_for_m_mode()).
static void
FillPhysicalMemoryMap(size_t memory_map_size,
efi_memory_descriptor *memory_map, size_t descriptor_size,
uint32_t descriptor_version)
{
// Add physical memory to the kernel args and update virtual addresses for
// EFI regions.
gKernelArgs.num_physical_memory_ranges = 0;
// First scan: Add all usable ranges
for (size_t i = 0; i < memory_map_size / descriptor_size; ++i) {
efi_memory_descriptor* entry = &memory_map[i];
switch (entry->Type) {
case EfiLoaderCode:
case EfiLoaderData:
case EfiBootServicesCode:
case EfiBootServicesData:
case EfiConventionalMemory: {
// Usable memory.
uint64_t base = entry->PhysicalStart;
// EFI pages are fixed at 4096 bytes regardless of B_PAGE_SIZE.
uint64_t end = entry->PhysicalStart + entry->NumberOfPages * 4096;
uint64_t originalSize = end - base;
// PMP protected memory, unusable
if (base == 0x80000000)
break;
// NOTE(review): std::max(end, base) - base == end - base whenever
// end >= base, making this addend zero; looks like residue of a
// former clamp against an allowed physical window -- confirm intent.
gKernelArgs.ignored_physical_memory
+= originalSize - (std::max(end, base) - base);
if (base >= end)
break;
uint64_t size = end - base;
insert_physical_memory_range(base, size);
break;
}
case EfiACPIReclaimMemory:
// ACPI reclaim -- physical memory we could actually use later
break;
case EfiRuntimeServicesCode:
case EfiRuntimeServicesData:
// Identity-map runtime services for the later
// SetVirtualAddressMap() call.
entry->VirtualStart = entry->PhysicalStart;
break;
}
}
// Snapshot the total so the second pass can account for what it removes.
uint64_t initialPhysicalMemory = total_physical_memory();
// Second scan: Remove everything reserved that may overlap
for (size_t i = 0; i < memory_map_size / descriptor_size; ++i) {
efi_memory_descriptor* entry = &memory_map[i];
switch (entry->Type) {
case EfiLoaderCode:
case EfiLoaderData:
case EfiBootServicesCode:
case EfiBootServicesData:
case EfiConventionalMemory:
break;
default:
// Anything not explicitly usable is carved back out.
uint64_t base = entry->PhysicalStart;
uint64_t end = entry->PhysicalStart + entry->NumberOfPages * 4096;
remove_physical_memory_range(base, end - base);
}
}
// Whatever the second pass removed counts as ignored memory.
gKernelArgs.ignored_physical_memory
+= initialPhysicalMemory - total_physical_memory();
sort_address_ranges(gKernelArgs.physical_memory_range,
gKernelArgs.num_physical_memory_ranges);
}
/*
 * Record the boot loader's own allocations in the kernel args.
 *
 * Walks the EFI memory map and registers every EfiLoaderData region as an
 * allocated physical range, so the kernel will not hand those pages out
 * again. The range list is sorted afterwards, as the kernel expects.
 *
 * The UEFI spec allows descriptor_size to be larger than
 * sizeof(efi_memory_descriptor), so the map is walked with a byte stride
 * of descriptor_size (same pattern as fix_memory_map_for_m_mode()) rather
 * than by array indexing, which would misread the map on firmware that
 * uses an extended descriptor.
 */
static void
FillPhysicalAllocatedMemoryMap(size_t memory_map_size,
	efi_memory_descriptor *memory_map, size_t descriptor_size,
	uint32_t descriptor_version)
{
	addr_t addr = (addr_t)memory_map;
	for (size_t i = 0; i < memory_map_size / descriptor_size; ++i) {
		efi_memory_descriptor* entry
			= (efi_memory_descriptor*)(addr + i * descriptor_size);
		switch (entry->Type) {
			case EfiLoaderData:
				insert_physical_allocated_range(entry->PhysicalStart,
					entry->NumberOfPages * B_PAGE_SIZE);
				break;
			default:
				;
		}
	}

	sort_address_ranges(gKernelArgs.physical_allocated_range,
		gKernelArgs.num_physical_allocated_ranges);
}
//#pragma mark -
@ -347,66 +279,123 @@ arch_mmu_post_efi_setup(size_t memory_map_size,
efi_memory_descriptor *memory_map, size_t descriptor_size,
uint32_t descriptor_version)
{
FillPhysicalAllocatedMemoryMap(memory_map_size, memory_map, descriptor_size, descriptor_version);
build_physical_allocated_list(memory_map_size, memory_map,
descriptor_size, descriptor_version);
// Switch EFI to virtual mode, using the kernel pmap.
kRuntimeServices->SetVirtualAddressMap(memory_map_size, descriptor_size,
descriptor_version, memory_map);
#ifdef TRACE_MEMORY_MAP
dprintf("phys memory ranges:\n");
for (uint32_t i = 0; i < gKernelArgs.num_physical_memory_ranges; i++) {
uint64 start = gKernelArgs.physical_memory_range[i].start;
uint64 size = gKernelArgs.physical_memory_range[i].size;
dprintf(" 0x%08" B_PRIx64 "-0x%08" B_PRIx64 ", length 0x%08" B_PRIx64 "\n",
start, start + size, size);
}
dprintf("allocated phys memory ranges:\n");
for (uint32_t i = 0; i < gKernelArgs.num_physical_allocated_ranges; i++) {
uint64 start = gKernelArgs.physical_allocated_range[i].start;
uint64 size = gKernelArgs.physical_allocated_range[i].size;
dprintf(" 0x%08" B_PRIx64 "-0x%08" B_PRIx64 ", length 0x%08" B_PRIx64 "\n",
start, start + size, size);
}
dprintf("allocated virt memory ranges:\n");
for (uint32_t i = 0; i < gKernelArgs.num_virtual_allocated_ranges; i++) {
uint64 start = gKernelArgs.virtual_allocated_range[i].start;
uint64 size = gKernelArgs.virtual_allocated_range[i].size;
dprintf(" 0x%08" B_PRIx64 "-0x%08" B_PRIx64 ", length 0x%08" B_PRIx64 "\n",
start, start + size, size);
}
dprintf("virt memory ranges to keep:\n");
for (uint32_t i = 0; i < gKernelArgs.arch_args.num_virtual_ranges_to_keep; i++) {
uint64 start = gKernelArgs.arch_args.virtual_ranges_to_keep[i].start;
uint64 size = gKernelArgs.arch_args.virtual_ranges_to_keep[i].size;
dprintf(" 0x%08" B_PRIx64 "-0x%08" B_PRIx64 ", length 0x%08" B_PRIx64 "\n",
start, start + size, size);
}
#endif
}
// Mark the EFI map entry starting at RESERVED_MEMORY_BASE (0x80000000) as
// reserved: per the comment in the old FillPhysicalMemoryMap(), that region
// is PMP-protected firmware memory and must not be treated as usable RAM by
// the generic memory-map scan.
// NOTE(review): assumes the protected region begins exactly at
// RESERVED_MEMORY_BASE and is covered by a single descriptor -- confirm.
static void
fix_memory_map_for_m_mode(size_t memoryMapSize, efi_memory_descriptor* memoryMap,
size_t descriptorSize, uint32_t descriptorVersion)
{
// Walk by descriptorSize bytes: firmware may use descriptors larger
// than sizeof(efi_memory_descriptor).
addr_t addr = (addr_t)memoryMap;
for (size_t i = 0; i < memoryMapSize / descriptorSize; ++i) {
efi_memory_descriptor* entry = (efi_memory_descriptor *)(addr + i * descriptorSize);
if (entry->PhysicalStart == RESERVED_MEMORY_BASE) {
entry->Type = EfiReservedMemoryType;
}
}
}
uint64
arch_mmu_generate_post_efi_page_tables(size_t memory_map_size,
efi_memory_descriptor *memory_map, size_t descriptor_size,
uint32_t descriptor_version)
arch_mmu_generate_post_efi_page_tables(size_t memoryMapSize, efi_memory_descriptor* memoryMap,
size_t descriptorSize, uint32_t descriptorVersion)
{
sPageTable = mmu_allocate_page();
memset(VirtFromPhys(sPageTable), 0, B_PAGE_SIZE);
dprintf("sPageTable: %#" B_PRIxADDR "\n", sPageTable);
TRACE("sPageTable: %#" B_PRIxADDR "\n", sPageTable);
PreallocKernelRange();
gKernelArgs.num_virtual_allocated_ranges = 0;
gKernelArgs.arch_args.num_virtual_ranges_to_keep = 0;
FillPhysicalMemoryMap(memory_map_size, memory_map, descriptor_size, descriptor_version);
fix_memory_map_for_m_mode(memoryMapSize, memoryMap, descriptorSize, descriptorVersion);
build_physical_memory_list(memoryMapSize, memoryMap, descriptorSize, descriptorVersion,
PHYSICAL_MEMORY_LOW, PHYSICAL_MEMORY_HIGH);
addr_range physMemRange;
GetPhysMemRange(physMemRange);
dprintf("physMemRange: %#" B_PRIxADDR ", %#" B_PRIxSIZE "\n", physMemRange.start, physMemRange.size);
TRACE("physMemRange: %#" B_PRIxADDR ", %#" B_PRIxSIZE "\n",
physMemRange.start, physMemRange.size);
// Physical memory mapping
gKernelArgs.arch_args.physMap.start = KERNEL_TOP + 1 - physMemRange.size;
gKernelArgs.arch_args.physMap.size = physMemRange.size;
MapRange(gKernelArgs.arch_args.physMap.start, physMemRange.start, physMemRange.size, (1 << pteRead) | (1 << pteWrite));
MapRange(gKernelArgs.arch_args.physMap.start, physMemRange.start, physMemRange.size,
(1 << pteRead) | (1 << pteWrite));
// Boot loader
dprintf("Boot loader:\n");
for (size_t i = 0; i < memory_map_size / descriptor_size; ++i) {
efi_memory_descriptor* entry = &memory_map[i];
TRACE("Boot loader:\n");
for (size_t i = 0; i < memoryMapSize / descriptorSize; ++i) {
efi_memory_descriptor* entry = &memoryMap[i];
switch (entry->Type) {
case EfiLoaderCode:
case EfiLoaderData:
MapRange(entry->VirtualStart, entry->PhysicalStart, entry->NumberOfPages * B_PAGE_SIZE, (1 << pteRead) | (1 << pteWrite) | (1 << pteExec));
MapRange(entry->VirtualStart, entry->PhysicalStart, entry->NumberOfPages * B_PAGE_SIZE,
(1 << pteRead) | (1 << pteWrite) | (1 << pteExec));
break;
default:
;
}
}
dprintf("Boot loader stack\n");
TRACE("Boot loader stack\n");
addr_t sp = Sp();
addr_t stackTop = ROUNDDOWN(sp - 1024*64, B_PAGE_SIZE);
dprintf(" SP: %#" B_PRIxADDR "\n", sp);
TRACE(" SP: %#" B_PRIxADDR "\n", sp);
// EFI runtime services
dprintf("EFI runtime services:\n");
for (size_t i = 0; i < memory_map_size / descriptor_size; ++i) {
efi_memory_descriptor* entry = &memory_map[i];
TRACE("EFI runtime services:\n");
for (size_t i = 0; i < memoryMapSize / descriptorSize; ++i) {
efi_memory_descriptor* entry = &memoryMap[i];
if ((entry->Attribute & EFI_MEMORY_RUNTIME) != 0)
MapRange(entry->VirtualStart, entry->PhysicalStart, entry->NumberOfPages * B_PAGE_SIZE, (1 << pteRead) | (1 << pteWrite) | (1 << pteExec));
MapRange(entry->VirtualStart, entry->PhysicalStart, entry->NumberOfPages * B_PAGE_SIZE,
(1 << pteRead) | (1 << pteWrite) | (1 << pteExec));
}
// Memory regions
dprintf("Regions:\n");
TRACE("Regions:\n");
void* cookie = NULL;
addr_t virtAdr;
phys_addr_t physAdr;
@ -416,7 +405,7 @@ arch_mmu_generate_post_efi_page_tables(size_t memory_map_size,
}
// Devices
dprintf("Devices:\n");
TRACE("Devices:\n");
MapAddrRange(gKernelArgs.arch_args.clint, (1 << pteRead) | (1 << pteWrite));
MapAddrRange(gKernelArgs.arch_args.htif, (1 << pteRead) | (1 << pteWrite));
MapAddrRange(gKernelArgs.arch_args.plic, (1 << pteRead) | (1 << pteWrite));
@ -430,7 +419,8 @@ arch_mmu_generate_post_efi_page_tables(size_t memory_map_size,
(1 << pteRead) | (1 << pteWrite));
}
sort_address_ranges(gKernelArgs.virtual_allocated_range, gKernelArgs.num_virtual_allocated_ranges);
sort_address_ranges(gKernelArgs.virtual_allocated_range,
gKernelArgs.num_virtual_allocated_ranges);
DumpPageTable(GetSatp());

View File

@ -10,11 +10,12 @@
#include <arch_cpu_defs.h>
#include "arch_traps.h"
#include "efi_platform.h"
#include "generic_mmu.h"
#include "mmu.h"
#include "serial.h"
#include "smp.h"
#include "efi_platform.h"
#include "arch_traps.h"
// From entry.S
@ -92,22 +93,15 @@ arch_start_kernel(addr_t kernelEntry)
for (size_t i = 0; i < memory_map_size / descriptor_size; ++i) {
efi_memory_descriptor *entry
= (efi_memory_descriptor *)(addr + i * descriptor_size);
dprintf(" phys: %#lx, virt: %#lx, size: %#lx, ",
entry->PhysicalStart, entry->VirtualStart,
entry->NumberOfPages * B_PAGE_SIZE);
switch (entry->Type) {
case EfiReservedMemoryType: dprintf("reservedMemoryType"); break;
case EfiLoaderCode: dprintf("loaderCode"); break;
case EfiLoaderData: dprintf("loaderData"); break;
case EfiBootServicesCode: dprintf("bootServicesCode"); break;
case EfiBootServicesData: dprintf("bootServicesData"); break;
case EfiConventionalMemory: dprintf("conventionalMemory"); break;
case EfiACPIReclaimMemory: dprintf("ACPIReclaimMemory"); break;
case EfiRuntimeServicesCode: dprintf("runtimeServicesCode"); break;
case EfiRuntimeServicesData: dprintf("runtimeServicesData"); break;
default: dprintf("?(%d)", entry->Type);
}
dprintf(", attrs: %#lx\n", entry->Attribute);
dprintf(" phys: 0x%08" PRIx64 "-0x%08" PRIx64
", virt: 0x%08" PRIx64 "-0x%08" PRIx64
", type: %s (%#x), attr: %#" PRIx64 "\n",
entry->PhysicalStart,
entry->PhysicalStart + entry->NumberOfPages * B_PAGE_SIZE,
entry->VirtualStart,
entry->VirtualStart + entry->NumberOfPages * B_PAGE_SIZE,
memory_region_type_str(entry->Type), entry->Type,
entry->Attribute);
}
// Generate page tables for use after ExitBootServices.