kernel/x86: Ignore non-addressable memory in paging method initialization.

If we decide to use 32-bit paging but for some reason the bootloader
hasn't trimmed memory beyond 4 GB, it's important that we trim it here.

For PAE, trimming is even more important: we may be booting on a system
with more than 64 GB of RAM, but PAE can only address up to 36-bit
physical addresses, so we need to ignore any memory beyond that limit.

May finally fix the rest of #19117.
This commit is contained in:
Augustin Cavalier 2024-10-18 17:59:49 -04:00
parent 6e8900976d
commit c04087a9af
2 changed files with 20 additions and 0 deletions

View File

@ -276,6 +276,16 @@ X86PagingMethod32Bit::Init(kernel_args* args,
{
TRACE("X86PagingMethod32Bit::Init(): entry\n");
// Ignore all memory beyond the maximum 32-bit address.
static const phys_addr_t kLimit = 1ULL << 32;
for (uint32 i = 0; i < args->num_physical_memory_ranges; i++) {
addr_range& range = args->physical_memory_range[i];
if (range.start >= kLimit)
range.size = 0;
else if ((range.start + range.size) > kLimit)
range.size = kLimit - range.start;
}
// page hole set up in stage2
fPageHole = (page_table_entry*)(addr_t)args->arch_args.page_hole;
// calculate where the pgdir would be

View File

@ -584,6 +584,16 @@ status_t
X86PagingMethodPAE::Init(kernel_args* args,
VMPhysicalPageMapper** _physicalPageMapper)
{
// Ignore all memory beyond the maximum PAE address.
static const phys_addr_t kLimit = 1ULL << 36;
for (uint32 i = 0; i < args->num_physical_memory_ranges; i++) {
addr_range& range = args->physical_memory_range[i];
if (range.start >= kLimit)
range.size = 0;
else if ((range.start + range.size) > kLimit)
range.size = kLimit - range.start;
}
// switch to PAE
ToPAESwitcher(args).Switch(fKernelVirtualPageDirPointerTable,
fKernelPhysicalPageDirPointerTable, fEarlyPageStructures,