kernel: Add maxAddress constraint to vm_allocate_early_physical_page.

Needed by the x86 PAE code.

Change-Id: I982dfa96427addfc903205f49f53a46462534843
Reviewed-on: https://review.haiku-os.org/c/haiku/+/8457
Tested-by: Commit checker robot <no-reply+buildbot@haiku-os.org>
Reviewed-by: waddlesplash <waddlesplash@gmail.com>
Augustin Cavalier, 2024-10-14 22:27:05 -04:00 (committed by waddlesplash)
parent 6ff28d8731
commit 5f87420792
3 changed files with 29 additions and 17 deletions
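
In short, the new vm_allocate_early_physical_page_etc() accepts an optional upper bound on the physical address of the page it returns, which is what lets the x86 PAE bootstrap code ask for pages that are guaranteed to be 32-bit addressable. A minimal usage sketch (editorial illustration based on the hunks below, not part of the commit; args is the kernel_args* the caller already has):

    // Sketch: request an early physical page whose physical address must lie
    // below 4 GiB. The function returns a page number (the PAE code below
    // treats 0 as failure), so the byte address is obtained by multiplying
    // with B_PAGE_SIZE.
    page_num_t pageNumber
        = vm_allocate_early_physical_page_etc(args, 0xffffffff);
    if (pageNumber == 0)
        panic("no 32-bit addressable early physical page available");
    phys_addr_t physicalAddress = (phys_addr_t)pageNumber * B_PAGE_SIZE;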

@@ -59,8 +59,9 @@ status_t vm_init_post_modules(struct kernel_args *args);
 void vm_free_kernel_args(struct kernel_args *args);
 void vm_free_unused_boot_loader_range(addr_t start, addr_t end);
 page_num_t vm_allocate_early_physical_page(kernel_args *args);
+page_num_t vm_allocate_early_physical_page_etc(kernel_args *args, phys_addr_t maxAddress = 0);
 addr_t vm_allocate_early(struct kernel_args *args, size_t virtualSize,
-    size_t physicalSize, uint32 attributes, addr_t alignment);
+    size_t physicalSize, uint32 attributes, addr_t alignment);
 void slab_init(struct kernel_args *args);
 void slab_init_post_area();
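
Note the default argument: existing callers of vm_allocate_early_physical_page() are unaffected, and, as the vm.cpp hunks further down show, the old entry point simply forwards to the new one, with a maxAddress of 0 meaning "no constraint" (__HAIKU_PHYS_ADDR_MAX). A hedged sketch of that relationship:

    // Sketch only: both calls take the same unconstrained allocation path;
    // maxAddress == 0 is mapped to __HAIKU_PHYS_ADDR_MAX internally.
    page_num_t a = vm_allocate_early_physical_page(args);
    page_num_t b = vm_allocate_early_physical_page_etc(args, 0);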

@@ -241,7 +241,7 @@ private:
         uint32 oldPageTableCount = virtualSize / B_PAGE_SIZE / 1024;
         for (uint32 i = 0; i < oldPageTableCount; i++) {
             // allocate a page
-            phys_addr_t physicalTable = _AllocatePage32Bit();
+            phys_addr_t physicalTable = _Allocate32BitPage();

             // put the page into the page dir
             page_directory_entry* entry = &fPageHolePageDir[
@@ -261,7 +261,7 @@ private:
         // allocate and map the pages we need
         for (uint32 i = 0; i < pagesNeeded; i++) {
             // allocate a page
-            phys_addr_t physicalAddress = _AllocatePage32Bit();
+            phys_addr_t physicalAddress = _Allocate32BitPage();

             // put the page into the page table
             page_table_entry* entry = fPageHole + virtualBase / B_PAGE_SIZE + i;
@@ -281,21 +281,13 @@ private:
             = (addr_t)(fAllocatedPages + pagesNeeded * B_PAGE_SIZE);
     }

-    phys_addr_t _AllocatePage()
+    phys_addr_t _Allocate32BitPage()
     {
         phys_addr_t physicalAddress
-            = (phys_addr_t)vm_allocate_early_physical_page(fKernelArgs)
+            = (phys_addr_t)vm_allocate_early_physical_page_etc(fKernelArgs, 0xffffffff)
                 * B_PAGE_SIZE;
-        if (physicalAddress == 0)
-            panic("Failed to allocate page for the switch to PAE!");
-        return physicalAddress;
-    }
-
-    phys_addr_t _AllocatePage32Bit()
-    {
-        phys_addr_t physicalAddress = _AllocatePage();
-        if (physicalAddress > 0xffffffff) {
-            panic("Failed to allocate 32 bit addressable page for the switch "
+        if (physicalAddress == 0 || physicalAddress > 0xffffffff) {
+            panic("Failed to allocate 32-bit-addressable page for the switch "
                 "to PAE!");
             return 0;
         }

@@ -4336,18 +4336,31 @@ is_page_in_physical_memory_range(kernel_args* args, phys_addr_t address)
 page_num_t
 vm_allocate_early_physical_page(kernel_args* args)
 {
+    return vm_allocate_early_physical_page_etc(args);
+}
+
+
+page_num_t
+vm_allocate_early_physical_page_etc(kernel_args* args, phys_addr_t maxAddress)
+{
     if (args->num_physical_allocated_ranges == 0) {
         panic("early physical page allocations no longer possible!");
         return 0;
     }
+    if (maxAddress == 0)
+        maxAddress = __HAIKU_PHYS_ADDR_MAX;

     // Try expanding the existing physical ranges upwards.
     for (int32 i = args->num_physical_allocated_ranges - 1; i > 0; i--) {
         addr_range& range = args->physical_allocated_range[i];
         phys_addr_t nextPage = range.start + range.size;

-        // make sure the next page does not collide with the next allocated range
+        // check constraints
+        if (nextPage > maxAddress)
+            continue;
+
+        // make sure the page does not collide with the next allocated range
         if ((i + 1) < (int32)args->num_physical_allocated_ranges) {
             addr_range& nextRange = args->physical_allocated_range[i + 1];
             if (nextRange.size != 0 && nextPage >= nextRange.start)
@@ -4366,7 +4379,11 @@ vm_allocate_early_physical_page(kernel_args* args)
         addr_range& range = args->physical_allocated_range[i];
         phys_addr_t nextPage = range.start - B_PAGE_SIZE;

-        // make sure the next page does not collide with the previous allocated range
+        // check constraints
+        if (nextPage > maxAddress)
+            continue;
+
+        // make sure the page does not collide with the previous allocated range
         if (i > 0) {
             addr_range& previousRange = args->physical_allocated_range[i - 1];
             if (previousRange.size != 0 && nextPage < (previousRange.start + previousRange.size))
@@ -4393,6 +4410,8 @@ vm_allocate_early_physical_page(kernel_args* args)
         // Ignore everything before the last-allocated page, as well as small ranges.
         if (range.start < lastPage || range.size < (B_PAGE_SIZE * 128))
             continue;
+        if (range.start > maxAddress)
+            break;

         nextPage = range.start;
         break;
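
For readers skimming the partial hunks above: each of the strategies visible here (expanding an existing allocated range upwards, expanding one downwards, falling back to a fresh range) now rejects any candidate page that would start beyond maxAddress. A simplified, self-contained model of just that check (editorial sketch, not the kernel code; the collision checks and bookkeeping are omitted):

    #include <cstdint>
    #include <vector>

    struct Range { uint64_t start, size; };    // a physical allocated range

    // Return the physical start address of the first candidate page that
    // respects maxAddress, or 0 if none does. A maxAddress of 0 means
    // "no constraint", as in vm_allocate_early_physical_page_etc().
    uint64_t
    find_candidate(const std::vector<Range>& ranges, uint64_t maxAddress)
    {
        if (maxAddress == 0)
            maxAddress = UINT64_MAX;

        for (const Range& range : ranges) {
            uint64_t nextPage = range.start + range.size;
            if (nextPage > maxAddress)
                continue;    // candidate lies above the allowed region
            return nextPage;
        }
        return 0;
    }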