Mirror of https://review.haiku-os.org/haiku (synced 2024-11-23 15:28:58 +01:00)
bootloader: Overhaul heap region allocation.
Previously, there was only platform_init_heap/platform_release_heap, which allocated a single static region for the heap to use; any subsequent heap allocations had to go through the standard platform_allocate_region, which allocates regions visible both to the bootloader and the kernel. As mentioned in previous changes, it isn't always easy to release regions allocated that way. Besides, some bootloaders (like EFI) use a completely separate mechanism to allocate bootloader-local memory, which will never be "leaked" into the kernel.

So, refactor all platforms to instead provide two new methods: platform_{allocate,free}_heap_region. On EFI this is easy to implement; on most other platforms the logic is based on the old platform_init_heap or allocate_region.

(On the BIOS loader in particular, we can only fully release the memory if it was the last thing allocated in physical address space. If the "large allocation" threshold is lowered back to 16 KB, we are unable to do this often enough, run past the end of the 8 MB identity map, and thus fail to boot. But with the larger threshold, we don't leak nearly as much and don't hit the threshold.)

This should further reduce the amount of bootloader memory permanently "leaked" into the kernel's used memory, though on some platforms it may still be nonzero.

Change-Id: I5b2257fc5a425c024f298291f1401a26ea246383
Reviewed-on: https://review.haiku-os.org/c/haiku/+/8440
Reviewed-by: waddlesplash <waddlesplash@gmail.com>
parent: 359a04ba96
commit: 875b13d582
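For context, the change boils down to the two entry points declared in the header hunk below: platform_allocate_heap_region() returns the actual size it reserved (possibly rounded up, e.g. to whole pages on EFI) or a negative error code such as B_NO_MEMORY (or B_UNSUPPORTED on stub platforms), and platform_free_heap_region() takes that same base and size back. A minimal caller-side sketch follows; the helper names and the <boot/platform.h> include path are illustrative assumptions, not part of this change.

// Sketch of how loader code would use the new heap-region API.
// Assumes the declarations from the header hunk below are available.
#include <boot/platform.h>	// hypothetical include path for the declarations

static void*
allocate_scratch_buffer(size_t wantedSize, size_t* _actualSize)
{
	void* base = NULL;
	ssize_t actualSize = platform_allocate_heap_region(wantedSize, &base);
	if (actualSize < 0)
		return NULL;	// B_NO_MEMORY, or B_UNSUPPORTED on stub platforms

	// The platform may hand back more than requested (EFI rounds up to
	// whole pages), so remember the actual size for the matching free.
	*_actualSize = (size_t)actualSize;
	return base;
}

static void
free_scratch_buffer(void* base, size_t actualSize)
{
	// Pass back the size returned by the allocation, not the requested one.
	platform_free_heap_region(base, actualSize);
}

This mirrors how LargeAllocation::Allocate()/Free() in the heap hunk below records the returned size before freeing.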
@@ -21,8 +21,8 @@ extern void panic(const char *format, ...);
 extern void dprintf(const char *format, ...);
 
 /* heap functions */
-extern void platform_release_heap(struct stage2_args *args, void *base);
-extern status_t platform_init_heap(struct stage2_args *args, void **_base, void **_top);
+extern ssize_t platform_allocate_heap_region(size_t size, void **_base);
+extern void platform_free_heap_region(void *_base, size_t size);
 
 /* MMU/memory functions */
 extern status_t platform_allocate_region(void **_virtualAddress, size_t size,
@@ -665,25 +665,29 @@ platform_free_region(void *address, size_t size)
 }
 
 
-void
-platform_release_heap(struct stage2_args *args, void *base)
+ssize_t
+platform_allocate_heap_region(size_t size, void **_base)
 {
-	// It will be freed automatically, since it is in the
-	// identity mapped region, and not stored in the kernel's
-	// page tables.
-}
-
-
-status_t
-platform_init_heap(struct stage2_args *args, void **_base, void **_top)
-{
-	void *heap = (void *)get_next_physical_address(args->heap_size);
-	if (heap == NULL)
+	addr_t base = get_next_physical_address(size);
+	if (base == 0)
 		return B_NO_MEMORY;
 
-	*_base = heap;
-	*_top = (void *)((int8 *)heap + args->heap_size);
-	return B_OK;
+	if ((base + size) > (32 * 1024 * 1024))
+		panic("platform_allocate_heap_region: region end is beyond identity map");
+
+	*_base = (void*)base;
+	return size;
+}
+
+
+void
+platform_free_heap_region(void *_base, size_t size)
+{
+	addr_t base = (addr_t)_base;
+	status_t status = remove_physical_allocated_range(base, size);
+	if (status == B_OK && sNextPhysicalAddress == (base + size))
+		sNextPhysicalAddress -= size;
+
+	// Failures don't matter very much as regions should be freed automatically,
+	// since they're in the identity map and not stored in the kernel's page tables.
 }
@@ -56,8 +56,7 @@ const static size_t kAlignment = 8;
 const static size_t kDefaultHeapSize = (1024 + 512) * 1024;
 	// default initial heap size, unless overridden by platform loader
 const static size_t kLargeAllocationThreshold = 128 * 1024;
-	// allocations of this size or larger are allocated via
-	// platform_allocate_region()
+	// allocations of this size or larger are allocated separately
 
 
 class Chunk {
@@ -180,14 +179,17 @@ struct LargeAllocation {
 
 	status_t Allocate(size_t size)
 	{
-		fSize = size;
-		return platform_allocate_region(&fAddress, fSize,
-			B_READ_AREA | B_WRITE_AREA, false);
+		ssize_t actualSize = platform_allocate_heap_region(size, &fAddress);
+		if (actualSize < 0)
+			return actualSize;
+
+		fSize = actualSize;
+		return B_OK;
 	}
 
 	void Free()
 	{
-		platform_free_region(fAddress, fSize);
+		platform_free_heap_region(fAddress, fSize);
 	}
 
 	void* Address() const
@@ -390,7 +392,7 @@ heap_release(stage2_args* args)
 		allocation = next;
 	}
 
-	platform_release_heap(args, sHeapBase);
+	platform_free_heap_region(sHeapBase, (addr_t)sHeapEnd - (addr_t)sHeapBase);
 }
 
 
@@ -411,13 +413,13 @@ heap_init(stage2_args* args)
 		args->heap_size = kDefaultHeapSize;
 
 	void* base;
-	void* top;
-	if (platform_init_heap(args, &base, &top) < B_OK)
+	ssize_t size = platform_allocate_heap_region(args->heap_size, &base);
+	if (size < 0)
 		return B_ERROR;
 
 	sHeapBase = base;
-	sHeapEnd = top;
-	sMaxHeapSize = (uint8*)top - (uint8*)base;
+	sHeapEnd = (void*)((addr_t)base + size);
+	sMaxHeapSize = (uint8*)sHeapEnd - (uint8*)sHeapBase;
 
 	// declare the whole heap as one chunk, and add it
 	// to the free list
@@ -663,25 +663,31 @@ platform_free_region(void *address, size_t size)
 }
 
 
-void
-platform_release_heap(struct stage2_args *args, void *base)
+ssize_t
+platform_allocate_heap_region(size_t size, void **_base)
 {
-	// It will be freed automatically, since it is in the
-	// identity mapped region, and not stored in the kernel's
-	// page tables.
-}
-
-
-status_t
-platform_init_heap(struct stage2_args *args, void **_base, void **_top)
-{
-	void *heap = (void *)get_next_physical_address(args->heap_size);
-	if (heap == NULL)
+	addr_t base = get_next_physical_address(size);
+	if (base == 0)
 		return B_NO_MEMORY;
 
-	*_base = heap;
-	*_top = (void *)((int8 *)heap + args->heap_size);
-	return B_OK;
+	if ((base + size) > (32 * 1024 * 1024))
+		panic("platform_allocate_heap_region: region end is beyond identity map");
+
+	*_base = (void*)base;
+	return size;
+}
+
+
+void
+platform_free_heap_region(void *_base, size_t size)
+{
+	addr_t base = (addr_t)_base;
+	status_t status = remove_physical_allocated_range(base, size);
+	if (status == B_OK && sNextPhysicalAddress == (base + size))
+		sNextPhysicalAddress -= size;
+
+	// Failures don't matter very much as regions should be freed automatically,
+	// since they're in the identity map and not stored in the kernel's page tables.
 }
@@ -668,25 +668,31 @@ platform_free_region(void *address, size_t size)
 }
 
 
-void
-platform_release_heap(struct stage2_args *args, void *base)
+ssize_t
+platform_allocate_heap_region(size_t size, void **_base)
 {
-	// It will be freed automatically, since it is in the
-	// identity mapped region, and not stored in the kernel's
-	// page tables.
-}
-
-
-status_t
-platform_init_heap(struct stage2_args *args, void **_base, void **_top)
-{
-	void *heap = (void *)get_next_physical_address(args->heap_size);
-	if (heap == NULL)
+	addr_t base = get_next_physical_address(size);
+	if (base == 0)
 		return B_NO_MEMORY;
 
-	*_base = heap;
-	*_top = (void *)((int8 *)heap + args->heap_size);
-	return B_OK;
+	if ((base + size) > (32 * 1024 * 1024))
+		panic("platform_allocate_heap_region: region end is beyond identity map");
+
+	*_base = (void*)base;
+	return size;
+}
+
+
+void
+platform_free_heap_region(void *_base, size_t size)
+{
+	addr_t base = (addr_t)_base;
+	status_t status = remove_physical_allocated_range(base, size);
+	if (status == B_OK && sNextPhysicalAddress == (base + size))
+		sNextPhysicalAddress -= size;
+
+	// Failures don't matter very much as regions should be freed automatically,
+	// since they're in the identity map and not stored in the kernel's page tables.
 }
@@ -73,6 +73,7 @@ segment_descriptor gBootGDT[BOOT_GDT_SEGMENT_COUNT];
 
 static const uint32 kDefaultPageTableFlags = 0x07;	// present, user, R/W
 static const size_t kMaxKernelSize = 0x1000000;		// 16 MB for the kernel
+static const size_t kIdentityMapEnd = (8 * 1024 * 1024);
 
 // working page directory and page table
 static uint32 *sPageDirectory = 0;
@@ -170,7 +171,7 @@ add_page_table(addr_t base)
 
 	// Get new page table and clear it out
 	uint32 *pageTable = get_next_page_table();
-	if (pageTable > (uint32 *)(8 * 1024 * 1024)) {
+	if (pageTable > (uint32 *)kIdentityMapEnd) {
 		panic("tried to add page table beyond the identity mapped 8 MB "
 			"region\n");
 		return NULL;
@@ -809,25 +810,32 @@ platform_free_region(void *address, size_t size)
 }
 
 
-void
-platform_release_heap(struct stage2_args *args, void *base)
+ssize_t
+platform_allocate_heap_region(size_t size, void **_base)
 {
-	// It will be freed automatically, since it is in the
-	// identity mapped region, and not stored in the kernel's
-	// page tables.
-}
-
-
-status_t
-platform_init_heap(struct stage2_args *args, void **_base, void **_top)
-{
-	void *heap = (void *)get_next_physical_address(args->heap_size);
-	if (heap == NULL)
+	addr_t base = get_next_physical_address(size);
+	if (base == 0)
 		return B_NO_MEMORY;
 
-	*_base = heap;
-	*_top = (void *)((int8 *)heap + args->heap_size);
-	return B_OK;
+	if ((base + size) > kIdentityMapEnd)
+		panic("platform_allocate_heap_region: region end is beyond identity map");
+
+	*_base = (void*)base;
+	return size;
+}
+
+
+void
+platform_free_heap_region(void *_base, size_t size)
+{
+	addr_t base = (addr_t)_base;
+	if (sNextPhysicalAddress == (base + size)) {
+		sNextPhysicalAddress -= size;
+		remove_physical_allocated_range(sNextPhysicalAddress, size);
+	}
+
+	// Failures don't matter very much as regions should be freed automatically,
+	// since they're in the identity map and not stored in the kernel's page tables.
 }
@@ -1,40 +1,29 @@
 /*
- * Copyright 2016 Haiku, Inc. All rights reserved.
+ * Copyright 2016-2024, Haiku, Inc. All rights reserved.
  * Distributed under the terms of the MIT License.
  */
 
 
 #include <boot/platform.h>
 #include <boot/stage2.h>
 
 #include "efi_platform.h"
 
 
-#define STAGE_PAGES 0x2000 /* 32 MB */
-
-static efi_physical_addr staging;
-
-
-extern "C" status_t
-platform_init_heap(struct stage2_args *args, void **_base, void **_top)
+extern "C" ssize_t
+platform_allocate_heap_region(size_t _size, void** _base)
 {
-	if (kBootServices->AllocatePages(AllocateAnyPages, EfiLoaderData,
-			STAGE_PAGES, &staging) != EFI_SUCCESS)
+	size_t pages = (_size + (B_PAGE_SIZE - 1)) / B_PAGE_SIZE;
+	efi_physical_addr base;
+	if (kBootServices->AllocatePages(AllocateAnyPages, EfiLoaderData, pages, &base) != EFI_SUCCESS)
 		return B_NO_MEMORY;
 
-	*_base = (void*)staging;
-	*_top = (void*)((int8*)staging + STAGE_PAGES * B_PAGE_SIZE);
-
-	return B_OK;
+	*_base = (void*)base;
+	return pages * B_PAGE_SIZE;
 }
 
 
 extern "C" void
-platform_release_heap(struct stage2_args *args, void *base)
+platform_free_heap_region(void* base, size_t size)
 {
-	if ((void*)staging != base)
-		panic("Attempt to release heap with wrong base address!");
-
-	kBootServices->FreePages(staging, STAGE_PAGES);
+	kBootServices->FreePages((efi_physical_addr)base, size / B_PAGE_SIZE);
 }
@@ -150,19 +150,18 @@ platform_free_region(void *address, size_t size)
 }
 
 
-void
-platform_release_heap(struct stage2_args *args, void *base)
+ssize_t
+platform_allocate_heap_region(size_t size, void **_base)
 {
-	// It will be freed automatically, since it is in the
-	// identity mapped region, and not stored in the kernel's
-	// page tables.
+	return B_UNSUPPORTED;
 }
 
 
-status_t
-platform_init_heap(struct stage2_args *args, void **_base, void **_top)
+void
+platform_free_heap_region(void *_base, size_t size)
 {
-	return B_UNSUPPORTED;
+	// Failures don't matter very much as regions should be freed automatically,
+	// since they're in the identity map and not stored in the kernel's page tables.
 }
@@ -20,29 +20,25 @@
 #endif
 
 
-status_t
-platform_init_heap(stage2_args *args, void **_base, void **_top)
+ssize_t
+platform_allocate_heap_region(size_t size, void **_base)
 {
-	TRACE(("platform_init_heap()\n"));
+	TRACE(("platform_allocate_heap_region()\n"));
 
 	*_base = NULL;
-	status_t error = platform_allocate_region(_base, args->heap_size,
+	status_t error = platform_allocate_region(_base, size,
 		B_READ_AREA | B_WRITE_AREA, false);
 	if (error != B_OK)
 		return error;
 
 	printf("heap base = %p\n", *_base);
-	*_top = (void *)((int8 *)*_base + args->heap_size);
-	printf("heap top = %p\n", *_top);
-
-	return B_OK;
+	return size;
 }
 
 
 void
-platform_release_heap(stage2_args *args, void *base)
+platform_free_heap_region(void *_base, size_t size)
 {
-	if (base != NULL)
-		platform_free_region(base, args->heap_size);
+	if (_base != NULL)
+		platform_free_region(_base, size);
 }
@@ -352,22 +352,21 @@ platform_free_region(void* address, size_t size)
 }
 
 
-void
-platform_release_heap(struct stage2_args* args, void* base)
-{
-}
-
-
-status_t
-platform_init_heap(struct stage2_args* args, void** _base, void** _top)
+ssize_t
+platform_allocate_heap_region(size_t size, void **_base)
 {
-	addr_t heap = AllocPhysPages(args->heap_size);
+	addr_t heap = AllocPhysPages(size);
 	if (heap == 0)
 		return B_NO_MEMORY;
 
 	*_base = (void*)heap;
-	*_top = (void*)(heap + args->heap_size);
-	return B_OK;
+	return size;
+}
+
+
+void
+platform_free_heap_region(void *_base, size_t size)
+{
 }
@@ -102,7 +102,8 @@ static addr_t sNextVirtualAddress = KERNEL_BASE + kMaxKernelSize;
 //static addr_t sMaxVirtualAddress = KERNEL_BASE + kMaxKernelSize;
 
 // working page directory and page table
-static void *sPageTable = 0 ;
+static void *sPageTable = 0;
+static bool sHeapRegionAllocated = false;
 
 
 static addr_t
@@ -404,25 +405,28 @@ platform_free_region(void *address, size_t size)
 }
 
 
+ssize_t
+platform_allocate_heap_region(size_t size, void **_base)
+{
+	if (sHeapRegionAllocated)
+		return B_NO_MEMORY;
+	sHeapRegionAllocated = true;
+
+	// the heap is put right before the pagetable
+	void *heap = (uint8 *)sPageTable - size;
+	//FIXME: use phys addresses to allow passing args to U-Boot?
+
+	*_base = heap;
+	TRACE(("boot heap at 0x%p\n", *_base));
+	return size;
+}
+
+
 void
-platform_release_heap(struct stage2_args *args, void *base)
+platform_free_heap_region(void *_base, size_t size)
 {
 	//XXX
 	// It will be freed automatically, since it is in the
 	// identity mapped region, and not stored in the kernel's
 	// page tables.
 }
-
-
-status_t
-platform_init_heap(struct stage2_args *args, void **_base, void **_top)
-{
-	// the heap is put right before the pagetable
-	void *heap = (uint8 *)sPageTable - args->heap_size;
-	//FIXME: use phys addresses to allow passing args to U-Boot?
-
-	*_base = heap;
-	*_top = (void *)((int8 *)heap + args->heap_size);
-	TRACE(("boot heap at 0x%p to 0x%p\n", *_base, *_top));
-	return B_OK;
-}