kernel/vm: Use allocate_early instead of passing a get_free_page method to early_map.
Simplifies things significantly. No functional change intended.

Change-Id: Ia14c2ec72038ad6f8b56a14974dca78e1877063c
Reviewed-on: https://review.haiku-os.org/c/haiku/+/8725
Reviewed-by: waddlesplash <waddlesplash@gmail.com>
parent 0c2ad1411f
commit fccefbf347
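
In short: arch_vm_translation_map_early_map() and the per-architecture MapEarly() methods previously took a get_free_page callback that every caller had to thread through; the paging code now calls vm_allocate_early_physical_page() directly. A minimal before/after sketch of the contract change, with signatures taken from the diff below:

	// Before: the early page allocator was passed in as a function pointer.
	status_t arch_vm_translation_map_early_map(struct kernel_args *args, addr_t va,
		phys_addr_t pa, uint8 attributes,
		phys_addr_t (*get_free_page)(struct kernel_args *));

	// After: the paging code allocates page tables itself via
	// vm_allocate_early_physical_page().
	status_t arch_vm_translation_map_early_map(struct kernel_args *args, addr_t va,
		phys_addr_t pa, uint8 attributes);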
@@ -25,10 +25,9 @@ status_t arch_vm_translation_map_init_post_area(struct kernel_args *args);
 status_t arch_vm_translation_map_init_post_sem(struct kernel_args *args);
 
 // Quick function to map a page in regardless of map context. Used in VM
-// initialization before most vm data structures exist.
+// initialization before most VM data structures exist.
 status_t arch_vm_translation_map_early_map(struct kernel_args *args, addr_t va,
-	phys_addr_t pa, uint8 attributes,
-	phys_addr_t (*get_free_page)(struct kernel_args *));
+	phys_addr_t pa, uint8 attributes);
 
 bool arch_vm_translation_map_is_kernel_page_accessible(addr_t virtualAddress,
 	uint32 protection);
@@ -58,8 +58,7 @@ status_t vm_init_post_thread(struct kernel_args *args);
 status_t vm_init_post_modules(struct kernel_args *args);
 void vm_free_kernel_args(struct kernel_args *args);
 void vm_free_unused_boot_loader_range(addr_t start, addr_t end);
-page_num_t vm_allocate_early_physical_page(kernel_args *args);
-page_num_t vm_allocate_early_physical_page_etc(kernel_args *args, phys_addr_t maxAddress = 0);
+page_num_t vm_allocate_early_physical_page(kernel_args *args, phys_addr_t maxAddress = 0);
 addr_t vm_allocate_early(struct kernel_args *args, size_t virtualSize,
 	size_t physicalSize, uint32 attributes, addr_t alignment);
 
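The _etc variant is folded into vm_allocate_early_physical_page() itself: maxAddress becomes a defaulted parameter (0 presumably meaning "no upper bound"), so existing one-argument callers compile unchanged. A hypothetical caller, mirroring the PAE _Allocate32BitPage() hunk further down:

	// Any physical page will do (maxAddress defaults to 0):
	page_num_t page = vm_allocate_early_physical_page(args);

	// Only a 32-bit-addressable page will do:
	page_num_t lowPage = vm_allocate_early_physical_page(args, 0xffffffff);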
@@ -101,11 +101,11 @@ arch_vm_translation_map_init_post_area(kernel_args *args)
 
 status_t
 arch_vm_translation_map_early_map(kernel_args *args, addr_t va, phys_addr_t pa,
-	uint8 attributes, phys_addr_t (*get_free_page)(kernel_args *))
+	uint8 attributes)
 {
 	TRACE("early_tmap: entry pa 0x%lx va 0x%lx\n", pa, va);
 
-	return gARMPagingMethod->MapEarly(args, va, pa, attributes, get_free_page);
+	return gARMPagingMethod->MapEarly(args, va, pa, attributes);
 }
 
 
@@ -369,8 +369,7 @@ get_free_pgtable(kernel_args* args, phys_addr_t* phys_addr, addr_t* virt_addr)
 
 status_t
 ARMPagingMethod32Bit::MapEarly(kernel_args* args, addr_t virtualAddress,
-	phys_addr_t physicalAddress, uint8 attributes,
-	page_num_t (*get_free_page)(kernel_args*))
+	phys_addr_t physicalAddress, uint8 attributes)
 {
 	// check to see if a page table exists for this range
 	int index = VADDR_TO_PDENT(virtualAddress);
@@ -33,8 +33,7 @@ public:
 	virtual	status_t			MapEarly(kernel_args* args,
 									addr_t virtualAddress,
 									phys_addr_t physicalAddress,
-									uint8 attributes,
-									page_num_t (*get_free_page)(kernel_args*));
+									uint8 attributes);
 
 	virtual	bool				IsKernelPageAccessible(addr_t virtualAddress,
 									uint32 protection);
@@ -31,9 +31,7 @@ public:
 	virtual	status_t			MapEarly(kernel_args* args,
 									addr_t virtualAddress,
 									phys_addr_t physicalAddress,
-									uint8 attributes,
-									page_num_t (*get_free_page)(kernel_args*))
-										= 0;
+									uint8 attributes) = 0;
 
 	virtual	bool				IsKernelPageAccessible(addr_t virtualAddress,
 									uint32 protection) = 0;
@@ -155,7 +155,7 @@ TableFromPa(phys_addr_t pa)
 
 static void
 map_page_early(phys_addr_t ptPa, int level, addr_t va, phys_addr_t pa,
-	phys_addr_t (*get_free_page)(kernel_args*), kernel_args* args)
+	kernel_args* args)
 {
 	int tableBits = page_bits - 3;
 	uint64_t tableMask = (1UL << tableBits) - 1;
@@ -176,7 +176,7 @@ map_page_early(phys_addr_t ptPa, int level, addr_t va, phys_addr_t pa,
 		if (type == 0x3) {
 			table = pteVal & kPteAddrMask;
 		} else {
-			table = get_free_page(args) << page_bits;
+			table = vm_allocate_early_physical_page(args) << page_bits;
 			dprintf("early: pulling page %lx\n", table);
 			uint64_t* newTableVa = TableFromPa(table);
 
@@ -195,14 +195,13 @@ map_page_early(phys_addr_t ptPa, int level, addr_t va, phys_addr_t pa,
 			atomic_set64((int64*) pte, table | 0x3);
 		}
 
-		map_page_early(table, level + 1, va, pa, get_free_page, args);
+		map_page_early(table, level + 1, va, pa, args);
 	}
 }
 
 
 status_t
-arch_vm_translation_map_early_map(kernel_args* args, addr_t va, phys_addr_t pa, uint8 attributes,
-	phys_addr_t (*get_free_page)(kernel_args*))
+arch_vm_translation_map_early_map(kernel_args* args, addr_t va, phys_addr_t pa, uint8 attributes)
 {
 	int va_bits = 64 - tsz;
 	uint64_t va_mask = (1UL << va_bits) - 1;
@@ -213,7 +212,7 @@ arch_vm_translation_map_early_map(kernel_args* args, addr_t va, phys_addr_t pa,
 	va &= va_mask;
 	pa |= VMSAv8TranslationMap::GetMemoryAttr(attributes, 0, true);
 
-	map_page_early(ptPa, level, va, pa, get_free_page, args);
+	map_page_early(ptPa, level, va, pa, args);
 
 	return B_OK;
 }
@@ -144,11 +144,11 @@ arch_vm_translation_map_init_post_area(kernel_args *args)
 */
 status_t
 arch_vm_translation_map_early_map(kernel_args *args, addr_t va, phys_addr_t pa,
-	uint8 attributes, phys_addr_t (*get_free_page)(kernel_args *))
+	uint8 attributes)
 {
 	TRACE("early_tmap: entry pa 0x%lx va 0x%lx\n", pa, va);
 
-	return gM68KPagingMethod->MapEarly(args, va, pa, attributes, get_free_page);
+	return gM68KPagingMethod->MapEarly(args, va, pa, attributes);
 }
 
 
@@ -1446,7 +1446,7 @@ m68k_vm_translation_map_init_post_area(kernel_args *args)
 
 static status_t
 m68k_vm_translation_map_early_map(kernel_args *args, addr_t va, addr_t pa,
-	uint8 attributes, addr_t (*get_free_page)(kernel_args *))
+	uint8 attributes)
 {
 	page_root_entry *pr = (page_root_entry *)sKernelPhysicalPageRoot;
 	page_directory_entry *pd;
@@ -1463,7 +1463,7 @@ m68k_vm_translation_map_early_map(kernel_args *args, addr_t va, addr_t pa,
 	if (pr[index].type != DT_ROOT) {
 		unsigned aindex = index & ~(NUM_DIRTBL_PER_PAGE-1); /* aligned */
 		TRACE(("missing page root entry %d ai %d\n", index, aindex));
-		tbl = get_free_page(args) * B_PAGE_SIZE;
+		tbl = vm_allocate_early_physical_page(args) * B_PAGE_SIZE;
 		if (!tbl)
 			return ENOMEM;
 		TRACE(("early_map: asked for free page for pgdir. 0x%lx\n", tbl));
@@ -1487,7 +1487,7 @@ m68k_vm_translation_map_early_map(kernel_args *args, addr_t va, addr_t pa,
 	if (pd[index].type != DT_DIR) {
 		unsigned aindex = index & ~(NUM_PAGETBL_PER_PAGE-1); /* aligned */
 		TRACE(("missing page dir entry %d ai %d\n", index, aindex));
-		tbl = get_free_page(args) * B_PAGE_SIZE;
+		tbl = vm_allocate_early_physical_page(args) * B_PAGE_SIZE;
 		if (!tbl)
 			return ENOMEM;
 		TRACE(("early_map: asked for free page for pgtable. 0x%lx\n", tbl));
@@ -467,8 +467,7 @@ M68KPagingMethod040::CreateTranslationMap(bool kernel, VMTranslationMap** _map)
 
 status_t
 M68KPagingMethod040::MapEarly(kernel_args* args, addr_t virtualAddress,
-	phys_addr_t physicalAddress, uint8 attributes,
-	phys_addr_t (*get_free_page)(kernel_args*))
+	phys_addr_t physicalAddress, uint8 attributes)
 {
 	// XXX horrible back door to map a page quickly regardless of translation
 	// map object, etc. used only during VM setup.
@@ -494,7 +493,7 @@ M68KPagingMethod040::MapEarly(kernel_args* args, addr_t virtualAddress,
 	if (PRE_TYPE(pr[index]) != DT_ROOT) {
 		unsigned aindex = index & ~(NUM_DIRTBL_PER_PAGE-1); /* aligned */
 		TRACE("missing page root entry %d ai %d\n", index, aindex);
-		tbl = get_free_page(args) * B_PAGE_SIZE;
+		tbl = vm_allocate_early_physical_page(args) * B_PAGE_SIZE;
 		if (!tbl)
 			return ENOMEM;
 		TRACE("040::MapEarly: asked for free page for pgdir. 0x%lx\n", tbl);
@@ -518,7 +517,7 @@ M68KPagingMethod040::MapEarly(kernel_args* args, addr_t virtualAddress,
 	if (PDE_TYPE(pd[index]) != DT_DIR) {
 		unsigned aindex = index & ~(NUM_PAGETBL_PER_PAGE-1); /* aligned */
 		TRACE("missing page dir entry %d ai %d\n", index, aindex);
-		tbl = get_free_page(args) * B_PAGE_SIZE;
+		tbl = vm_allocate_early_physical_page(args) * B_PAGE_SIZE;
 		if (!tbl)
 			return ENOMEM;
 		TRACE("early_map: asked for free page for pgtable. 0x%lx\n", tbl);
@@ -555,7 +554,7 @@ M68KPagingMethod040::MapEarly(kernel_args* args, addr_t virtualAddress,
 	phys_addr_t pgtable;
 	page_directory_entry *e;
 	// we need to allocate a pgtable
-	pgtable = get_free_page(args);
+	pgtable = vm_allocate_early_physical_page(args);
 	// pgtable is in pages, convert to physical address
 	pgtable *= B_PAGE_SIZE;
 
@@ -30,8 +30,7 @@ public:
 	virtual	status_t			MapEarly(kernel_args* args,
 									addr_t virtualAddress,
 									phys_addr_t physicalAddress,
-									uint8 attributes,
-									phys_addr_t (*get_free_page)(kernel_args*));
+									uint8 attributes);
 
 	virtual	bool				IsKernelPageAccessible(addr_t virtualAddress,
 									uint32 protection);
@@ -29,9 +29,7 @@ public:
 	virtual	status_t			MapEarly(kernel_args* args,
 									addr_t virtualAddress,
 									phys_addr_t physicalAddress,
-									uint8 attributes,
-									phys_addr_t (*get_free_page)(kernel_args*))
-										= 0;
+									uint8 attributes) = 0;
 
 	virtual	bool				IsKernelPageAccessible(addr_t virtualAddress,
 									uint32 protection) = 0;
@@ -283,12 +283,12 @@ arch_vm_translation_map_init_post_sem(kernel_args *args)
 
 status_t
 arch_vm_translation_map_early_map(kernel_args *args, addr_t va, phys_addr_t pa,
-	uint8 attributes, phys_addr_t (*get_free_page)(kernel_args *))
+	uint8 attributes)
 {
 	TRACE("early_tmap: entry pa %#" B_PRIxPHYSADDR " va %#" B_PRIxADDR "\n", pa,
 		va);
 
-	return gPPCPagingMethod->MapEarly(args, va, pa, attributes, get_free_page);
+	return gPPCPagingMethod->MapEarly(args, va, pa, attributes);
 }
 
 
@@ -236,8 +236,7 @@ PPCPagingMethod460::CreateTranslationMap(bool kernel, VMTranslationMap** _map)
 
 status_t
 PPCPagingMethod460::MapEarly(kernel_args* args, addr_t virtualAddress,
-	phys_addr_t physicalAddress, uint8 attributes,
-	page_num_t (*get_free_page)(kernel_args*))
+	phys_addr_t physicalAddress, uint8 attributes)
 {
 	uint32 virtualSegmentID = get_sr((void *)virtualAddress) & 0xffffff;
 
@@ -31,8 +31,7 @@ public:
 	virtual	status_t			MapEarly(kernel_args* args,
 									addr_t virtualAddress,
 									phys_addr_t physicalAddress,
-									uint8 attributes,
-									page_num_t (*get_free_page)(kernel_args*));
+									uint8 attributes);
 
 	virtual	bool				IsKernelPageAccessible(addr_t virtualAddress,
 									uint32 protection);
@@ -31,9 +31,7 @@ public:
 	virtual	status_t			MapEarly(kernel_args* args,
 									addr_t virtualAddress,
 									phys_addr_t physicalAddress,
-									uint8 attributes,
-									page_num_t (*get_free_page)(kernel_args*))
-										= 0;
+									uint8 attributes) = 0;
 
 	virtual	bool				IsKernelPageAccessible(addr_t virtualAddress,
 									uint32 protection) = 0;
@@ -236,8 +236,7 @@ PPCPagingMethodClassic::CreateTranslationMap(bool kernel, VMTranslationMap** _ma
 
 status_t
 PPCPagingMethodClassic::MapEarly(kernel_args* args, addr_t virtualAddress,
-	phys_addr_t physicalAddress, uint8 attributes,
-	page_num_t (*get_free_page)(kernel_args*))
+	phys_addr_t physicalAddress, uint8 attributes)
 {
 	uint32 virtualSegmentID = get_sr((void *)virtualAddress) & 0xffffff;
 
@@ -31,8 +31,7 @@ public:
 	virtual	status_t			MapEarly(kernel_args* args,
 									addr_t virtualAddress,
 									phys_addr_t physicalAddress,
-									uint8 attributes,
-									page_num_t (*get_free_page)(kernel_args*));
+									uint8 attributes);
 
 	virtual	bool				IsKernelPageAccessible(addr_t virtualAddress,
 									uint32 protection);
@@ -42,8 +42,7 @@ char sPhysicalPageMapperData[sizeof(RISCV64VMPhysicalPageMapper)];
 // TODO: Consolidate function with RISCV64VMTranslationMap
 
 static Pte*
-LookupPte(addr_t virtAdr, bool alloc, kernel_args* args,
-	phys_addr_t (*get_free_page)(kernel_args *))
+LookupPte(addr_t virtAdr, bool alloc, kernel_args* args)
 {
 	Pte *pte = (Pte*)VirtFromPhys(sPageTable);
 	for (int level = 2; level > 0; level --) {
@@ -51,7 +50,7 @@ LookupPte(addr_t virtAdr, bool alloc, kernel_args* args,
 		if (!pte->isValid) {
 			if (!alloc)
 				return NULL;
-			page_num_t ppn = get_free_page(args);
+			page_num_t ppn = vm_allocate_early_physical_page(args);
 			if (ppn == 0)
 				return NULL;
 			memset((Pte*)VirtFromPhys(B_PAGE_SIZE * ppn), 0, B_PAGE_SIZE);
@@ -70,11 +69,10 @@ LookupPte(addr_t virtAdr, bool alloc, kernel_args* args,
 
 
 static void
-Map(addr_t virtAdr, phys_addr_t physAdr, uint64 flags, kernel_args* args,
-	phys_addr_t (*get_free_page)(kernel_args *))
+Map(addr_t virtAdr, phys_addr_t physAdr, uint64 flags, kernel_args* args)
 {
 	// dprintf("Map(0x%" B_PRIxADDR ", 0x%" B_PRIxADDR ")\n", virtAdr, physAdr);
-	Pte* pte = LookupPte(virtAdr, true, args, get_free_page);
+	Pte* pte = LookupPte(virtAdr, true, args);
 	if (pte == NULL) panic("can't allocate page table");
 
 	Pte newPte {
@@ -162,8 +160,7 @@ arch_vm_translation_map_init_post_area(kernel_args *args)
 
 status_t
 arch_vm_translation_map_early_map(kernel_args *args,
-	addr_t virtAdr, phys_addr_t physAdr, uint8 attributes,
-	phys_addr_t (*get_free_page)(kernel_args *))
+	addr_t virtAdr, phys_addr_t physAdr, uint8 attributes)
 {
 	//dprintf("early_map(%#" B_PRIxADDR ", %#" B_PRIxADDR ")\n", virtAdr, physAdr);
 	Pte flags {
@@ -171,7 +168,7 @@ arch_vm_translation_map_early_map(kernel_args *args,
 		.isWrite = (attributes & B_KERNEL_WRITE_AREA) != 0,
 		.isExec = (attributes & B_KERNEL_EXECUTE_AREA) != 0,
 	};
-	Map(virtAdr, physAdr, flags.val, args, get_free_page);
+	Map(virtAdr, physAdr, flags.val, args);
 	return B_OK;
 }
 
@@ -77,7 +77,7 @@ arch_vm_translation_map_init_post_area(kernel_args *args)
 
 status_t
 arch_vm_translation_map_early_map(kernel_args *args, addr_t va, phys_addr_t pa,
-	uint8 attributes, phys_addr_t (*get_free_page)(kernel_args *))
+	uint8 attributes)
 {
 	TRACE("early_tmap: entry pa 0x%lx va 0x%lx\n", pa, va);
 	return B_OK;
@@ -143,12 +143,12 @@ arch_vm_translation_map_init_post_area(kernel_args *args)
 
 status_t
 arch_vm_translation_map_early_map(kernel_args *args, addr_t va, phys_addr_t pa,
-	uint8 attributes, phys_addr_t (*get_free_page)(kernel_args *))
+	uint8 attributes)
 {
 	TRACE("early_tmap: entry pa %#" B_PRIxPHYSADDR " va %#" B_PRIxADDR "\n", pa,
 		va);
 
-	return gX86PagingMethod->MapEarly(args, va, pa, attributes, get_free_page);
+	return gX86PagingMethod->MapEarly(args, va, pa, attributes);
 }
 
 
@@ -392,8 +392,7 @@ X86PagingMethod32Bit::CreateTranslationMap(bool kernel, VMTranslationMap** _map)
 
 status_t
 X86PagingMethod32Bit::MapEarly(kernel_args* args, addr_t virtualAddress,
-	phys_addr_t physicalAddress, uint8 attributes,
-	page_num_t (*get_free_page)(kernel_args*))
+	phys_addr_t physicalAddress, uint8 attributes)
 {
 	// XXX horrible back door to map a page quickly regardless of translation
 	// map object, etc. used only during VM setup.
@@ -408,7 +407,7 @@ X86PagingMethod32Bit::MapEarly(kernel_args* args, addr_t virtualAddress,
 	phys_addr_t pgtable;
 	page_directory_entry *e;
 	// we need to allocate a pgtable
-	pgtable = get_free_page(args);
+	pgtable = vm_allocate_early_physical_page(args);
 	// pgtable is in pages, convert to physical address
 	pgtable *= B_PAGE_SIZE;
 
@@ -30,8 +30,7 @@ public:
 	virtual	status_t			MapEarly(kernel_args* args,
 									addr_t virtualAddress,
 									phys_addr_t physicalAddress,
-									uint8 attributes,
-									page_num_t (*get_free_page)(kernel_args*));
+									uint8 attributes);
 
 	virtual	bool				IsKernelPageAccessible(addr_t virtualAddress,
 									uint32 protection);
@@ -123,8 +123,7 @@ X86PagingMethod64Bit::CreateTranslationMap(bool kernel, VMTranslationMap** _map)
 
 status_t
 X86PagingMethod64Bit::MapEarly(kernel_args* args, addr_t virtualAddress,
-	phys_addr_t physicalAddress, uint8 attributes,
-	page_num_t (*get_free_page)(kernel_args*))
+	phys_addr_t physicalAddress, uint8 attributes)
 {
 	TRACE("X86PagingMethod64Bit::MapEarly(%#" B_PRIxADDR ", %#" B_PRIxPHYSADDR
 		", %#" B_PRIx8 ")\n", virtualAddress, physicalAddress, attributes);
@@ -148,7 +147,7 @@ X86PagingMethod64Bit::MapEarly(kernel_args* args, addr_t virtualAddress,
 	uint64* pdpte = &virtualPDPT[VADDR_TO_PDPTE(virtualAddress)];
 	uint64* virtualPageDir;
 	if ((*pdpte & X86_64_PDPTE_PRESENT) == 0) {
-		phys_addr_t physicalPageDir = get_free_page(args) * B_PAGE_SIZE;
+		phys_addr_t physicalPageDir = vm_allocate_early_physical_page(args) * B_PAGE_SIZE;
 
 		TRACE("X86PagingMethod64Bit::MapEarly(): creating page directory for va"
 			" %#" B_PRIxADDR " at %#" B_PRIxPHYSADDR "\n", virtualAddress,
@@ -172,7 +171,7 @@ X86PagingMethod64Bit::MapEarly(kernel_args* args, addr_t virtualAddress,
 	uint64* pde = &virtualPageDir[VADDR_TO_PDE(virtualAddress)];
 	uint64* virtualPageTable;
 	if ((*pde & X86_64_PDE_PRESENT) == 0) {
-		phys_addr_t physicalPageTable = get_free_page(args) * B_PAGE_SIZE;
+		phys_addr_t physicalPageTable = vm_allocate_early_physical_page(args) * B_PAGE_SIZE;
 
 		TRACE("X86PagingMethod64Bit::MapEarly(): creating page table for va"
 			" %#" B_PRIxADDR " at %#" B_PRIxPHYSADDR "\n", virtualAddress,
@@ -40,8 +40,7 @@ public:
 	virtual	status_t			MapEarly(kernel_args* args,
 									addr_t virtualAddress,
 									phys_addr_t physicalAddress,
-									uint8 attributes,
-									page_num_t (*get_free_page)(kernel_args*));
+									uint8 attributes);
 
 	virtual	bool				IsKernelPageAccessible(addr_t virtualAddress,
 									uint32 protection);
@@ -31,9 +31,7 @@ public:
 	virtual	status_t			MapEarly(kernel_args* args,
 									addr_t virtualAddress,
 									phys_addr_t physicalAddress,
-									uint8 attributes,
-									page_num_t (*get_free_page)(kernel_args*))
-										= 0;
+									uint8 attributes) = 0;
 
 	virtual	bool				IsKernelPageAccessible(addr_t virtualAddress,
 									uint32 protection) = 0;
@@ -284,7 +284,7 @@ private:
 	phys_addr_t _Allocate32BitPage()
 	{
 		phys_addr_t physicalAddress
-			= (phys_addr_t)vm_allocate_early_physical_page_etc(fKernelArgs, 0xffffffff)
+			= (phys_addr_t)vm_allocate_early_physical_page(fKernelArgs, 0xffffffff)
 				* B_PAGE_SIZE;
 		if (physicalAddress == 0 || physicalAddress > 0xffffffff) {
 			panic("Failed to allocate 32-bit-addressable page for the switch "
@@ -674,8 +674,7 @@ X86PagingMethodPAE::CreateTranslationMap(bool kernel, VMTranslationMap** _map)
 
 status_t
 X86PagingMethodPAE::MapEarly(kernel_args* args, addr_t virtualAddress,
-	phys_addr_t physicalAddress, uint8 attributes,
-	page_num_t (*get_free_page)(kernel_args*))
+	phys_addr_t physicalAddress, uint8 attributes)
 {
 	// check to see if a page table exists for this range
 	pae_page_directory_entry* pageDirEntry = PageDirEntryForAddress(
@@ -683,7 +682,7 @@ X86PagingMethodPAE::MapEarly(kernel_args* args, addr_t virtualAddress,
 	pae_page_table_entry* pageTable;
 	if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0) {
 		// we need to allocate a page table
-		phys_addr_t physicalPageTable = get_free_page(args) * B_PAGE_SIZE;
+		phys_addr_t physicalPageTable = vm_allocate_early_physical_page(args) * B_PAGE_SIZE;
 
 		TRACE("X86PagingMethodPAE::MapEarly(): asked for free page for "
 			"page table: %#" B_PRIxPHYSADDR "\n", physicalPageTable);
 
@@ -38,8 +38,7 @@ public:
 	virtual	status_t			MapEarly(kernel_args* args,
 									addr_t virtualAddress,
 									phys_addr_t physicalAddress,
-									uint8 attributes,
-									page_num_t (*get_free_page)(kernel_args*));
+									uint8 attributes);
 
 	virtual	bool				IsKernelPageAccessible(addr_t virtualAddress,
 									uint32 protection);
@@ -3724,14 +3724,7 @@ is_page_in_physical_memory_range(kernel_args* args, phys_addr_t address)
 
 
 page_num_t
-vm_allocate_early_physical_page(kernel_args* args)
-{
-	return vm_allocate_early_physical_page_etc(args);
-}
-
-
-page_num_t
-vm_allocate_early_physical_page_etc(kernel_args* args, phys_addr_t maxAddress)
+vm_allocate_early_physical_page(kernel_args* args, phys_addr_t maxAddress)
 {
 	if (args->num_physical_allocated_ranges == 0) {
 		panic("early physical page allocations no longer possible!");
@@ -3899,8 +3892,7 @@ vm_allocate_early(kernel_args* args, size_t virtualSize, size_t physicalSize,
 
 		status_t status = arch_vm_translation_map_early_map(args,
 			virtualBase + i * B_PAGE_SIZE,
-			physicalAddress * B_PAGE_SIZE, attributes,
-			&vm_allocate_early_physical_page);
+			physicalAddress * B_PAGE_SIZE, attributes);
 		if (status != B_OK)
 			panic("error mapping early page!");
 	}
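
Since every call site previously passed the same function, &vm_allocate_early_physical_page, having the paging methods call it directly is behavior-preserving, which is why no functional change is expected. The recurring allocation pattern in the MapEarly() implementations above, sketched schematically (the error handling shown is illustrative; the m68k paths, for instance, return ENOMEM):

	// Allocate one physical page for a missing page table and convert the
	// returned page number into a physical byte address.
	phys_addr_t table = vm_allocate_early_physical_page(args) * B_PAGE_SIZE;
	if (table == 0)
		return ENOMEM;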