x86: Allocate as many initial physical page pools as needed

Pawel Dziepak 2013-12-02 03:19:25 +01:00
parent 255b601750
commit 2e3cbcfa8a
6 changed files with 106 additions and 46 deletions
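
The change sizes the static pool arrays for the worst case at compile time (MAX_INITIAL_POOLS is computed from SMP_MAX_CPUS), but at boot it constructs and initializes only as many pools as the detected CPU count requires, via the new _GetInitialPoolCount() helpers. The divisors in those helpers correspond to one page table's worth of slots per pool: 1024 entries with 32-bit paging, kPAEPageTableEntryCount (512) with PAE. Below is a minimal standalone sketch of that rounding, not kernel code: cpuCount stands in for smp_get_num_cpus(), and the constants mirror the ones this commit moves into x86_physical_page_mapper_large_memory.h.

    #include <cstdio>

    // Slot requirements per CPU, as defined in
    // x86_physical_page_mapper_large_memory.h by this commit.
    #define USER_SLOTS_PER_CPU      16
    #define KERNEL_SLOTS_PER_CPU    16
    #define TOTAL_SLOTS_PER_CPU     (USER_SLOTS_PER_CPU + KERNEL_SLOTS_PER_CPU + 1)
        // one slot is for use in interrupts
    #define EXTRA_SLOTS             2

    // Ceiling division of the required slot count by the slots one initial
    // pool provides (1024 page table entries for 32-bit paging, 512 for PAE).
    static int
    initial_pool_count(int cpuCount, int slotsPerPool)
    {
        int requiredSlots = cpuCount * TOTAL_SLOTS_PER_CPU + EXTRA_SLOTS;
        return (requiredSlots + slotsPerPool - 1) / slotsPerPool;
    }

    int
    main()
    {
        // e.g. a 64-CPU machine needs 64 * 33 + 2 = 2114 slots:
        printf("32-bit: %d pools\n", initial_pool_count(64, 1024));    // 3
        printf("PAE:    %d pools\n", initial_pool_count(64, 512));     // 5
        return 0;
    }

A single statically allocated initial pool, as before this change, caps the early-boot slot supply at one page table's worth no matter how many CPUs need slots; rounding the pool count up from the actual requirement removes that cap.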

View File

@@ -15,6 +15,7 @@
 #include <AutoDeleter.h>
+#include <arch/smp.h>
 #include <arch_system_info.h>
 #include <boot/kernel_args.h>
 #include <int.h>
@@ -36,6 +37,10 @@
 #endif
+#define MAX_INITIAL_POOLS \
+	((SMP_MAX_CPUS * TOTAL_SLOTS_PER_CPU + EXTRA_SLOTS + 1023) / 1024)
 using X86LargePhysicalPageMapper::PhysicalPageSlot;
@@ -60,7 +65,7 @@ public:
 						addr_t virtualAddress);
 public:
-	static PhysicalPageSlotPool sInitialPhysicalPagePool;
+	static PhysicalPageSlotPool sInitialPhysicalPagePool[MAX_INITIAL_POOLS];
 private:
 	area_id			fDataArea;
@@ -71,7 +76,8 @@ private:
 X86PagingMethod32Bit::PhysicalPageSlotPool
-	X86PagingMethod32Bit::PhysicalPageSlotPool::sInitialPhysicalPagePool;
+	X86PagingMethod32Bit::PhysicalPageSlotPool::sInitialPhysicalPagePool[
+		MAX_INITIAL_POOLS];
 X86PagingMethod32Bit::PhysicalPageSlotPool::~PhysicalPageSlotPool()
@@ -287,20 +293,23 @@ X86PagingMethod32Bit::Init(kernel_args* args,
 	X86PagingStructures32Bit::StaticInit();
-	// create the initial pool for the physical page mapper
+	// create the initial pools for the physical page mapper
+	int32 poolCount = _GetInitialPoolCount();
 	PhysicalPageSlotPool* pool
 		= new(&PhysicalPageSlotPool::sInitialPhysicalPagePool)
-			PhysicalPageSlotPool;
-	status_t error = pool->InitInitial(args);
-	if (error != B_OK) {
-		panic("X86PagingMethod32Bit::Init(): Failed to create initial pool "
-			"for physical page mapper!");
-		return error;
+			PhysicalPageSlotPool[poolCount];
+	for (int32 i = 0; i < poolCount; i++) {
+		status_t error = pool[i].InitInitial(args);
+		if (error != B_OK) {
+			panic("X86PagingMethod32Bit::Init(): Failed to create initial pool "
+				"for physical page mapper!");
+			return error;
+		}
 	}
 	// create physical page mapper
-	large_memory_physical_page_ops_init(args, pool, fPhysicalPageMapper,
-		fKernelPhysicalPageMapper);
+	large_memory_physical_page_ops_init(args, pool, poolCount, sizeof(*pool),
+		fPhysicalPageMapper, fKernelPhysicalPageMapper);
 	// TODO: Select the best page mapper!
 	// enable global page feature if available
@@ -337,10 +346,13 @@ X86PagingMethod32Bit::InitPostArea(kernel_args* args)
 	if (area < B_OK)
 		return area;
-	error = PhysicalPageSlotPool::sInitialPhysicalPagePool
-		.InitInitialPostArea(args);
-	if (error != B_OK)
-		return error;
+	int32 poolCount = _GetInitialPoolCount();
+	for (int32 i = 0; i < poolCount; i++) {
+		status_t error = PhysicalPageSlotPool::sInitialPhysicalPagePool[i]
+			.InitInitialPostArea(args);
+		if (error != B_OK)
+			return error;
+	}
 	return B_OK;
 }
@@ -517,6 +529,15 @@ X86PagingMethod32Bit::PutPageTableEntryInTable(page_table_entry* entry,
 }
+inline int32
+X86PagingMethod32Bit::_GetInitialPoolCount()
+{
+	int32 requiredSlots = smp_get_num_cpus() * TOTAL_SLOTS_PER_CPU
+		+ EXTRA_SLOTS;
+	return (requiredSlots + 1023) / 1024;
+}
 /*static*/ void
 X86PagingMethod32Bit::_EarlyPreparePageTables(page_table_entry* pageTables,
 	addr_t address, size_t size)

View File

@@ -80,6 +80,8 @@ private:
 			friend struct PhysicalPageSlotPool;
 private:
+	inline	int32		_GetInitialPoolCount();
 	static	void		_EarlyPreparePageTables(
 							page_table_entry* pageTables,
 							addr_t address, size_t size);

View File

@@ -15,6 +15,7 @@
 #include <AutoDeleter.h>
+#include <arch/smp.h>
 #include <boot/kernel_args.h>
 #include <util/AutoLock.h>
 #include <vm/vm.h>
@@ -40,6 +41,11 @@
 #if B_HAIKU_PHYSICAL_BITS == 64
+#define MAX_INITIAL_POOLS \
+	((SMP_MAX_CPUS * TOTAL_SLOTS_PER_CPU + EXTRA_SLOTS \
+		+ kPAEPageTableEntryCount - 1) / kPAEPageTableEntryCount)
 using X86LargePhysicalPageMapper::PhysicalPageSlot;
@@ -364,7 +370,7 @@ public:
 						addr_t virtualAddress);
 public:
-	static PhysicalPageSlotPool sInitialPhysicalPagePool;
+	static PhysicalPageSlotPool sInitialPhysicalPagePool[MAX_INITIAL_POOLS];
 private:
 	area_id			fDataArea;
@@ -375,7 +381,8 @@ private:
 X86PagingMethodPAE::PhysicalPageSlotPool
-	X86PagingMethodPAE::PhysicalPageSlotPool::sInitialPhysicalPagePool;
+	X86PagingMethodPAE::PhysicalPageSlotPool::sInitialPhysicalPagePool[
+		MAX_INITIAL_POOLS];
 X86PagingMethodPAE::PhysicalPageSlotPool::~PhysicalPageSlotPool()
@@ -585,20 +592,23 @@ X86PagingMethodPAE::Init(kernel_args* args,
 		fEarlyPageStructuresSize, fKernelVirtualPageDirs,
 		fKernelPhysicalPageDirs, fFreeVirtualSlot, fFreeVirtualSlotPTE);
-	// create the initial pool for the physical page mapper
+	// create the initial pools for the physical page mapper
+	int32 poolCount = _GetInitialPoolCount();
 	PhysicalPageSlotPool* pool
 		= new(&PhysicalPageSlotPool::sInitialPhysicalPagePool)
-			PhysicalPageSlotPool;
-	status_t error = pool->InitInitial(this, args);
-	if (error != B_OK) {
-		panic("X86PagingMethodPAE::Init(): Failed to create initial pool "
-			"for physical page mapper!");
-		return error;
+			PhysicalPageSlotPool[poolCount];
+	for (int32 i = 0; i < poolCount; i++) {
+		status_t error = pool[i].InitInitial(this, args);
+		if (error != B_OK) {
+			panic("X86PagingMethodPAE::Init(): Failed to create initial pool "
+				"for physical page mapper!");
+			return error;
+		}
 	}
 	// create physical page mapper
-	large_memory_physical_page_ops_init(args, pool, fPhysicalPageMapper,
-		fKernelPhysicalPageMapper);
+	large_memory_physical_page_ops_init(args, pool, poolCount, sizeof(*pool),
+		fPhysicalPageMapper, fKernelPhysicalPageMapper);
 	*_physicalPageMapper = fPhysicalPageMapper;
 	return B_OK;
@@ -615,11 +625,14 @@ X86PagingMethodPAE::InitPostArea(kernel_args* args)
 	if (area < B_OK)
 		return area;
-	// let the initial page pool create areas for its structures
-	status_t error = PhysicalPageSlotPool::sInitialPhysicalPagePool
-		.InitInitialPostArea(args);
-	if (error != B_OK)
-		return error;
+	// let the initial page pools create areas for its structures
+	int32 poolCount = _GetInitialPoolCount();
+	for (int32 i = 0; i < poolCount; i++) {
+		status_t error = PhysicalPageSlotPool::sInitialPhysicalPagePool[i]
+			.InitInitialPostArea(args);
+		if (error != B_OK)
+			return error;
+	}
 	// The early physical page mapping mechanism is no longer needed. Unmap the
 	// slot.
@@ -884,6 +897,16 @@ X86PagingMethodPAE::Free32BitPage(void* address, phys_addr_t physicalAddress,
 }
+inline int32
+X86PagingMethodPAE::_GetInitialPoolCount()
+{
+	int32 requiredSlots = smp_get_num_cpus() * TOTAL_SLOTS_PER_CPU
+		+ EXTRA_SLOTS;
+	return (requiredSlots + kPAEPageTableEntryCount - 1)
+		/ kPAEPageTableEntryCount;
+}
 bool
 X86PagingMethodPAE::_EarlyQuery(addr_t virtualAddress,
 	phys_addr_t* _physicalAddress)

View File

@@ -100,6 +100,8 @@ private:
 			friend struct PhysicalPageSlotPool;
 private:
+	inline	int32		_GetInitialPoolCount();
 			bool		_EarlyQuery(addr_t virtualAddress,
 							phys_addr_t* _physicalAddress);
 			pae_page_table_entry* _EarlyGetPageTable(phys_addr_t address);

View File

@@ -48,12 +48,6 @@
 // a little longer, thus avoiding re-mapping.
 #define SLOTS_PER_TRANSLATION_MAP	4
-#define USER_SLOTS_PER_CPU			16
-#define KERNEL_SLOTS_PER_CPU		16
-#define TOTAL_SLOTS_PER_CPU			(USER_SLOTS_PER_CPU \
-										+ KERNEL_SLOTS_PER_CPU + 1)
-	// one slot is for use in interrupts
 using X86LargePhysicalPageMapper::PhysicalPageSlot;
 using X86LargePhysicalPageMapper::PhysicalPageSlotPool;
@@ -125,9 +119,10 @@ public:
 						LargeMemoryPhysicalPageMapper();
 			status_t	Init(kernel_args* args,
-							PhysicalPageSlotPool* initialPool,
-							TranslationMapPhysicalPageMapper*&
-								_kernelPageMapper);
+							PhysicalPageSlotPool* initialPools,
+							int32 initalPoolCount, size_t poolSize,
+							TranslationMapPhysicalPageMapper*&
+								_kernelPageMapper);
 	virtual	status_t	CreateTranslationMapPhysicalPageMapper(
 							TranslationMapPhysicalPageMapper** _mapper);
@@ -428,11 +423,16 @@ LargeMemoryPhysicalPageMapper::LargeMemoryPhysicalPageMapper()
 status_t
 LargeMemoryPhysicalPageMapper::Init(kernel_args* args,
-	PhysicalPageSlotPool* initialPool,
+	PhysicalPageSlotPool* initialPools, int32 initialPoolCount, size_t poolSize,
 	TranslationMapPhysicalPageMapper*& _kernelPageMapper)
 {
-	fInitialPool = initialPool;
-	fNonEmptyPools.Add(fInitialPool);
+	ASSERT(initialPoolCount >= 1);
+	fInitialPool = initialPools;
+	for (int32 i = 0; i < initialPoolCount; i++) {
+		uint8* pointer = (uint8*)initialPools + i * poolSize;
+		fNonEmptyPools.Add((PhysicalPageSlotPool*)pointer);
+	}
 	// get the debug slot
 	GetSlot(true, fDebugSlot);
@@ -755,12 +755,14 @@ LargeMemoryPhysicalPageMapper::GetSlotQueue(int32 cpu, bool user)
 status_t
 large_memory_physical_page_ops_init(kernel_args* args,
-	X86LargePhysicalPageMapper::PhysicalPageSlotPool* initialPool,
+	X86LargePhysicalPageMapper::PhysicalPageSlotPool* initialPools,
+	int32 initialPoolCount, size_t poolSize,
 	X86PhysicalPageMapper*& _pageMapper,
 	TranslationMapPhysicalPageMapper*& _kernelPageMapper)
 {
 	new(&sPhysicalPageMapper) LargeMemoryPhysicalPageMapper;
-	sPhysicalPageMapper.Init(args, initialPool, _kernelPageMapper);
+	sPhysicalPageMapper.Init(args, initialPools, initialPoolCount, poolSize,
+		_kernelPageMapper);
 	_pageMapper = &sPhysicalPageMapper;
 	return B_OK;
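
A note on the new poolSize parameter: large_memory_physical_page_ops_init() and LargeMemoryPhysicalPageMapper::Init() only see the pools through the abstract X86LargePhysicalPageMapper::PhysicalPageSlotPool type, while the arrays passed in hold the larger paging-method-specific subclass objects. Indexing the array as initialPools[i] would therefore advance by the wrong element size, which is why the callers pass sizeof(*pool) and Init() steps through the array with explicit byte arithmetic. A small sketch of the same stride pattern, using hypothetical Base/Derived types in place of the real pool classes:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    struct Base { int32_t freeSlots; };
    struct Derived : Base { int32_t fDataArea; int32_t fVirtualBase; };
        // the derived object is larger than what a Base* "sees"

    // Return the i-th element of an array of Derived objects through a Base
    // pointer. Plain "pools + i" would advance by sizeof(Base) and land in the
    // middle of an element, so step by the element size the caller supplies.
    static Base*
    pool_at(Base* pools, int i, size_t poolSize)
    {
        return (Base*)((uint8_t*)pools + i * poolSize);
    }

    int
    main()
    {
        Derived pools[4] = {};
        Base* third = pool_at(pools, 2, sizeof(Derived));
        printf("%d\n", third == &pools[2]);    // prints 1
        return 0;
    }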

View File

@@ -11,6 +11,15 @@
 #include <util/DoublyLinkedList.h>
+#define USER_SLOTS_PER_CPU		16
+#define KERNEL_SLOTS_PER_CPU	16
+#define TOTAL_SLOTS_PER_CPU		(USER_SLOTS_PER_CPU \
+									+ KERNEL_SLOTS_PER_CPU + 1)
+	// one slot is for use in interrupts
+#define EXTRA_SLOTS				2
 class TranslationMapPhysicalPageMapper;
 class X86PhysicalPageMapper;
 struct kernel_args;
@@ -53,7 +62,8 @@ protected:
 status_t large_memory_physical_page_ops_init(kernel_args* args,
-	X86LargePhysicalPageMapper::PhysicalPageSlotPool* initialPool,
+	X86LargePhysicalPageMapper::PhysicalPageSlotPool* initialPools,
+	int32 initialPoolCount, size_t poolSize,
 	X86PhysicalPageMapper*& _pageMapper,
 	TranslationMapPhysicalPageMapper*& _kernelPageMapper);