Resize caches in all cases when cutting areas

* Adds VMCache::MovePageRange() and VMCache::Rebase() to facilitate
  this.

Applied on top of hrev45098 and rebased with the hrev45564 page_num_t to
off_t change included.

Change-Id: Ie61bf43696783e3376fb4144ddced3781aa092ba
Reviewed-on: https://review.haiku-os.org/c/haiku/+/2581
Reviewed-by: waddlesplash <waddlesplash@gmail.com>
Authored by Hamish Morrison on 2012-04-15 18:03:57 +01:00; committed by waddlesplash
parent 905284b397
commit c6657ffe02
6 changed files with 268 additions and 22 deletions
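
The commit message above introduces VMCache::MovePageRange() and VMCache::Rebase(). As a rough orientation before the diffs, here is a minimal, standalone C++ sketch of the semantics the two methods provide, using a toy page map; ToyCache, kPageSize, and all field names are illustrative stand-ins, not Haiku kernel APIs.

// Standalone model (not kernel code) of the semantics added by
// VMCache::Rebase() and VMCache::MovePageRange(). All names here are
// illustrative; builds with any C++11 compiler.
#include <cstdint>
#include <cstdio>
#include <map>

static const int64_t kPageSize = 4096;	// stands in for B_PAGE_SIZE

struct ToyCache {
	int64_t virtualBase = 0;
	int64_t virtualEnd = 0;
	std::map<int64_t, int> pages;	// page index -> dummy page data

	// Rebase: raise the base and discard pages that now fall below it,
	// mirroring how VMCache::Rebase() frees pages cut off the cache's head.
	void Rebase(int64_t newBase)
	{
		const int64_t basePage = newBase / kPageSize;
		for (auto it = pages.begin(); it != pages.end();) {
			if (it->first < basePage)
				it = pages.erase(it);
			else
				break;	// the map is ordered; nothing further lies below
		}
		virtualBase = newBase;
	}

	// MovePageRange: move all pages in [offset, offset + size) from "source"
	// into this cache, shifted by (newOffset - offset).
	void MovePageRange(ToyCache& source, int64_t offset, int64_t size,
		int64_t newOffset)
	{
		const int64_t startPage = offset / kPageSize;
		const int64_t endPage = (offset + size + kPageSize - 1) / kPageSize;
		const int64_t pageShift = (newOffset - offset) / kPageSize;

		for (auto it = source.pages.lower_bound(startPage);
				it != source.pages.end() && it->first < endPage;) {
			pages[it->first + pageShift] = it->second;
			it = source.pages.erase(it);
		}
	}
};

int main()
{
	ToyCache cache;
	cache.virtualEnd = 16 * kPageSize;
	for (int64_t i = 0; i < 16; i++)
		cache.pages[i] = 1;

	// Cutting the head of an area: drop the first four pages.
	cache.Rebase(4 * kPageSize);

	// Cutting the middle: split pages 10..15 off into a second cache.
	ToyCache second;
	second.MovePageRange(cache, 10 * kPageSize, 6 * kPageSize, 0);

	printf("first cache: %zu pages, second cache: %zu pages\n",
		cache.pages.size(), second.pages.size());
	return 0;
}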

headers/private/kernel/vm/VMCache.h

@@ -110,8 +110,11 @@ public:
vm_page* LookupPage(off_t offset);
void InsertPage(vm_page* page, off_t offset);
void RemovePage(vm_page* page);
void MovePage(vm_page* page, off_t offset);
void MovePage(vm_page* page);
void MoveAllPages(VMCache* fromCache);
void MovePageRange(VMCache* source, off_t offset,
off_t size, off_t newOffset);
inline page_num_t WiredPagesCount() const;
inline void IncrementWiredPagesCount();
@@ -130,6 +133,7 @@ public:
status_t SetMinimalCommitment(off_t commitment,
int priority);
virtual status_t Resize(off_t newSize, int priority);
virtual status_t Rebase(off_t newBase, int priority);
status_t FlushAndRemoveAllPages();

src/system/kernel/vm/VMAnonymousCache.cpp

@@ -545,6 +545,65 @@ VMAnonymousCache::Resize(off_t newSize, int priority)
}
status_t
VMAnonymousCache::Rebase(off_t newBase, int priority)
{
// If the cache's base grows, drop all swap pages below the new base.
if (fAllocatedSwapSize > 0) {
off_t basePage = newBase >> PAGE_SHIFT;
swap_block* swapBlock = NULL;
for (off_t pageIndex = 0;
pageIndex < basePage && fAllocatedSwapSize > 0;
pageIndex++) {
WriteLocker locker(sSwapHashLock);
// Get the swap slot index for the page.
swap_addr_t blockIndex = pageIndex & SWAP_BLOCK_MASK;
if (swapBlock == NULL || blockIndex == 0) {
swap_hash_key key = { this, pageIndex };
swapBlock = sSwapHashTable.Lookup(key);
if (swapBlock == NULL) {
pageIndex = ROUNDUP(pageIndex + 1, SWAP_BLOCK_PAGES);
continue;
}
}
swap_addr_t slotIndex = swapBlock->swap_slots[blockIndex];
vm_page* page;
if (slotIndex != SWAP_SLOT_NONE
&& ((page = LookupPage(pageIndex * B_PAGE_SIZE)) == NULL
|| !page->busy)) {
// TODO: We skip (i.e. leak) swap space of busy pages, since
// there could be I/O going on (paging in/out). Waiting is
// not an option as 1. unlocking the cache means that new
// swap pages could be added in a range we've already
cleared (since the cache still has the old base) and 2.
// we'd risk a deadlock in case we come from the file cache
// and the FS holds the node's write-lock. We should mark
// the page invalid and let the one responsible clean up.
// There's just no such mechanism yet.
swap_slot_dealloc(slotIndex, 1);
fAllocatedSwapSize -= B_PAGE_SIZE;
swapBlock->swap_slots[blockIndex] = SWAP_SLOT_NONE;
if (--swapBlock->used == 0) {
// All swap pages have been freed -- we can discard the swap
// block.
sSwapHashTable.RemoveUnchecked(swapBlock);
object_cache_free(sSwapBlockCache, swapBlock,
CACHE_DONT_WAIT_FOR_MEMORY
| CACHE_DONT_LOCK_KERNEL_SPACE);
}
}
}
}
return VMCache::Rebase(newBase, priority);
}
status_t
VMAnonymousCache::Commit(off_t size, int priority)
{

src/system/kernel/vm/VMAnonymousCache.h

@@ -40,12 +40,15 @@ public:
uint32 allocationFlags);
virtual status_t Resize(off_t newSize, int priority);
virtual status_t Rebase(off_t newBase, int priority);
virtual status_t Commit(off_t size, int priority);
virtual bool HasPage(off_t offset);
virtual bool DebugHasPage(off_t offset);
virtual int32 GuardSize() { return fGuardedSize; }
virtual void SetGuardSize(int32 guardSize)
{ fGuardedSize = guardSize; }
virtual status_t Read(off_t offset, const generic_io_vec* vecs,
size_t count, uint32 flags,

src/system/kernel/vm/VMAnonymousNoSwapCache.h

@@ -26,6 +26,8 @@ public:
virtual bool HasPage(off_t offset);
virtual int32 GuardSize() { return fGuardedSize; }
virtual void SetGuardSize(int32 guardSize)
{ fGuardedSize = guardSize; }
virtual status_t Read(off_t offset, const generic_io_vec *vecs,
size_t count, uint32 flags,

src/system/kernel/vm/VMCache.cpp

@@ -187,6 +187,29 @@ class Resize : public VMCacheTraceEntry {
};
class Rebase : public VMCacheTraceEntry {
public:
Rebase(VMCache* cache, off_t base)
:
VMCacheTraceEntry(cache),
fOldBase(cache->virtual_base),
fBase(base)
{
Initialized();
}
virtual void AddDump(TraceOutput& out)
{
out.Print("vm cache rebase: cache: %p, base: %lld -> %lld", fCache,
fOldBase, fBase);
}
private:
off_t fOldBase;
off_t fBase;
};
class AddConsumer : public VMCacheTraceEntry {
public:
AddConsumer(VMCache* cache, VMCache* consumer)
@@ -825,11 +848,12 @@ VMCache::RemovePage(vm_page* page)
}
/*! Moves the given page from its current cache and inserts it into this cache.
/*! Moves the given page from its current cache and inserts it into this cache
at the given offset.
Both caches must be locked.
*/
void
VMCache::MovePage(vm_page* page)
VMCache::MovePage(vm_page* page, off_t offset)
{
VMCache* oldCache = page->Cache();
@@ -841,6 +865,9 @@ VMCache::MovePage(vm_page* page)
oldCache->page_count--;
T2(RemovePage(oldCache, page));
// change the offset
page->cache_offset = offset >> PAGE_SHIFT;
// insert here
pages.Insert(page);
page_count++;
@@ -854,6 +881,15 @@ VMCache::MovePage(vm_page* page)
T2(InsertPage(this, page, page->cache_offset << PAGE_SHIFT));
}
/*! Moves the given page from its current cache and inserts it into this cache.
Both caches must be locked.
*/
void
VMCache::MovePage(vm_page* page)
{
MovePage(page, page->cache_offset << PAGE_SHIFT);
}
/*! Moves all pages from the given cache to this one.
Both caches must be locked. This cache must be empty.
@@ -888,6 +924,27 @@ VMCache::MoveAllPages(VMCache* fromCache)
}
/*! Moves the given pages from their current cache and inserts them into this
cache. Both caches must be locked.
*/
void
VMCache::MovePageRange(VMCache* source, off_t offset, off_t size,
off_t newOffset)
{
page_num_t startPage = offset >> PAGE_SHIFT;
page_num_t endPage = (offset + size + B_PAGE_SIZE - 1) >> PAGE_SHIFT;
int32 offsetChange = (int32)(newOffset - offset);
VMCachePagesTree::Iterator it = source->pages.GetIterator(startPage, true,
true);
for (vm_page* page = it.Next();
page != NULL && page->cache_offset < endPage;
page = it.Next()) {
MovePage(page, (page->cache_offset << PAGE_SHIFT) + offsetChange);
}
}
/*! Waits until one or more events happened for a given page which belongs to
this cache.
The cache must be locked. It will be unlocked by the method. \a relock
@@ -1141,6 +1198,71 @@ VMCache::Resize(off_t newSize, int priority)
return B_OK;
}
/*! This function updates the virtual_base field of the cache.
If needed, it will free up all pages that don't belong to the cache anymore.
The cache lock must be held when you call it.
Since removed pages don't belong to the cache any longer, they are not
written back before being removed.
Note, this function may temporarily release the cache lock in case it
has to wait for busy pages.
*/
status_t
VMCache::Rebase(off_t newBase, int priority)
{
TRACE(("VMCache::Rebase(cache %p, newBase %Ld) old base %Ld\n",
this, newBase, this->virtual_base));
this->AssertLocked();
T(Rebase(this, newBase));
status_t status = Commit(virtual_end - newBase, priority);
if (status != B_OK)
return status;
uint32 basePage = (uint32)(newBase >> PAGE_SHIFT);
if (newBase > virtual_base) {
// we need to remove all pages in the cache that fall below the new
// virtual base
VMCachePagesTree::Iterator it = pages.GetIterator();
for (vm_page* page = it.Next();
page != NULL && page->cache_offset < basePage;
page = it.Next()) {
if (page->busy) {
if (page->busy_writing) {
// We cannot wait for the page to become available
// as we might cause a deadlock this way
page->busy_writing = false;
// this will notify the writer to free the page
} else {
// wait for page to become unbusy
WaitForPageEvents(page, PAGE_EVENT_NOT_BUSY, true);
// restart from the start of the list
it = pages.GetIterator();
}
continue;
}
// remove the page and put it into the free queue
DEBUG_PAGE_ACCESS_START(page);
vm_remove_all_page_mappings(page);
ASSERT(page->WiredCount() == 0);
// TODO: Find a real solution! If the page is wired
// temporarily (e.g. by lock_memory()), we actually must not
// unmap it!
RemovePage(page);
vm_page_free(this, page);
// Note: When iterating through an IteratableSplayTree,
// removing the current node is safe.
}
}
virtual_base = newBase;
return B_OK;
}
/*! You have to call this function with the VMCache lock held. */
status_t
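
As context for the vm.cpp changes that follow: cut_area() is reached when only part of an existing area is unmapped, for example by a partial munmap(). The following user-level program (plain POSIX; nothing Haiku-specific in the code itself) is a sketch of the middle-cut case those changes handle; on Haiku, the partial munmap() is assumed to take the cut_area() path, which with this commit splits the backing anonymous cache instead of leaving one oversized cache behind.

// Punch a four-page hole in the middle of a 16-page anonymous mapping.
// The kernel must cut the area into a head piece and a tail piece.
#include <cstdio>
#include <cstring>
#include <sys/mman.h>
#include <unistd.h>

int main()
{
	const size_t pageSize = static_cast<size_t>(sysconf(_SC_PAGESIZE));
	const size_t length = 16 * pageSize;

	char* base = static_cast<char*>(mmap(NULL, length,
		PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0));
	if (base == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	memset(base, 'x', length);

	// Unmap pages 6..9: this is the "tough part" middle cut in cut_area().
	if (munmap(base + 6 * pageSize, 4 * pageSize) != 0) {
		perror("munmap");
		return 1;
	}

	// Both remaining pieces stay usable.
	printf("head page: %c, tail page: %c\n", base[0], base[12 * pageSize]);

	// Clean up the head and tail pieces.
	munmap(base, 6 * pageSize);
	munmap(base + 10 * pageSize, 6 * pageSize);
	return 0;
}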

src/system/kernel/vm/vm.cpp

@@ -676,9 +676,10 @@ cut_area(VMAddressSpace* addressSpace, VMArea* area, addr_t address,
addr_t oldBase = area->Base();
addr_t newBase = lastAddress + 1;
size_t newSize = areaLast - lastAddress;
size_t newOffset = newBase - oldBase;
// unmap pages
unmap_pages(area, oldBase, newBase - oldBase);
unmap_pages(area, oldBase, newOffset);
// resize the area
status_t error = addressSpace->ShrinkAreaHead(area, newSize,
@@ -686,9 +687,17 @@ cut_area(VMAddressSpace* addressSpace, VMArea* area, addr_t address,
if (error != B_OK)
return error;
// TODO: If no one else uses the area's cache, we should resize it, too!
area->cache_offset += newBase - oldBase;
// If no one else uses the area's cache, we can resize it, too.
if (cache->areas == area && area->cache_next == NULL
&& cache->consumers.IsEmpty()
&& cache->type == CACHE_TYPE_RAM) {
// Since VMCache::Rebase() can temporarily drop the lock, we must
// unlock all lower caches to prevent locking order inversion.
cacheChainLocker.Unlock(cache);
cache->Rebase(cache->virtual_base + newOffset, priority);
cache->ReleaseRefAndUnlock();
}
area->cache_offset += newOffset;
return B_OK;
}
@@ -696,7 +705,6 @@ cut_area(VMAddressSpace* addressSpace, VMArea* area, addr_t address,
// The tough part -- cut a piece out of the middle of the area.
// We do that by shrinking the area to the begin section and creating a
// new area for the end section.
addr_t firstNewSize = address - area->Base();
addr_t secondBase = lastAddress + 1;
addr_t secondSize = areaLast - lastAddress;
@@ -711,26 +719,74 @@ cut_area(VMAddressSpace* addressSpace, VMArea* area, addr_t address,
if (error != B_OK)
return error;
// TODO: If no one else uses the area's cache, we might want to create a
// new cache for the second area, transfer the concerned pages from the
// first cache to it and resize the first cache.
// map the second area
virtual_address_restrictions addressRestrictions = {};
addressRestrictions.address = (void*)secondBase;
addressRestrictions.address_specification = B_EXACT_ADDRESS;
VMArea* secondArea;
error = map_backing_store(addressSpace, cache,
area->cache_offset + (secondBase - area->Base()), area->name,
secondSize, area->wiring, area->protection, REGION_NO_PRIVATE_MAP, 0,
&addressRestrictions, kernel, &secondArea, NULL);
if (error != B_OK) {
addressSpace->ShrinkAreaTail(area, oldSize, allocationFlags);
return error;
}
// We need a cache reference for the new area.
cache->AcquireRefLocked();
// If no one else uses the area's cache and it's an anonymous cache, we
// can split it.
if (cache->areas == area && area->cache_next == NULL
&& cache->consumers.IsEmpty()
&& cache->type == CACHE_TYPE_RAM) {
// Create a new cache for the second area.
VMCache* secondCache;
error = VMCacheFactory::CreateAnonymousCache(secondCache, false, 0, 0,
dynamic_cast<VMAnonymousNoSwapCache*>(cache) == NULL,
VM_PRIORITY_USER);
if (error != B_OK) {
addressSpace->ShrinkAreaTail(area, oldSize, allocationFlags);
return error;
}
secondCache->Lock();
// Transfer the concerned pages from the first cache.
secondCache->MovePageRange(cache, secondBase - area->Base()
+ area->cache_offset, secondSize, area->cache_offset);
secondCache->virtual_base = area->cache_offset;
secondCache->virtual_end = area->cache_offset + secondSize;
// Since VMCache::Resize() can temporarily drop the lock, we must
// unlock all lower caches to prevent locking order inversion.
cacheChainLocker.Unlock(cache);
cache->Resize(cache->virtual_base + firstNewSize, priority);
// Don't unlock the cache yet because we might have to resize it
// back.
// Map the second area.
error = map_backing_store(addressSpace, secondCache, area->cache_offset,
area->name, secondSize, area->wiring, area->protection,
REGION_NO_PRIVATE_MAP, 0, &addressRestrictions, kernel, &secondArea,
NULL);
if (error != B_OK) {
// Restore the original cache.
cache->Resize(cache->virtual_base + oldSize, priority);
// Move the pages back.
cache->MovePageRange(secondCache, area->cache_offset, secondSize,
secondBase - area->Base() + area->cache_offset);
cache->ReleaseRefAndUnlock();
secondCache->ReleaseRefAndUnlock();
addressSpace->ShrinkAreaTail(area, oldSize, allocationFlags);
return error;
}
// Now we can unlock it.
cache->ReleaseRefAndUnlock();
secondCache->Unlock();
} else {
error = map_backing_store(addressSpace, cache, area->cache_offset
+ (secondBase - area->Base()),
area->name, secondSize, area->wiring, area->protection,
REGION_NO_PRIVATE_MAP, 0, &addressRestrictions, kernel, &secondArea,
NULL);
if (error != B_OK) {
addressSpace->ShrinkAreaTail(area, oldSize, allocationFlags);
return error;
}
// We need a cache reference for the new area.
cache->AcquireRefLocked();
}
if (_secondArea != NULL)
*_secondArea = secondArea;