kernel/vm: Decommit discarded pages of overcommitted caches.

Overcommitted caches should only have a commitment equal to the
size of the pages they actually contain, so we should decommit
whenever pages are discarded.
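
As a rough illustration of that invariant, here is a standalone
sketch (not Haiku's actual VMCache code; the type and constant
below are invented for this example): discarding pages from an
overcommitting cache shrinks its commitment by the same number
of bytes.

#include <cstdint>

// Standalone sketch only: "SketchCache" and kPageSize are invented and do
// not correspond to Haiku's real VMCache or B_PAGE_SIZE definitions.
constexpr uint64_t kPageSize = 4096;

struct SketchCache {
    uint64_t pageCount = 0;       // pages the cache actually contains
    uint64_t committedBytes = 0;  // memory currently committed to it
    bool canOvercommit = true;

    void Commit(uint64_t bytes)
    {
        // The invariant from this commit: the commitment must at least
        // cover the pages that are actually resident.
        const uint64_t minimum = pageCount * kPageSize;
        committedBytes = bytes < minimum ? minimum : bytes;
    }

    void Discard(uint64_t pages)
    {
        if (pages > pageCount)
            pages = pageCount;
        pageCount -= pages;
        // Decommit what was just discarded, so the commitment tracks the
        // remaining pages instead of staying inflated.
        if (canOvercommit)
            Commit(committedBytes - pages * kPageSize);
    }
};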

This changes the API of VMCache::Discard to return an ssize_t:
the total size, in bytes, of the pages that were discarded (or a
negative error code on failure). Nothing outside VMCache itself
appears to check the return value, and it apparently never fails
anyway, so that's fine.
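
A hypothetical caller of the new interface might look like this
(not code from the tree; the helper name is made up, and as noted
above nothing outside VMCache currently inspects the result):

// Hypothetical example: discard_and_log() is not a real Haiku function; it
// assumes the usual kernel headers for VMCache, status_t and dprintf().
// Locking requirements are unchanged from the old status_t version.
static status_t
discard_and_log(VMCache* cache, off_t offset, off_t size)
{
    const ssize_t discardedBytes = cache->Discard(offset, size);
    if (discardedBytes < 0)
        return (status_t)discardedBytes;
            // negative return values are error codes

    dprintf("discarded %ld bytes from cache %p\n",
        (long)discardedBytes, cache);
    return B_OK;
}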

Also add asserts to Commit() that the new commitment at least
encompasses all pages the cache actually contains.
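
As a small worked example of what those asserts guard against
(purely illustrative, assuming 4096-byte pages): a cache still
holding 3 resident pages must keep at least 3 * 4096 = 12288
bytes committed, so a call like Commit(8192, ...) would now trip
the assertion, since 8192 >= 3 * 4096 is false.
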
Augustin Cavalier 2024-12-13 15:08:45 -05:00
parent 4e993df9e9
commit d00cb444a6
6 changed files with 31 additions and 9 deletions

@@ -135,7 +135,7 @@ public:
     virtual status_t Adopt(VMCache* source, off_t offset, off_t size,
         off_t newOffset);
-    virtual status_t Discard(off_t offset, off_t size);
+    virtual ssize_t Discard(off_t offset, off_t size);
     status_t FlushAndRemoveAllPages();
@@ -220,7 +220,7 @@ private:
     void _RemoveConsumer(VMCache* consumer);
     bool _FreePageRange(VMCachePagesTree::Iterator it,
-        page_num_t* toPage);
+        page_num_t* toPage, page_num_t* discarded);
 private:
     int32 fRefCount;

@@ -604,11 +604,14 @@ VMAnonymousCache::Rebase(off_t newBase, int priority)
 }
-status_t
+ssize_t
 VMAnonymousCache::Discard(off_t offset, off_t size)
 {
     _FreeSwapPageRange(offset, offset + size);
-    return VMCache::Discard(offset, size);
+    const ssize_t discarded = VMCache::Discard(offset, size);
+    if (discarded > 0 && fCanOvercommit)
+        Commit(committed_size - discarded, VM_PRIORITY_USER);
+    return discarded;
 }
@@ -720,6 +723,7 @@ VMAnonymousCache::Commit(off_t size, int priority)
     TRACE("%p->VMAnonymousCache::Commit(%" B_PRIdOFF ")\n", this, size);
     AssertLocked();
+    ASSERT(size >= (page_count * B_PAGE_SIZE));
     // If we can overcommit, we don't commit here, but in Fault(). We always
     // unreserve memory, if we're asked to shrink our commitment, though.

@@ -47,7 +47,7 @@ public:
     virtual status_t Adopt(VMCache* source, off_t offset,
         off_t size, off_t newOffset);
-    virtual status_t Discard(off_t offset, off_t size);
+    virtual ssize_t Discard(off_t offset, off_t size);
     virtual bool CanOvercommit();
     virtual status_t Commit(off_t size, int priority);

@@ -58,10 +58,21 @@ VMAnonymousNoSwapCache::Init(bool canOvercommit, int32 numPrecommittedPages,
 }
+ssize_t
+VMAnonymousNoSwapCache::Discard(off_t offset, off_t size)
+{
+    const ssize_t discarded = VMCache::Discard(offset, size);
+    if (discarded > 0 && fCanOvercommit)
+        Commit(committed_size - discarded, VM_PRIORITY_USER);
+    return discarded;
+}
 status_t
 VMAnonymousNoSwapCache::Commit(off_t size, int priority)
 {
     AssertLocked();
+    ASSERT(size >= (page_count * B_PAGE_SIZE));
     // If we can overcommit, we don't commit here, but in Fault(). We always
     // unreserve memory, if we're asked to shrink our commitment, though.

@@ -22,6 +22,8 @@ public:
         int32 numGuardPages,
         uint32 allocationFlags);
+    virtual ssize_t Discard(off_t offset, off_t size);
     virtual bool CanOvercommit();
     virtual status_t Commit(off_t size, int priority);
     virtual bool HasPage(off_t offset);

@@ -1106,7 +1106,7 @@ VMCache::SetMinimalCommitment(off_t commitment, int priority)
 bool
 VMCache::_FreePageRange(VMCachePagesTree::Iterator it,
-    page_num_t* toPage = NULL)
+    page_num_t* toPage = NULL, page_num_t* discarded = NULL)
 {
     for (vm_page* page = it.Next();
         page != NULL && (toPage == NULL || page->cache_offset < *toPage);
@@ -1118,6 +1118,8 @@ VMCache::_FreePageRange(VMCachePagesTree::Iterator it,
             // as we might cause a deadlock this way
             page->busy_writing = false;
                 // this will notify the writer to free the page
+            if (discarded != NULL)
+                (*discarded)++;
             continue;
         }
@@ -1138,6 +1140,8 @@ VMCache::_FreePageRange(VMCachePagesTree::Iterator it,
         // removing the current node is safe.
         vm_page_free(this, page);
+        if (discarded != NULL)
+            (*discarded)++;
     }
     return false;
@@ -1241,15 +1245,16 @@ VMCache::Adopt(VMCache* source, off_t offset, off_t size, off_t newOffset)
 /*! Discards pages in the given range. */
-status_t
+ssize_t
 VMCache::Discard(off_t offset, off_t size)
 {
+    page_num_t discarded = 0;
     page_num_t startPage = offset >> PAGE_SHIFT;
     page_num_t endPage = (offset + size + B_PAGE_SIZE - 1) >> PAGE_SHIFT;
-    while (_FreePageRange(pages.GetIterator(startPage, true, true), &endPage))
+    while (_FreePageRange(pages.GetIterator(startPage, true, true), &endPage, &discarded))
         ;
-    return B_OK;
+    return (discarded * B_PAGE_SIZE);
 }