diff --git a/headers/private/kernel/vm/VMCache.h b/headers/private/kernel/vm/VMCache.h index 8bfc57e323..9101ff75b9 100644 --- a/headers/private/kernel/vm/VMCache.h +++ b/headers/private/kernel/vm/VMCache.h @@ -135,7 +135,7 @@ public: virtual status_t Adopt(VMCache* source, off_t offset, off_t size, off_t newOffset); - virtual status_t Discard(off_t offset, off_t size); + virtual ssize_t Discard(off_t offset, off_t size); status_t FlushAndRemoveAllPages(); @@ -220,7 +220,7 @@ private: void _RemoveConsumer(VMCache* consumer); bool _FreePageRange(VMCachePagesTree::Iterator it, - page_num_t* toPage); + page_num_t* toPage, page_num_t* discarded); private: int32 fRefCount; diff --git a/src/system/kernel/vm/VMAnonymousCache.cpp b/src/system/kernel/vm/VMAnonymousCache.cpp index b77f825e75..eff47ce2f6 100644 --- a/src/system/kernel/vm/VMAnonymousCache.cpp +++ b/src/system/kernel/vm/VMAnonymousCache.cpp @@ -604,11 +604,14 @@ VMAnonymousCache::Rebase(off_t newBase, int priority) } -status_t +ssize_t VMAnonymousCache::Discard(off_t offset, off_t size) { _FreeSwapPageRange(offset, offset + size); - return VMCache::Discard(offset, size); + const ssize_t discarded = VMCache::Discard(offset, size); + if (discarded > 0 && fCanOvercommit) + Commit(committed_size - discarded, VM_PRIORITY_USER); + return discarded; } @@ -720,6 +723,7 @@ VMAnonymousCache::Commit(off_t size, int priority) TRACE("%p->VMAnonymousCache::Commit(%" B_PRIdOFF ")\n", this, size); AssertLocked(); + ASSERT(size >= (page_count * B_PAGE_SIZE)); // If we can overcommit, we don't commit here, but in Fault(). We always // unreserve memory, if we're asked to shrink our commitment, though. 
diff --git a/src/system/kernel/vm/VMAnonymousCache.h b/src/system/kernel/vm/VMAnonymousCache.h index b312e52df0..1d1142869d 100644 --- a/src/system/kernel/vm/VMAnonymousCache.h +++ b/src/system/kernel/vm/VMAnonymousCache.h @@ -47,7 +47,7 @@ public: virtual status_t Adopt(VMCache* source, off_t offset, off_t size, off_t newOffset); - virtual status_t Discard(off_t offset, off_t size); + virtual ssize_t Discard(off_t offset, off_t size); virtual bool CanOvercommit(); virtual status_t Commit(off_t size, int priority); diff --git a/src/system/kernel/vm/VMAnonymousNoSwapCache.cpp b/src/system/kernel/vm/VMAnonymousNoSwapCache.cpp index 09bc70f082..d5d7276f0f 100644 --- a/src/system/kernel/vm/VMAnonymousNoSwapCache.cpp +++ b/src/system/kernel/vm/VMAnonymousNoSwapCache.cpp @@ -58,10 +58,21 @@ VMAnonymousNoSwapCache::Init(bool canOvercommit, int32 numPrecommittedPages, } +ssize_t +VMAnonymousNoSwapCache::Discard(off_t offset, off_t size) +{ + const ssize_t discarded = VMCache::Discard(offset, size); + if (discarded > 0 && fCanOvercommit) + Commit(committed_size - discarded, VM_PRIORITY_USER); + return discarded; +} + + status_t VMAnonymousNoSwapCache::Commit(off_t size, int priority) { AssertLocked(); + ASSERT(size >= (page_count * B_PAGE_SIZE)); // If we can overcommit, we don't commit here, but in Fault(). We always // unreserve memory, if we're asked to shrink our commitment, though. 
diff --git a/src/system/kernel/vm/VMAnonymousNoSwapCache.h b/src/system/kernel/vm/VMAnonymousNoSwapCache.h index 615f20f026..8acda57f92 100644 --- a/src/system/kernel/vm/VMAnonymousNoSwapCache.h +++ b/src/system/kernel/vm/VMAnonymousNoSwapCache.h @@ -22,6 +22,8 @@ public: int32 numGuardPages, uint32 allocationFlags); + virtual ssize_t Discard(off_t offset, off_t size); + virtual bool CanOvercommit(); virtual status_t Commit(off_t size, int priority); virtual bool HasPage(off_t offset); diff --git a/src/system/kernel/vm/VMCache.cpp b/src/system/kernel/vm/VMCache.cpp index d152ad47bc..49c586e5c7 100644 --- a/src/system/kernel/vm/VMCache.cpp +++ b/src/system/kernel/vm/VMCache.cpp @@ -1106,7 +1106,7 @@ VMCache::SetMinimalCommitment(off_t commitment, int priority) bool VMCache::_FreePageRange(VMCachePagesTree::Iterator it, - page_num_t* toPage = NULL) + page_num_t* toPage = NULL, page_num_t* discarded = NULL) { for (vm_page* page = it.Next(); page != NULL && (toPage == NULL || page->cache_offset < *toPage); @@ -1118,6 +1118,8 @@ VMCache::_FreePageRange(VMCachePagesTree::Iterator it, // as we might cause a deadlock this way page->busy_writing = false; // this will notify the writer to free the page + if (discarded != NULL) + (*discarded)++; continue; } @@ -1138,6 +1140,8 @@ VMCache::_FreePageRange(VMCachePagesTree::Iterator it, // removing the current node is safe. vm_page_free(this, page); + if (discarded != NULL) + (*discarded)++; } return false; @@ -1241,15 +1245,16 @@ VMCache::Adopt(VMCache* source, off_t offset, off_t size, off_t newOffset) /*! Discards pages in the given range. 
 */ -status_t +ssize_t VMCache::Discard(off_t offset, off_t size) { + page_num_t discarded = 0; page_num_t startPage = offset >> PAGE_SHIFT; page_num_t endPage = (offset + size + B_PAGE_SIZE - 1) >> PAGE_SHIFT; - while (_FreePageRange(pages.GetIterator(startPage, true, true), &endPage)) + while (_FreePageRange(pages.GetIterator(startPage, true, true), &endPage, &discarded)) ; - return B_OK; + return (discarded * B_PAGE_SIZE); }