kernel/vm: use DoublyLinkedList for VMCache areas list
Change-Id: I0c6a231f245fa542f5d90959755de1e6ba39eb8e
Reviewed-on: https://review.haiku-os.org/c/haiku/+/8685
Reviewed-by: waddlesplash <waddlesplash@gmail.com>
commit 8ea4167e8b (parent 04c90835ac)
@@ -119,8 +119,13 @@ public:
 			uint8*				page_protections;
 
 			struct VMAddressSpace* address_space;
-			struct VMArea*		cache_next;
-			struct VMArea*		cache_prev;
+
+private:
+			DoublyLinkedListLink<VMArea> fCacheLink;
+
+public:
+			typedef DoublyLinkedList<VMArea,
+				DoublyLinkedListMemberGetLink<VMArea, &VMArea::fCacheLink> > CacheList;
 
 			addr_t				Base() const	{ return fBase; }
 			size_t				Size() const	{ return fSize; }
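The typedef above is Haiku's intrusive-list idiom: the link pointers live inside each VMArea (the fCacheLink member), and DoublyLinkedListMemberGetLink tells the list template where to find them, so list membership costs no separate allocation. A minimal, self-contained sketch of the pattern follows — Link, IntrusiveList, InsertFront, and Area are invented stand-ins for illustration, not the real templates from headers/private/kernel/util/DoublyLinkedList.h:

	#include <cstdio>

	// Simplified stand-in for DoublyLinkedListLink: just the two pointers.
	template<typename Element>
	struct Link {
		Element* next = nullptr;
		Element* prev = nullptr;
	};

	// Simplified stand-in for DoublyLinkedList + MemberGetLink: the list is
	// parameterized on which member of Element holds the link storage.
	template<typename Element, Link<Element> Element::*LinkMember>
	class IntrusiveList {
	public:
		bool IsEmpty() const { return fFirst == nullptr; }
		Element* First() const { return fFirst; }
		Element* GetNext(Element* element) const
			{ return (element->*LinkMember).next; }

		// Insert at the head -- the behavior the commit preserves via
		// DoublyLinkedList::Insert(element, false).
		void InsertFront(Element* element)
		{
			(element->*LinkMember).next = fFirst;
			(element->*LinkMember).prev = nullptr;
			if (fFirst != nullptr)
				(fFirst->*LinkMember).prev = element;
			fFirst = element;
		}

	private:
		Element* fFirst = nullptr;
	};

	// The element embeds its own link storage, as VMArea now embeds fCacheLink.
	struct Area {
		int id;
		Link<Area> fCacheLink;
	};

	int main()
	{
		IntrusiveList<Area, &Area::fCacheLink> areas;
		Area a = {1};
		Area b = {2};
		areas.InsertFront(&a);
		areas.InsertFront(&b);
		for (Area* it = areas.First(); it != nullptr; it = areas.GetNext(it))
			std::printf("area %d\n", it->id);  // prints "area 2", then "area 1"
		return 0;
	}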
@@ -15,6 +15,7 @@
 #include <util/DoublyLinkedList.h>
 #include <vm/vm.h>
 #include <vm/vm_types.h>
+#include <vm/VMArea.h>
 
 #include "kernel_debug_config.h"
 
@@ -188,7 +189,7 @@ protected:
 	virtual	void				DeleteObject() = 0;
 
 public:
-			VMArea*				areas;
+			VMArea::CacheList	areas;
 			ConsumerList		consumers;
 				// list of caches that use this cache as a source
 			VMCachePagesTree	pages;
src/system/kernel/cache/file_cache.cpp
@@ -291,7 +291,7 @@ reserve_pages(file_cache_ref* ref, vm_page_reservation* reservation,
 		VMCache* cache = ref->cache;
 		cache->Lock();
 
-		if (cache->consumers.IsEmpty() && cache->areas == NULL
+		if (cache->consumers.IsEmpty() && cache->areas.IsEmpty()
 			&& access_is_sequential(ref)) {
 			// we are not mapped, and we're accessed sequentially
 
@@ -524,9 +524,9 @@ MultiAddressSpaceLocker::AddAreaCacheAndLock(area_id areaID,
 
 	while (true) {
 		// add all areas
-		VMArea* firstArea = cache->areas;
+		VMArea* firstArea = cache->areas.First();
 		for (VMArea* current = firstArea; current;
-				current = current->cache_next) {
+				current = cache->areas.GetNext(current)) {
 			error = AddArea(current,
 				current == area ? writeLockThisOne : writeLockOthers);
 			if (error != B_OK) {
@@ -558,7 +558,7 @@ MultiAddressSpaceLocker::AddAreaCacheAndLock(area_id areaID,
 
 		// If neither the area's cache has changed nor its area list we're
 		// done.
-		if (cache == oldCache && firstArea == cache->areas) {
+		if (cache == oldCache && firstArea == cache->areas.First()) {
 			_area = area;
 			if (_cache != NULL)
 				*_cache = cache;
@@ -33,9 +33,7 @@ VMArea::VMArea(VMAddressSpace* addressSpace, uint32 wiring, uint32 protection)
 	cache_offset(0),
 	cache_type(0),
 	page_protections(NULL),
-	address_space(addressSpace),
-	cache_next(NULL),
-	cache_prev(NULL)
+	address_space(addressSpace)
 {
 	new (&mappings) VMAreaMappings;
 }
@@ -611,7 +611,7 @@ VMCacheRef::VMCacheRef(VMCache* cache)
 bool
 VMCache::_IsMergeable() const
 {
-	return areas == NULL && temporary && !unmergeable
+	return areas.IsEmpty() && temporary && !unmergeable
 		&& !consumers.IsEmpty() && consumers.Head() == consumers.Tail();
 }
 
@@ -636,7 +636,6 @@ VMCache::Init(uint32 cacheType, uint32 allocationFlags)
 {
 	mutex_init(&fLock, "VMCache");
 
-	areas = NULL;
 	fRefCount = 1;
 	source = NULL;
 	virtual_base = 0;
@@ -677,7 +676,7 @@ VMCache::Init(uint32 cacheType, uint32 allocationFlags)
 void
 VMCache::Delete()
 {
-	if (areas != NULL)
+	if (!areas.IsEmpty())
 		panic("cache %p to be deleted still has areas", this);
 	if (!consumers.IsEmpty())
 		panic("cache %p to be deleted still has consumers", this);
@@ -980,15 +979,11 @@ status_t
 VMCache::InsertAreaLocked(VMArea* area)
 {
 	TRACE(("VMCache::InsertAreaLocked(cache %p, area %p)\n", this, area));
-	AssertLocked();
 
 	T(InsertArea(this, area));
 
-	area->cache_next = areas;
-	if (area->cache_next)
-		area->cache_next->cache_prev = area;
-	area->cache_prev = NULL;
-	areas = area;
+	AssertLocked();
+	areas.Insert(area, false);
 
 	AcquireStoreRef();
 
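Note the second argument to Insert(): in Haiku's DoublyLinkedList, Insert(element, back) appends when back is true (its default), so passing false here prepends at the head. That matches the old hand-rolled code, which always made the new area the list head ("areas = area"), so the ordering of the areas list is unchanged by this conversion.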
@@ -1011,12 +1006,7 @@ VMCache::RemoveArea(VMArea* area)
 
 	AutoLocker<VMCache> locker(this);
 
-	if (area->cache_prev)
-		area->cache_prev->cache_next = area->cache_next;
-	if (area->cache_next)
-		area->cache_next->cache_prev = area->cache_prev;
-	if (areas == area)
-		areas = area->cache_next;
+	areas.Remove(area);
 
 	return B_OK;
 }
@@ -1030,12 +1020,11 @@ VMCache::TransferAreas(VMCache* fromCache)
 {
 	AssertLocked();
 	fromCache->AssertLocked();
-	ASSERT(areas == NULL);
+	ASSERT(areas.IsEmpty());
 
-	areas = fromCache->areas;
-	fromCache->areas = NULL;
+	areas.TakeFrom(&fromCache->areas);
 
-	for (VMArea* area = areas; area != NULL; area = area->cache_next) {
+	for (VMArea* area = areas.First(); area != NULL; area = areas.GetNext(area)) {
 		area->cache = this;
 		AcquireRefLocked();
 		fromCache->ReleaseRefLocked();
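TakeFrom() replaces the two pointer assignments with a constant-time splice of the whole source list. A rough sketch of the idea, assuming (as TransferAreas() asserts above) that the destination list is empty — Node, List, and the field names are invented for illustration, not Haiku's actual implementation:

	struct Node {
		Node* next = nullptr;
		Node* prev = nullptr;
	};

	struct List {
		Node* fFirst = nullptr;
		Node* fLast = nullptr;

		bool IsEmpty() const { return fFirst == nullptr; }

		// Steal every node from "other" in O(1), leaving it empty -- the
		// effect the old code got with "areas = fromCache->areas;
		// fromCache->areas = NULL;". Only the empty-destination case is
		// handled here, matching the ASSERT in TransferAreas().
		void TakeFrom(List* other)
		{
			fFirst = other->fFirst;
			fLast = other->fLast;
			other->fFirst = nullptr;
			other->fLast = nullptr;
		}
	};

The loop that follows still visits every transferred area, but only to retarget area->cache and move the cache references; the list structure itself moves in one step.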
@@ -1051,7 +1040,7 @@ VMCache::CountWritableAreas(VMArea* ignoreArea) const
 {
 	uint32 count = 0;
 
-	for (VMArea* area = areas; area != NULL; area = area->cache_next) {
+	for (VMArea* area = areas.First(); area != NULL; area = areas.GetNext(area)) {
 		if (area != ignoreArea
 			&& (area->protection & (B_WRITE_AREA | B_KERNEL_WRITE_AREA)) != 0) {
 			count++;
@@ -1471,7 +1460,7 @@ VMCache::Dump(bool showPages) const
 #endif
 	kprintf("  areas:\n");
 
-	for (VMArea* area = areas; area != NULL; area = area->cache_next) {
+	for (VMArea* area = areas.First(); area != NULL; area = areas.GetNext(area)) {
 		kprintf("    area 0x%" B_PRIx32 ", %s\n", area->id, area->name);
 		kprintf("\tbase_addr: 0x%lx, size: 0x%lx\n", area->Base(),
 			area->Size());
@@ -780,7 +780,7 @@ cut_area(VMAddressSpace* addressSpace, VMArea* area, addr_t address,
 
 	// If no one else uses the area's cache and it's an anonymous cache, we can
 	// resize or split it, too.
-	bool onlyCacheUser = cache->areas == area && area->cache_next == NULL
+	bool onlyCacheUser = cache->areas.First() == area && cache->areas.GetNext(area) == NULL
 		&& cache->consumers.IsEmpty() && area->cache_type == CACHE_TYPE_RAM;
 
 	const addr_t oldSize = area->Size();
@@ -1110,7 +1110,7 @@ discard_area_range(VMArea* area, addr_t address, addr_t size)
 	// If someone else uses the area's cache or it's not an anonymous cache, we
 	// can't discard.
 	VMCache* cache = vm_area_get_locked_cache(area);
-	if (cache->areas != area || area->cache_next != NULL
+	if (cache->areas.First() != area || VMArea::CacheList::GetNext(area) != NULL
 		|| !cache->consumers.IsEmpty() || cache->type != CACHE_TYPE_RAM) {
 		return B_OK;
 	}
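The qualified call VMArea::CacheList::GetNext(area) here works without a list instance: with a member-link intrusive list, the successor is reachable from the link embedded in the element itself, so GetNext() can be invoked statically. It is interchangeable with the cache->areas.GetNext(area) form used in the other hunks.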
@@ -2827,8 +2827,8 @@ vm_copy_on_write_area(VMCache* lowerCache,
 			DEBUG_PAGE_ACCESS_END(copiedPage);
 		} else {
 			// Change the protection of this page in all areas.
-			for (VMArea* tempArea = upperCache->areas; tempArea != NULL;
-					tempArea = tempArea->cache_next) {
+			for (VMArea* tempArea = upperCache->areas.First(); tempArea != NULL;
+					tempArea = upperCache->areas.GetNext(tempArea)) {
 				if (!is_page_in_area(tempArea, page))
 					continue;
 
@@ -2854,8 +2854,8 @@ vm_copy_on_write_area(VMCache* lowerCache,
 		ASSERT(lowerCache->WiredPagesCount() == 0);
 
 		// just change the protection of all areas
-		for (VMArea* tempArea = upperCache->areas; tempArea != NULL;
-				tempArea = tempArea->cache_next) {
+		for (VMArea* tempArea = upperCache->areas.First(); tempArea != NULL;
+				tempArea = upperCache->areas.GetNext(tempArea)) {
 			if (tempArea->page_protections != NULL) {
 				// Change the protection of all pages in this area.
 				VMTranslationMap* map = tempArea->address_space->TranslationMap();
@@ -3125,8 +3125,8 @@ vm_set_area_protection(team_id team, area_id areaID, uint32 newProtection,
 		// vm_copy_on_write_area(), all areas of the cache) doesn't have any
 		// wired ranges.
 		if (!isWritable && becomesWritable && !cache->consumers.IsEmpty()) {
-			for (VMArea* otherArea = cache->areas; otherArea != NULL;
-					otherArea = otherArea->cache_next) {
+			for (VMArea* otherArea = cache->areas.First(); otherArea != NULL;
+					otherArea = cache->areas.GetNext(otherArea)) {
 				if (wait_if_area_is_wired(otherArea, &locker, &cacheLocker)) {
 					restart = true;
 					break;
@@ -4939,8 +4939,8 @@ vm_resize_area(area_id areaID, size_t newSize, bool kernel)
 
 	if (oldSize < newSize) {
 		// We need to check if all areas of this cache can be resized.
-		for (VMArea* current = cache->areas; current != NULL;
-				current = current->cache_next) {
+		for (VMArea* current = cache->areas.First(); current != NULL;
+				current = cache->areas.GetNext(current)) {
 			if (!current->address_space->CanResizeArea(current, newSize))
 				return B_ERROR;
 			anyKernelArea
@@ -4949,8 +4949,8 @@ vm_resize_area(area_id areaID, size_t newSize, bool kernel)
 	} else {
 		// We're shrinking the areas, so we must make sure the affected
 		// ranges are not wired.
-		for (VMArea* current = cache->areas; current != NULL;
-				current = current->cache_next) {
+		for (VMArea* current = cache->areas.First(); current != NULL;
+				current = cache->areas.GetNext(current)) {
 			anyKernelArea
 				|= current->address_space == VMAddressSpace::Kernel();
 
@@ -4978,8 +4978,8 @@ vm_resize_area(area_id areaID, size_t newSize, bool kernel)
 		return status;
 	}
 
-	for (VMArea* current = cache->areas; current != NULL;
-			current = current->cache_next) {
+	for (VMArea* current = cache->areas.First(); current != NULL;
+			current = cache->areas.GetNext(current)) {
 		status = current->address_space->ResizeArea(current, newSize,
 			allocationFlags);
 		if (status != B_OK)
|
||||
if (status != B_OK) {
|
||||
// Something failed -- resize the areas back to their original size.
|
||||
// This can fail, too, in which case we're seriously screwed.
|
||||
for (VMArea* current = cache->areas; current != NULL;
|
||||
current = current->cache_next) {
|
||||
for (VMArea* current = cache->areas.First(); current != NULL;
|
||||
current = cache->areas.GetNext(current)) {
|
||||
if (current->address_space->ResizeArea(current, oldSize,
|
||||
allocationFlags) != B_OK) {
|
||||
panic("vm_resize_area(): Failed and not being able to restore "
|
||||
|
@@ -331,13 +331,13 @@ dump_caches_recursively(VMCache* cache, cache_info& info, int level)
 	}
 
 	// areas
-	if (cache->areas != NULL) {
-		VMArea* area = cache->areas;
+	if (!cache->areas.IsEmpty()) {
+		VMArea* area = cache->areas.First();
 		kprintf(", areas: %" B_PRId32 " (%s, team: %" B_PRId32 ")", area->id,
 			area->name, area->address_space->ID());
 
-		while (area->cache_next != NULL) {
-			area = area->cache_next;
+		while (cache->areas.GetNext(area) != NULL) {
+			area = cache->areas.GetNext(area);
 			kprintf(", %" B_PRId32, area->id);
 		}
 	}
@@ -479,8 +479,8 @@ dump_area_struct(VMArea* area, bool mappings)
 	kprintf("cache:\t\t%p\n", area->cache);
 	kprintf("cache_type:\t%s\n", vm_cache_type_to_string(area->cache_type));
 	kprintf("cache_offset:\t0x%" B_PRIx64 "\n", area->cache_offset);
-	kprintf("cache_next:\t%p\n", area->cache_next);
-	kprintf("cache_prev:\t%p\n", area->cache_prev);
+	kprintf("cache_next:\t%p\n", VMArea::CacheList::GetNext(area));
+	kprintf("cache_prev:\t%p\n", VMArea::CacheList::GetPrevious(area));
 
 	VMAreaMappings::Iterator iterator = area->mappings.GetIterator();
 	if (mappings) {