kernel/fs: Use a spinlock for the unused-vnodes lock.

This lock protects a linked-list. In all cases but one, the only
operation done while holding the lock is to remove a single item
from the list and decrement a counter. Acquiring a mutex itself
involves multiple linked-list operations protected by spinlocks,
so cut out the overhead and just use a spinlock directly.

In the one case where we do more than just remove an item, we
also hold the write side of sHotVnodesLock, so we don't run any risk
of causing "spinlock could not be acquired for a long time" KDLs:
in that case other threads will be blocked waiting on the rwlock
rather than spinning on the spinlock.

Reduces lock contention in the VFS. When compiling HaikuDepot and the
mime_db with -j4 (in a VM), sys time decreased a bit (~10.1s to ~9.9s),
and real time went down by more (~31s to ~29s). "git status"
performance also improved a bit, though in that case we now appear
to be contending on the per-vnode locks instead.
This commit is contained in:
Augustin Cavalier 2024-11-18 13:59:25 -05:00
parent 2f37cef1e4
commit 7af4c8a6a9
2 changed files with 13 additions and 7 deletions

View File

@ -26,9 +26,9 @@ const static uint32 kMaxUnusedVnodes = 8192;
/*! \brief Guards sUnusedVnodeList and sUnusedVnodes.
Innermost lock. Must not be held when acquiring any other lock.
Must have at least a read-lock of sHotVnodesLock when acquiring!
*/
static mutex sUnusedVnodesLock = MUTEX_INITIALIZER("unused vnodes");
static spinlock sUnusedVnodesLock = B_SPINLOCK_INITIALIZER;
typedef DoublyLinkedList<Vnode, DoublyLinkedListMemberGetLink<Vnode, &Vnode::unused_link> >
UnusedVnodeList;
static UnusedVnodeList sUnusedVnodeList;
@ -48,7 +48,10 @@ static int32 sUnusedVnodesCheckCount = 0;
static void
flush_hot_vnodes_locked()
{
MutexLocker unusedLocker(sUnusedVnodesLock);
// Since sUnusedVnodesLock is always acquired after sHotVnodesLock,
// we can safely hold it for the whole duration of the flush.
// We don't want to be descheduled while holding the write-lock, anyway.
InterruptsSpinLocker unusedLocker(sUnusedVnodesLock);
int32 count = std::min(sNextHotVnodeIndex, kMaxHotVnodes);
for (int32 i = 0; i < count; i++) {
@ -147,7 +150,7 @@ vnode_used(Vnode* vnode)
vnode->SetUnused(false);
if (!vnode->IsHot()) {
MutexLocker unusedLocker(sUnusedVnodesLock);
InterruptsSpinLocker unusedLocker(sUnusedVnodesLock);
sUnusedVnodeList.Remove(vnode);
sUnusedVnodes--;
}
@ -175,7 +178,7 @@ vnode_to_be_freed(Vnode* vnode)
}
}
} else if (vnode->IsUnused()) {
MutexLocker unusedLocker(sUnusedVnodesLock);
InterruptsSpinLocker unusedLocker(sUnusedVnodesLock);
sUnusedVnodeList.Remove(vnode);
sUnusedVnodes--;
}

View File

@ -1294,7 +1294,8 @@ free_unused_vnodes(int32 level)
// determine how many nodes to free
uint32 count = 1;
{
MutexLocker unusedVnodesLocker(sUnusedVnodesLock);
ReadLocker hotVnodesReadLocker(sHotVnodesLock);
InterruptsSpinLocker unusedVnodesLocker(sUnusedVnodesLock);
switch (level) {
case B_LOW_RESOURCE_NOTE:
@ -1316,9 +1317,10 @@ free_unused_vnodes(int32 level)
for (uint32 i = 0; i < count; i++) {
ReadLocker vnodesReadLocker(sVnodeLock);
ReadLocker hotVnodesReadLocker(sHotVnodesLock);
// get the first node
MutexLocker unusedVnodesLocker(sUnusedVnodesLock);
InterruptsSpinLocker unusedVnodesLocker(sUnusedVnodesLock);
struct vnode* vnode = sUnusedVnodeList.First();
unusedVnodesLocker.Unlock();
@ -1347,6 +1349,7 @@ free_unused_vnodes(int32 level)
// write back changes and free the node
nodeLocker.Unlock();
hotVnodesReadLocker.Unlock();
vnodesReadLocker.Unlock();
if (vnode->cache != NULL)