From 5d4501aa0187e1a8790784dc2ab3382a16660e93 Mon Sep 17 00:00:00 2001
From: Michael Lotz
Date: Thu, 20 Aug 2015 21:54:41 +0200
Subject: [PATCH] Assorted whitespace cleanup and typo fixes.

---
 headers/private/kernel/thread.h                  |  2 +-
 headers/private/kernel/thread_types.h            |  2 +-
 headers/private/shared/OpenHashTable.h           |  6 +++---
 src/system/kernel/fs/Vnode.h                     |  2 +-
 src/system/kernel/fs/vfs.cpp                     | 10 +++++-----
 src/tests/add-ons/kernel/kernelland_emu/lock.cpp |  8 ++++----
 6 files changed, 15 insertions(+), 15 deletions(-)

diff --git a/headers/private/kernel/thread.h b/headers/private/kernel/thread.h
index b4899ed945..a352f2eae0 100644
--- a/headers/private/kernel/thread.h
+++ b/headers/private/kernel/thread.h
@@ -302,7 +302,7 @@ thread_is_blocked(Thread* thread)
 	especially with a client lock that uses the thread blocking API. After a
 	blocked thread has been interrupted or the the time out occurred it cannot
 	acquire the client lock (or any other lock using the thread blocking API)
-	without first making sure that the thread doesn't still appears to be
+	without first making sure that the thread doesn't still appear to be
 	waiting to other client code. Otherwise another thread could try to unblock
 	it which could erroneously unblock the thread while already waiting on the
 	client lock. So usually when interruptions or timeouts are possible a
diff --git a/headers/private/kernel/thread_types.h b/headers/private/kernel/thread_types.h
index 1bbab89333..cf9e8fa872 100644
--- a/headers/private/kernel/thread_types.h
+++ b/headers/private/kernel/thread_types.h
@@ -453,7 +453,7 @@ struct Thread : TeamThreadIteratorEntry<thread_id>, KernelReferenceable {
 			// modified by the thread itself and
 			// thus freely readable by it
 
-	void (*cancel_function)(int);
+	void (*cancel_function)(int);
 
 	struct {
 		uint8 parameters[SYSCALL_RESTART_PARAMETER_SIZE];
diff --git a/headers/private/shared/OpenHashTable.h b/headers/private/shared/OpenHashTable.h
index 8659b32c14..eb000595c6 100644
--- a/headers/private/shared/OpenHashTable.h
+++ b/headers/private/shared/OpenHashTable.h
@@ -233,21 +233,21 @@ template
 int32
 OpenHashTable::ArraySize() const
 {
-	return fArraySize;
+	return fArraySize;
 }
 
 
 template
 int32
 OpenHashTable::VectorSize() const
 {
-	return fElementVector->Size();
+	return fElementVector->Size();
 }
 
 
 template
 int32
 OpenHashTable::CountElements() const
 {
-	return fElementCount;
+	return fElementCount;
 }
diff --git a/src/system/kernel/fs/Vnode.h b/src/system/kernel/fs/Vnode.h
index d4ef8fd3ff..6efe921a5c 100644
--- a/src/system/kernel/fs/Vnode.h
+++ b/src/system/kernel/fs/Vnode.h
@@ -80,7 +80,7 @@ private:
 	static const uint32 kFlagsCovering = 0x00000100;
 	static const uint32 kFlagsType = 0xfffff000;
 
-	static const uint32 kBucketCount = 32;
+	static const uint32 kBucketCount = 32;
 
 	struct LockWaiter : DoublyLinkedListLinkImpl<LockWaiter> {
 		LockWaiter* next;
diff --git a/src/system/kernel/fs/vfs.cpp b/src/system/kernel/fs/vfs.cpp
index 68f58c63ec..b279556bd9 100644
--- a/src/system/kernel/fs/vfs.cpp
+++ b/src/system/kernel/fs/vfs.cpp
@@ -1145,8 +1145,8 @@ restart:
 		rw_lock_read_unlock(&sVnodeLock);
 		if (!canWait || --tries < 0) {
 			// vnode doesn't seem to become unbusy
-			dprintf("vnode %" B_PRIdDEV ":%" B_PRIdINO " is not becoming unbusy!\n",
-				mountID, vnodeID);
+			dprintf("vnode %" B_PRIdDEV ":%" B_PRIdINO
+				" is not becoming unbusy!\n", mountID, vnodeID);
 			return B_BUSY;
 		}
 		snooze(5000);	// 5 ms
@@ -1300,7 +1300,7 @@ free_unused_vnodes(int32 level)
 		AutoLocker<Vnode> nodeLocker(vnode);
 
 		// Check whether the node is still unused -- since we only append to the
-		// the tail of the unused queue, the vnode should still be at its head.
+		// tail of the unused queue, the vnode should still be at its head.
 		// Alternatively we could check its ref count for 0 and its busy flag,
 		// but if the node is no longer at the head of the queue, it means it
 		// has been touched in the meantime, i.e. it is no longer the least
@@ -2100,7 +2100,7 @@ lookup_dir_entry(struct vnode* dir, const char* name, struct vnode** _vnode)
 	if (status != B_OK)
 		return status;
 
-	// The lookup() hook call get_vnode() or publish_vnode(), so we do already
+	// The lookup() hook calls get_vnode() or publish_vnode(), so we do already
 	// have a reference and just need to look the node up.
 	rw_lock_read_lock(&sVnodeLock);
 	*_vnode = lookup_vnode(dir->device, id);
@@ -8890,7 +8890,7 @@ _user_open_entry_ref(dev_t device, ino_t inode, const char* userName,
 
 	if ((openMode & O_CREAT) != 0) {
 		return file_create_entry_ref(device, inode, name, openMode, perms,
-			false);
+			false);
 	}
 
 	return file_open_entry_ref(device, inode, name, openMode, false);
diff --git a/src/tests/add-ons/kernel/kernelland_emu/lock.cpp b/src/tests/add-ons/kernel/kernelland_emu/lock.cpp
index 7d5ad1fbce..eb75880c79 100644
--- a/src/tests/add-ons/kernel/kernelland_emu/lock.cpp
+++ b/src/tests/add-ons/kernel/kernelland_emu/lock.cpp
@@ -479,11 +479,11 @@ _rw_lock_read_unlock_threads_locked(rw_lock* lock)
 	if (--lock->active_readers > 0)
 		return;
 
-	if (lock->active_readers < 0) {
-		panic("rw_lock_read_unlock(): lock %p not read-locked", lock);
+	if (lock->active_readers < 0) {
+		panic("rw_lock_read_unlock(): lock %p not read-locked", lock);
 		lock->active_readers = 0;
-		return;
-	}
+		return;
+	}
 
 	rw_lock_unblock(lock);
 }
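
Note on the first vfs.cpp hunk: it only re-wraps an over-long dprintf() call, but the code around it is a small bounded-retry loop that waits for a busy vnode. A minimal, self-contained sketch of that pattern follows, assuming only the public snooze()/status_t API; try_lookup() and kMaxTries are hypothetical stand-ins for the real lookup under sVnodeLock and its retry budget, not the actual vfs.cpp code:

#include <OS.h>

static const int32 kMaxTries = 300;
	// illustrative budget only; the real value lives in vfs.cpp

static status_t
wait_until_not_busy(bool canWait, bool (*try_lookup)())
{
	int32 tries = kMaxTries;
	while (!try_lookup()) {
		if (!canWait || --tries < 0) {
			// the vnode doesn't seem to become unbusy; give up
			return B_BUSY;
		}
		snooze(5000);	// 5 ms, then retry
	}
	return B_OK;
}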
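
Note on the lock.cpp hunk: it only re-indents lines in _rw_lock_read_unlock_threads_locked(), but the logic those lines carry is the usual reader accounting of a readers-writer lock. A minimal sketch of that bookkeeping, with a hypothetical rw_lock_sketch structure and a wake_waiters() stub standing in for the real rw_lock and rw_lock_unblock() (this is not the kernelland_emu code):

#include <cstdint>
#include <cstdio>

struct rw_lock_sketch {
	int32_t active_readers;
};

// Stub for rw_lock_unblock(): the real function hands the lock over to
// queued waiters; the sketch only marks where that happens.
static void
wake_waiters(rw_lock_sketch* /*lock*/)
{
}

static void
read_unlock_sketch(rw_lock_sketch* lock)
{
	if (--lock->active_readers > 0)
		return;		// other readers still hold the lock

	if (lock->active_readers < 0) {
		// more unlocks than read locks: report it and repair the counter
		fprintf(stderr, "read_unlock_sketch(): lock %p not read-locked\n",
			(void*)lock);
		lock->active_readers = 0;
		return;
	}

	// the count reached zero: the last reader left, wake any waiters
	wake_waiters(lock);
}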