kernel/fs: Make io_context rw_lock'ed.
Applications that don't call open() or similar functions very often, but call many FD-related methods across multiple threads at once (like "git status"), now don't wait on the context lock as much. ("git status" performance isn't much improved, though, because its threads just hit the "unused vnodes" lock instead.)
commit a9b5f684f1
parent 344ded80d4
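The pattern the diff below applies throughout the VFS: the per-context mutex becomes an rw_lock, lookups such as get_fd() take a shared read lock (so threads doing concurrent FD lookups no longer serialize on each other), and only operations that mutate the FD table or the cwd take the exclusive write lock. The sketch below is illustrative only, not code from this commit: the simplified context struct and the example_* helpers are invented for the example, while rw_lock, rw_lock_init(), ReadLocker and WriteLocker are the kernel primitives that actually appear in the diff (the header names are assumptions).

// Illustrative sketch of the read-mostly locking scheme adopted here.
// Only rw_lock, ReadLocker and WriteLocker mirror the real kernel API;
// the struct and helper functions are hypothetical.
#include <SupportDefs.h>	// status_t, uint32, B_OK, B_BAD_VALUE
#include <lock.h>		// rw_lock, rw_lock_init() (assumed private kernel header)
#include <util/AutoLock.h>	// ReadLocker, WriteLocker (assumed private kernel header)

struct file_descriptor;		// opaque here; only pointers are stored

struct example_context {
	mutable rw_lock		lock;	// replaces the old "mutex io_mutex"
	file_descriptor**	fds;
	uint32			table_size;
};

void
example_init(example_context* context, file_descriptor** table, uint32 size)
{
	rw_lock_init(&context->lock, "I/O context");
	context->fds = table;
	context->table_size = size;
}

// Lookups only read the table, so many threads may hold the lock at once.
file_descriptor*
example_get_fd(const example_context* context, int fd)
{
	ReadLocker locker(context->lock);	// "mutable" allows locking a const context
	if (fd < 0 || (uint32)fd >= context->table_size)
		return NULL;
	return context->fds[fd];
}

// Mutations of the table still exclude every reader and every other writer.
status_t
example_set_fd(example_context* context, int fd, file_descriptor* descriptor)
{
	WriteLocker locker(context->lock);
	if (fd < 0 || (uint32)fd >= context->table_size)
		return B_BAD_VALUE;
	context->fds[fd] = descriptor;
	return B_OK;
}

With this scheme, the common case in a workload like "git status" (many threads doing descriptor lookups at once) only ever takes the read lock, which is the contention the commit message refers to.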
@@ -76,8 +76,8 @@ extern struct file_descriptor *alloc_fd(void);
 extern int new_fd_etc(struct io_context *, struct file_descriptor *,
 	int firstIndex);
 extern int new_fd(struct io_context *, struct file_descriptor *);
-extern struct file_descriptor *get_fd(struct io_context *, int);
-extern struct file_descriptor *get_open_fd(struct io_context *, int);
+extern struct file_descriptor *get_fd(const struct io_context *, int);
+extern struct file_descriptor *get_open_fd(const struct io_context *, int);
 extern void close_fd(struct io_context *context,
 	struct file_descriptor *descriptor);
 extern status_t close_fd_index(struct io_context *context, int fd);
@@ -91,7 +91,7 @@ extern bool fd_is_valid(int fd, bool kernel);
 extern struct vnode *fd_vnode(struct file_descriptor *descriptor);
 extern bool fd_is_file(struct file_descriptor* descriptor);
 
-extern bool fd_close_on_exec(struct io_context *context, int fd);
+extern bool fd_close_on_exec(const struct io_context *context, int fd);
 extern void fd_set_close_on_exec(struct io_context *context, int fd,
 	bool closeFD);
 
@@ -47,8 +47,9 @@ struct vnode;
 /** The I/O context of a process/team, holds the fd array among others */
 typedef struct io_context {
 	struct vnode *root;
+
+	mutable rw_lock lock;
 	struct vnode *cwd;
-	mutex io_mutex;
 	int32 ref_count;
 	uint32 table_size;
 	uint32 num_used_fds;
@@ -70,7 +71,7 @@ status_t vfs_init(struct kernel_args *args);
 status_t vfs_bootstrap_file_systems(void);
 void vfs_mount_boot_file_system(struct kernel_args *args);
 void vfs_exec_io_context(io_context *context);
-io_context* vfs_new_io_context(io_context* parentContext,
+io_context* vfs_new_io_context(const io_context *parentContext,
 	bool purgeCloseOnExec);
 void vfs_get_io_context(io_context *context);
 void vfs_put_io_context(io_context *context);
@@ -623,9 +623,9 @@ _user_event_queue_create(int openFlags)
 		return fd;
 	}
 
-	mutex_lock(&context->io_mutex);
+	rw_lock_write_lock(&context->lock);
 	fd_set_close_on_exec(context, fd, (openFlags & O_CLOEXEC) != 0);
-	mutex_unlock(&context->io_mutex);
+	rw_lock_write_unlock(&context->lock);
 
 	deleter.Detach();
 	return fd;
@@ -949,7 +949,7 @@ check_max_fds(int numFDs)
 		return true;
 
 	struct io_context *context = get_current_io_context(false);
-	MutexLocker(&context->io_mutex);
+	ReadLocker locker(&context->lock);
 	return (size_t)numFDs <= context->table_size;
 }
 
@@ -45,7 +45,7 @@ static const size_t kMaxReadDirBufferSize = B_PAGE_SIZE * 2;
 extern object_cache* sFileDescriptorCache;
 
 
-static struct file_descriptor* get_fd_locked(struct io_context* context,
+static struct file_descriptor* get_fd_locked(const struct io_context* context,
 	int fd);
 static struct file_descriptor* remove_fd(struct io_context* context, int fd);
 static void deselect_select_infos(file_descriptor* descriptor,
@@ -93,7 +93,7 @@ alloc_fd(void)
 
 
 bool
-fd_close_on_exec(struct io_context* context, int fd)
+fd_close_on_exec(const struct io_context* context, int fd)
 {
 	return CHECK_BIT(context->fds_close_on_exec[fd / 8], fd & 7) ? true : false;
 }
@@ -122,7 +122,7 @@ new_fd_etc(struct io_context* context, struct file_descriptor* descriptor,
 	if (firstIndex < 0 || (uint32)firstIndex >= context->table_size)
 		return B_BAD_VALUE;
 
-	mutex_lock(&context->io_mutex);
+	WriteLocker locker(context->lock);
 
 	for (i = firstIndex; i < context->table_size; i++) {
 		if (!context->fds[i]) {
@@ -130,10 +130,8 @@ new_fd_etc(struct io_context* context, struct file_descriptor* descriptor,
 			break;
 		}
 	}
-	if (fd < 0) {
-		fd = B_NO_MORE_FDS;
-		goto err;
-	}
+	if (fd < 0)
+		return B_NO_MORE_FDS;
 
 	TFD(NewFD(context, fd, descriptor));
 
@@ -141,9 +139,6 @@ new_fd_etc(struct io_context* context, struct file_descriptor* descriptor,
 	context->num_used_fds++;
 	atomic_add(&descriptor->open_count, 1);
 
-err:
-	mutex_unlock(&context->io_mutex);
-
 	return fd;
 }
 
@@ -253,7 +248,7 @@ inc_fd_ref_count(struct file_descriptor* descriptor)
 
 
 static struct file_descriptor*
-get_fd_locked(struct io_context* context, int fd)
+get_fd_locked(const struct io_context* context, int fd)
 {
 	if (fd < 0 || (uint32)fd >= context->table_size)
 		return NULL;
@@ -274,18 +269,17 @@ get_fd_locked(struct io_context* context, int fd)
 
 
 struct file_descriptor*
-get_fd(struct io_context* context, int fd)
+get_fd(const struct io_context* context, int fd)
 {
-	MutexLocker _(context->io_mutex);
-
+	ReadLocker locker(&context->lock);
 	return get_fd_locked(context, fd);
 }
 
 
 struct file_descriptor*
-get_open_fd(struct io_context* context, int fd)
+get_open_fd(const struct io_context* context, int fd)
 {
-	MutexLocker _(context->io_mutex);
+	ReadLocker locker(&context->lock);
 
 	file_descriptor* descriptor = get_fd_locked(context, fd);
 	if (descriptor == NULL)
@@ -307,7 +301,7 @@ remove_fd(struct io_context* context, int fd)
 	if (fd < 0)
 		return NULL;
 
-	mutex_lock(&context->io_mutex);
+	WriteLocker locker(context->lock);
 
 	if ((uint32)fd < context->table_size)
 		descriptor = context->fds[fd];
@@ -332,8 +326,6 @@ remove_fd(struct io_context* context, int fd)
 	if (selectInfos != NULL)
 		deselect_select_infos(descriptor, selectInfos, true);
 
-	mutex_unlock(&context->io_mutex);
-
 	return disconnected ? NULL : descriptor;
 }
 
@@ -354,12 +346,11 @@ dup_fd(int fd, bool kernel)
 
 	// now put the fd in place
 	status = new_fd(context, descriptor);
-	if (status < 0)
+	if (status < 0) {
 		put_fd(descriptor);
-	else {
-		mutex_lock(&context->io_mutex);
+	} else {
+		WriteLocker locker(context->lock);
 		fd_set_close_on_exec(context, status, false);
-		mutex_unlock(&context->io_mutex);
 	}
 
 	return status;
@@ -388,7 +379,7 @@ dup2_fd(int oldfd, int newfd, int flags, bool kernel)
 
 	// Get current I/O context and lock it
 	context = get_current_io_context(kernel);
-	mutex_lock(&context->io_mutex);
+	WriteLocker locker(context->lock);
 
 	// Check if the fds are valid (mutex must be locked because
 	// the table size could be changed)
@@ -396,7 +387,6 @@ dup2_fd(int oldfd, int newfd, int flags, bool kernel)
 		|| (uint32)newfd >= context->table_size
 		|| context->fds[oldfd] == NULL
 		|| (context->fds[oldfd]->open_mode & O_DISCONNECTED) != 0) {
-		mutex_unlock(&context->io_mutex);
 		return B_FILE_ERROR;
 	}
 
@@ -422,7 +412,7 @@ dup2_fd(int oldfd, int newfd, int flags, bool kernel)
 
 	fd_set_close_on_exec(context, newfd, (flags & O_CLOEXEC) != 0);
 
-	mutex_unlock(&context->io_mutex);
+	locker.Unlock();
 
 	// Say bye bye to the evicted fd
 	if (evicted) {
@@ -550,7 +540,7 @@ select_fd(int32 fd, struct select_info* info, bool kernel)
 		// define before the context locker, so it will be destroyed after it
 
 	io_context* context = get_current_io_context(kernel);
-	MutexLocker locker(context->io_mutex);
+	ReadLocker readLocker(context->lock);
 
 	descriptor.SetTo(get_fd_locked(context, fd));
 	if (!descriptor.IsSet())
@@ -573,7 +563,7 @@ select_fd(int32 fd, struct select_info* info, bool kernel)
 	// deselect() will be called on it after it is closed.
 	atomic_add(&descriptor->open_count, 1);
 
-	locker.Unlock();
+	readLocker.Unlock();
 
 	// select any events asked for
 	uint32 selectedEvents = 0;
@@ -590,7 +580,7 @@ select_fd(int32 fd, struct select_info* info, bool kernel)
 
 	// Add the info to the IO context. Even if nothing has been selected -- we
 	// always support B_EVENT_INVALID.
-	locker.Lock();
+	WriteLocker writeLocker(context->lock);
 	if (context->fds[fd] != descriptor.Get()) {
 		// Someone close()d the index in the meantime. deselect() all
 		// events.
@@ -630,7 +620,7 @@ deselect_fd(int32 fd, struct select_info* info, bool kernel)
 		// define before the context locker, so it will be destroyed after it
 
 	io_context* context = get_current_io_context(kernel);
-	MutexLocker locker(context->io_mutex);
+	WriteLocker locker(context->lock);
 
 	descriptor.SetTo(get_fd_locked(context, fd));
 	if (!descriptor.IsSet())
@@ -392,9 +392,9 @@ create_socket_fd(net_socket* socket, int flags, bool kernel)
 		put_fd(descriptor);
 	}
 
-	mutex_lock(&context->io_mutex);
+	rw_lock_write_lock(&context->lock);
 	fd_set_close_on_exec(context, fd, (oflags & O_CLOEXEC) != 0);
-	mutex_unlock(&context->io_mutex);
+	rw_lock_write_unlock(&context->lock);
 
 	return fd;
 }
@@ -1055,7 +1055,7 @@ dec_vnode_ref_count(struct vnode* vnode, bool alwaysFree, bool reenter)
 	ReadLocker locker(sVnodeLock);
 	AutoLocker<Vnode> nodeLocker(vnode);
 
-	int32 oldRefCount = atomic_add(&vnode->ref_count, -1);
+	const int32 oldRefCount = atomic_add(&vnode->ref_count, -1);
 
 	ASSERT_PRINT(oldRefCount > 0, "vnode %p\n", vnode);
 
@@ -1931,7 +1931,7 @@ disconnect_mount_or_vnode_fds(struct fs_mount* mount,
 		io_context* context = team->io_context;
 		if (context == NULL)
 			continue;
-		MutexLocker contextLocker(context->io_mutex);
+		WriteLocker contextLocker(context->lock);
 
 		teamLocker.Unlock();
 
@@ -2347,15 +2347,14 @@ path_to_vnode(char* path, bool traverseLink, VnodePutter& _vnode,
 			_vnode.SetTo(start);
 			return B_OK;
 		}
-
 	} else {
-		struct io_context* context = get_current_io_context(kernel);
+		const struct io_context* context = get_current_io_context(kernel);
 
-		mutex_lock(&context->io_mutex);
+		rw_lock_read_lock(&context->lock);
 		start = context->cwd;
 		if (start != NULL)
 			inc_vnode_ref_count(start);
-		mutex_unlock(&context->io_mutex);
+		rw_lock_read_unlock(&context->lock);
 
 		if (start == NULL)
 			return B_ERROR;
@@ -2867,9 +2866,9 @@ get_new_fd(struct fd_ops* ops, struct fs_mount* mount, struct vnode* vnode,
 		return B_NO_MORE_FDS;
 	}
 
-	mutex_lock(&context->io_mutex);
+	rw_lock_write_lock(&context->lock);
 	fd_set_close_on_exec(context, fd, (openMode & O_CLOEXEC) != 0);
-	mutex_unlock(&context->io_mutex);
+	rw_lock_write_unlock(&context->lock);
 
 	return fd;
 }
@@ -3623,8 +3622,6 @@ is_user_in_group(gid_t gid)
 static status_t
 free_io_context(io_context* context)
 {
-	uint32 i;
-
 	TIOC(FreeIOContext(context));
 
 	if (context->root)
@@ -3633,16 +3630,16 @@ free_io_context(io_context* context)
 	if (context->cwd)
 		put_vnode(context->cwd);
 
-	mutex_lock(&context->io_mutex);
+	rw_lock_write_lock(&context->lock);
 
-	for (i = 0; i < context->table_size; i++) {
+	for (uint32 i = 0; i < context->table_size; i++) {
 		if (struct file_descriptor* descriptor = context->fds[i]) {
 			close_fd(context, descriptor);
 			put_fd(descriptor);
 		}
 	}
 
-	mutex_destroy(&context->io_mutex);
+	rw_lock_destroy(&context->lock);
 
 	remove_node_monitors(context);
 	free(context->fds);
@@ -3655,22 +3652,16 @@ free_io_context(io_context* context)
 static status_t
 resize_monitor_table(struct io_context* context, const int newSize)
 {
-	int status = B_OK;
-
 	if (newSize <= 0 || newSize > MAX_NODE_MONITORS)
 		return B_BAD_VALUE;
 
-	mutex_lock(&context->io_mutex);
+	WriteLocker locker(context->lock);
 
-	if ((size_t)newSize < context->num_monitors) {
-		status = B_BUSY;
-		goto out;
-	}
+	if ((size_t)newSize < context->num_monitors)
+		return B_BUSY;
+
 	context->max_monitors = newSize;
-
-out:
-	mutex_unlock(&context->io_mutex);
-	return status;
+	return B_OK;
 }
 
 
@@ -4575,19 +4566,15 @@ extern "C" status_t
 vfs_get_cwd(dev_t* _mountID, ino_t* _vnodeID)
 {
 	// Get current working directory from io context
-	struct io_context* context = get_current_io_context(false);
-	status_t status = B_OK;
+	const struct io_context* context = get_current_io_context(false);
 
-	mutex_lock(&context->io_mutex);
+	ReadLocker locker(context->lock);
+	if (context->cwd == NULL)
+		return B_ERROR;
 
-	if (context->cwd != NULL) {
-		*_mountID = context->cwd->device;
-		*_vnodeID = context->cwd->id;
-	} else
-		status = B_ERROR;
-
-	mutex_unlock(&context->io_mutex);
-	return status;
+	*_mountID = context->cwd->device;
+	*_vnodeID = context->cwd->id;
+	return B_OK;
 }
 
 
@@ -4895,10 +4882,8 @@ vfs_release_posix_lock(io_context* context, struct file_descriptor* descriptor)
 void
 vfs_exec_io_context(io_context* context)
 {
-	uint32 i;
-
-	for (i = 0; i < context->table_size; i++) {
-		mutex_lock(&context->io_mutex);
+	for (uint32 i = 0; i < context->table_size; i++) {
+		rw_lock_write_lock(&context->lock);
 
 		struct file_descriptor* descriptor = context->fds[i];
 		bool remove = false;
@@ -4910,7 +4895,7 @@ vfs_exec_io_context(io_context* context)
 			remove = true;
 		}
 
-		mutex_unlock(&context->io_mutex);
+		rw_lock_write_unlock(&context->lock);
 
 		if (remove) {
 			close_fd(context, descriptor);
@@ -4924,7 +4909,7 @@ vfs_exec_io_context(io_context* context)
 	of the parent io_control if it is given.
 */
 io_context*
-vfs_new_io_context(io_context* parentContext, bool purgeCloseOnExec)
+vfs_new_io_context(const io_context* parentContext, bool purgeCloseOnExec)
 {
 	io_context* context = (io_context*)malloc(sizeof(io_context));
 	if (context == NULL)
@@ -4935,11 +4920,11 @@ vfs_new_io_context(io_context* parentContext, bool purgeCloseOnExec)
 	memset(context, 0, sizeof(io_context));
 	context->ref_count = 1;
 
-	MutexLocker parentLocker;
+	ReadLocker parentLocker;
 
 	size_t tableSize;
 	if (parentContext != NULL) {
-		parentLocker.SetTo(parentContext->io_mutex, false);
+		parentLocker.SetTo(parentContext->lock, false);
 		tableSize = parentContext->table_size;
 	} else
 		tableSize = DEFAULT_FD_TABLE_SIZE;
@@ -4961,13 +4946,11 @@ vfs_new_io_context(io_context* parentContext, bool purgeCloseOnExec)
 		+ sizeof(struct select_info**) * tableSize
 		+ (tableSize + 7) / 8);
 
-	mutex_init(&context->io_mutex, "I/O context");
+	rw_lock_init(&context->lock, "I/O context");
 
 	// Copy all parent file descriptors
 
 	if (parentContext != NULL) {
-		size_t i;
-
 		mutex_lock(&sIOContextRootLock);
 		context->root = parentContext->root;
 		if (context->root)
@@ -4979,12 +4962,12 @@ vfs_new_io_context(io_context* parentContext, bool purgeCloseOnExec)
 			inc_vnode_ref_count(context->cwd);
 
 		if (parentContext->inherit_fds) {
-			for (i = 0; i < tableSize; i++) {
+			for (size_t i = 0; i < tableSize; i++) {
 				struct file_descriptor* descriptor = parentContext->fds[i];
 
 				if (descriptor != NULL
-					&& (descriptor->open_mode & O_DISCONNECTED) == 0) {
-					bool closeOnExec = fd_close_on_exec(parentContext, i);
+						&& (descriptor->open_mode & O_DISCONNECTED) == 0) {
+					const bool closeOnExec = fd_close_on_exec(parentContext, i);
 					if (closeOnExec && purgeCloseOnExec)
 						continue;
 
@@ -5046,7 +5029,7 @@ vfs_resize_fd_table(struct io_context* context, uint32 newSize)
 
 	TIOC(ResizeIOContext(context, newSize));
 
-	MutexLocker _(context->io_mutex);
+	WriteLocker locker(context->lock);
 
 	uint32 oldSize = context->table_size;
 	int oldCloseOnExitBitmapSize = (oldSize + 7) / 8;
@@ -5216,7 +5199,7 @@ vfs_getrlimit(int resource, struct rlimit* rlp)
 		case RLIMIT_NOFILE:
 		{
 			struct io_context* context = get_current_io_context(false);
-			MutexLocker _(context->io_mutex);
+			ReadLocker _(context->lock);
 
 			rlp->rlim_cur = context->table_size;
 			rlp->rlim_max = MAX_FD_TABLE_SIZE;
@@ -5226,7 +5209,7 @@ vfs_getrlimit(int resource, struct rlimit* rlp)
 		case RLIMIT_NOVMON:
 		{
 			struct io_context* context = get_current_io_context(false);
-			MutexLocker _(context->io_mutex);
+			ReadLocker _(context->lock);
 
 			rlp->rlim_cur = context->max_monitors;
 			rlp->rlim_max = MAX_NODE_MONITORS;
@@ -6241,9 +6224,9 @@ common_fcntl(int fd, int op, size_t argument, bool kernel)
 			// Set file descriptor flags
 
 			// O_CLOEXEC is the only flag available at this time
-			mutex_lock(&context->io_mutex);
+			rw_lock_write_lock(&context->lock);
 			fd_set_close_on_exec(context, fd, (argument & FD_CLOEXEC) != 0);
-			mutex_unlock(&context->io_mutex);
+			rw_lock_write_unlock(&context->lock);
 
 			status = B_OK;
 			break;
@@ -6252,9 +6235,9 @@ common_fcntl(int fd, int op, size_t argument, bool kernel)
 		case F_GETFD:
 		{
 			// Get file descriptor flags
-			mutex_lock(&context->io_mutex);
+			rw_lock_read_lock(&context->lock);
 			status = fd_close_on_exec(context, fd) ? FD_CLOEXEC : 0;
-			mutex_unlock(&context->io_mutex);
+			rw_lock_read_unlock(&context->lock);
 			break;
 		}
 
@@ -6293,9 +6276,9 @@ common_fcntl(int fd, int op, size_t argument, bool kernel)
 		{
 			status = new_fd_etc(context, descriptor.Get(), (int)argument);
 			if (status >= 0) {
-				mutex_lock(&context->io_mutex);
+				rw_lock_write_lock(&context->lock);
 				fd_set_close_on_exec(context, status, op == F_DUPFD_CLOEXEC);
-				mutex_unlock(&context->io_mutex);
+				rw_lock_write_unlock(&context->lock);
 
 				atomic_add(&descriptor->ref_count, 1);
 			}
@@ -8128,25 +8111,23 @@ fs_read_attr(int fd, const char *attribute, uint32 type, off_t pos,
 static status_t
 get_cwd(char* buffer, size_t size, bool kernel)
 {
-	// Get current working directory from io context
-	struct io_context* context = get_current_io_context(kernel);
-	status_t status;
-
 	FUNCTION(("vfs_get_cwd: buf %p, size %ld\n", buffer, size));
 
-	mutex_lock(&context->io_mutex);
+	// Get current working directory from io context
+	const struct io_context* context = get_current_io_context(kernel);
+	rw_lock_read_lock(&context->lock);
 
 	struct vnode* vnode = context->cwd;
-	if (vnode)
+	if (vnode != NULL)
 		inc_vnode_ref_count(vnode);
 
-	mutex_unlock(&context->io_mutex);
+	rw_lock_read_unlock(&context->lock);
 
-	if (vnode) {
-		status = dir_vnode_to_path(vnode, buffer, size, kernel);
-		put_vnode(vnode);
-	} else
-		status = B_ERROR;
+	if (vnode == NULL)
+		return B_ERROR;
+
+	status_t status = dir_vnode_to_path(vnode, buffer, size, kernel);
+	put_vnode(vnode);
 
 	return status;
 }
@@ -8180,13 +8161,13 @@ set_cwd(int fd, char* path, bool kernel)
 
 	// Get current io context and lock
 	context = get_current_io_context(kernel);
-	mutex_lock(&context->io_mutex);
+	rw_lock_write_lock(&context->lock);
 
 	// save the old current working directory first
 	oldDirectory = context->cwd;
 	context->cwd = vnode.Detach();
 
-	mutex_unlock(&context->io_mutex);
+	rw_lock_write_unlock(&context->lock);
 
 	if (oldDirectory)
 		put_vnode(oldDirectory);
@@ -8292,14 +8273,14 @@ _kern_get_next_fd_info(team_id teamID, uint32* _cookie, fd_info* info,
 	BReference<Team> teamReference(team, true);
 
 	// now that we have a team reference, its I/O context won't go away
-	io_context* context = team->io_context;
-	MutexLocker contextLocker(context->io_mutex);
+	const io_context* context = team->io_context;
+	ReadLocker contextLocker(context->lock);
 
 	uint32 slot = *_cookie;
 
 	struct file_descriptor* descriptor;
 	while (slot < context->table_size
-		&& (descriptor = context->fds[slot]) == NULL) {
+			&& (descriptor = context->fds[slot]) == NULL) {
 		slot++;
 	}
 
@@ -4558,7 +4558,7 @@ _user_get_extended_team_info(team_id teamID, uint32 flags, void* buffer,
 	dev_t cwdDevice;
 	ino_t cwdDirectory;
 	{
-		MutexLocker ioContextLocker(ioContext->io_mutex);
+		ReadLocker ioContextLocker(ioContext->lock);
 		vfs_vnode_to_node_ref(ioContext->cwd, &cwdDevice, &cwdDirectory);
 	}
 