locked_pool: Delete.

The SCSI module was the only thing using it, and now it isn't
used at all.
Augustin Cavalier 2024-11-05 14:05:04 -05:00
parent 79efafa0e0
commit 6e1bd12f74
11 changed files with 0 additions and 737 deletions


@@ -66,7 +66,6 @@ AddFilesToFloppyBootArchive system add-ons kernel file_systems
 	: $(SYSTEM_ADD_ONS_FILE_SYSTEMS) ;
 AddFilesToFloppyBootArchive system add-ons kernel generic :
 	ata_adapter
-	locked_pool
 	scsi_periph
 ;
AddFilesToFloppyBootArchive system add-ons kernel partitioning_systems : AddFilesToFloppyBootArchive system add-ons kernel partitioning_systems :
@@ -155,7 +154,6 @@ AddBootModuleSymlinksToFloppyBootArchive [ FFilterByBuildFeatures
 	$(SYSTEM_ADD_ONS_BUS_MANAGERS)
 	openpic@ppc
 	ata_adapter
-	locked_pool
 	scsi_periph
 	generic_x86@x86
 	ahci


@@ -66,7 +66,6 @@ AddFilesToNetBootArchive system add-ons kernel file_systems
 	: $(SYSTEM_ADD_ONS_FILE_SYSTEMS) ;
 AddFilesToNetBootArchive system add-ons kernel generic :
 	ata_adapter
-	locked_pool
 	scsi_periph
 ;
AddFilesToNetBootArchive system add-ons kernel partitioning_systems : AddFilesToNetBootArchive system add-ons kernel partitioning_systems :
@@ -134,7 +133,6 @@ AddBootModuleSymlinksToNetBootArchive
 	silicon_image_3112
 	$(SYSTEM_ADD_ONS_FILE_SYSTEMS)
 	ata_adapter
-	locked_pool
 	scsi_periph
 	intel
 	session


@@ -83,7 +83,6 @@ AddFilesToPackage add-ons kernel generic :
 	ata_adapter
 	bios@x86,x86_64
 	dpc
-	locked_pool
 	mpu401
 	scsi_periph
 	smbios@x86,x86_64
@@ -325,7 +324,6 @@ AddBootModuleSymlinksToPackage
 	intel
 	it8211
 	legacy_sata
-	locked_pool
 	mmc
 	mmc_disk
 	nvme_disk


@@ -48,7 +48,6 @@ AddFilesToPackage add-ons kernel generic :
 	ata_adapter
 	bios@x86,x86_64
 	dpc
-	locked_pool
 	mpu401
 	scsi_periph
 	<module>tty
@@ -242,7 +241,6 @@ AddBootModuleSymlinksToPackage
 	usb
 	openpic@ppc
 	ata_adapter
-	locked_pool
 	scsi_periph
 	ahci
 	generic_ide_pci


@@ -1,71 +0,0 @@
/*
* Copyright 2002-2003, Thomas Kurschel. All rights reserved.
* Distributed under the terms of the MIT License.
*/
#ifndef __LOCKED_POOL_H__
#define __LOCKED_POOL_H__
/*! Paging-safe allocation of locked memory.
Library for managing temporary, locked memory with the condition of
not calling any function during allocation that can lead to paging.
Such memory is needed by drivers that are used to access the page file
but still need locked memory to execute requests.
Basically, a background thread manages a memory pool where blocks
are allocated from. If the pool is empty, allocation is delayed until
either a block is freed or the pool is enlarged by the background
thread.
All memory blocks must have the same size and can be pre-initialized when
added to memory pool (and cleaned-up when removed from pool). The
free list is stored within free memory blocks, so you have to specify
a block offset where the manager can store the list pointers without
interfering with pre-initialization.
You can also specify an alignment, e.g. if the blocks are used for
DMA access, a minimum pool size (in blocks), a maximum pool size
(in blocks) and the size of memory chunks to be added if the entire
pool is allocated.
*/
#include <device_manager.h>
typedef struct locked_pool *locked_pool_cookie;
typedef status_t (*locked_pool_add_hook)(void *block, void *arg);
typedef void (*locked_pool_remove_hook)(void *block, void *arg);
typedef struct {
module_info minfo;
// allocate block
void *(*alloc)(locked_pool_cookie pool);
// free block
void (*free)(locked_pool_cookie pool, void *block);
// create new pool
// block_size - size of one memory block
// alignment - set address bits here that must be zero for block addresses
// next_ofs - offset in block where internal next-pointer can be stored
// chunk_size - how much system memory is to be allocated at once
// max_blocks - maximum number of blocks
// min_free_blocks - minimum number of free blocks
// name - name of pool
// lock_flags - flags to be passed to lock_memory()
// add_hook - hook to be called when a new block is added to the pool (can be NULL)
// remove_hook - hook to be called when a block is removed from the pool (can be NULL)
// hook_arg - value to be passed to hooks as arg
locked_pool_cookie (*create)(int block_size, int alignment, int next_ofs,
int chunk_size, int max_blocks, int min_free_blocks, const char *name,
uint32 lock_flags, locked_pool_add_hook add_hook,
locked_pool_remove_hook remove_hook, void *hook_arg);
void (*destroy)(locked_pool_cookie pool);
} locked_pool_interface;
#define LOCKED_POOL_MODULE_NAME "generic/locked_pool/v1"
#endif
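For context, a rough sketch of how a driver could have consumed this interface through the kernel module system. Only LOCKED_POOL_MODULE_NAME, the interface functions and the header path come from the sources above; the request structure, block layout, sizes and flags are made-up illustration values.

#include <KernelExport.h>
#include <module.h>
#include <drivers/locked_pool.h>

/* hypothetical request block; its first field doubles as the free-list
   pointer, so next_ofs is passed as 0 below */
typedef struct my_request {
	void *next;
	uint8 data[256];
} my_request;

static locked_pool_interface *sPool;
static locked_pool_cookie sRequests;

static status_t
init_request_pool(void)
{
	status_t status = get_module(LOCKED_POOL_MODULE_NAME,
		(module_info **)&sPool);
	if (status != B_OK)
		return status;

	/* 16 byte alignment (mask 15), grow in 16 KB chunks, at most 64
	   blocks, keep at least 4 free - all values chosen arbitrarily */
	sRequests = sPool->create(sizeof(my_request), 15, 0, 16 * 1024, 64, 4,
		"my request pool", B_DMA_IO, NULL, NULL, NULL);
	if (sRequests == NULL) {
		put_module(LOCKED_POOL_MODULE_NAME);
		return B_NO_MEMORY;
	}
	return B_OK;
}

static void
execute_one_request(void)
{
	/* may block until a block is freed or the pool has been enlarged,
	   but never touches pageable memory itself */
	my_request *request = (my_request *)sPool->alloc(sRequests);

	/* ... fill in and issue the request ... */

	sPool->free(sRequests, request);
}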


@@ -5,12 +5,10 @@
 #include "scsi_internal.h"
-locked_pool_interface *locked_pool;
 device_manager_info *pnp;
 module_dependency module_dependencies[] = {
 	{ B_DEVICE_MANAGER_MODULE_NAME, (module_info **)&pnp },
-	{ LOCKED_POOL_MODULE_NAME, (module_info **)&locked_pool },
 	{}
 };


@@ -11,7 +11,6 @@
 #include <bus/SCSI.h>
 #include <scsi_cmds.h>
-#include <locked_pool.h>
 #include <device_manager.h>
 #include <lock.h>

@@ -216,7 +215,6 @@ enum {
 };
-extern locked_pool_interface *locked_pool;
 extern device_manager_info *pnp;
 extern scsi_for_sim_interface scsi_for_sim_module;


@@ -3,7 +3,6 @@ SubDir HAIKU_TOP src add-ons kernel generic ;
 SubInclude HAIKU_TOP src add-ons kernel generic ata_adapter ;
 SubInclude HAIKU_TOP src add-ons kernel generic bios ;
 SubInclude HAIKU_TOP src add-ons kernel generic dpc ;
-SubInclude HAIKU_TOP src add-ons kernel generic locked_pool ;
 SubInclude HAIKU_TOP src add-ons kernel generic mpu401 ;
 SubInclude HAIKU_TOP src add-ons kernel generic scsi_periph ;
 SubInclude HAIKU_TOP src add-ons kernel generic smbios ;


@@ -1,12 +0,0 @@
SubDir HAIKU_TOP src add-ons kernel generic locked_pool ;
UsePrivateKernelHeaders ;
# enable debug output, if debugging is enabled
if $(DEBUG) != 0 {
SubDirCcFlags [ FDefines TRACE_LOCKED_POOL=1 ] ;
}
KernelAddon locked_pool :
locked_pool.c
;


@@ -1,88 +0,0 @@
/*
** Copyright 2002/03, Thomas Kurschel. All rights reserved.
** Distributed under the terms of the MIT License.
*/
/*
Macros for doubly linked lists
*/
#ifndef _DL_LIST_H
#define _DL_LIST_H
#define REMOVE_DL_LIST( item, head, prefix ) \
do { \
if( item->prefix##prev ) \
item->prefix##prev->prefix##next = item->prefix##next; \
else \
head = item->prefix##next; \
\
if( item->prefix##next ) \
item->prefix##next->prefix##prev = item->prefix##prev; \
} while( 0 )
#define ADD_DL_LIST_HEAD( item, head, prefix ) \
do { \
item->prefix##next = head; \
item->prefix##prev = NULL; \
\
if( (head) ) \
(head)->prefix##prev = item; \
\
(head) = item; \
} while( 0 )
#define REMOVE_CDL_LIST( item, head, prefix ) \
do { \
item->prefix##next->prefix##prev = item->prefix##prev; \
item->prefix##prev->prefix##next = item->prefix##next; \
\
if( item == (head) ) { \
if( item->prefix##next != item ) \
(head) = item->prefix##next; \
else \
(head) = NULL; \
} \
} while( 0 )
#define ADD_CDL_LIST_TAIL( item, type, head, prefix ) \
do { \
type *old_head = head; \
\
if( old_head ) { \
type *first, *last; \
\
first = old_head; \
last = first->prefix##prev; \
\
item->prefix##next = first; \
item->prefix##prev = last; \
first->prefix##prev = item; \
last->prefix##next = item; \
} else { \
head = item; \
item->prefix##next = item->prefix##prev = item; \
} \
} while( 0 )
#define ADD_CDL_LIST_HEAD( item, type, head, prefix ) \
do { \
type *old_head = head; \
\
head = item; \
if( old_head ) { \
type *first, *last; \
\
first = old_head; \
last = first->prefix##prev; \
\
item->prefix##next = first; \
item->prefix##prev = last; \
first->prefix##prev = item; \
last->prefix##next = item; \
} else { \
item->prefix##next = item->prefix##prev = item; \
} \
} while( 0 )
#endif
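A brief sketch of the intended usage pattern, with a hypothetical node type; the only requirement the macros impose is that the linked structure carries prefix##next and prefix##prev pointers.

#include <stddef.h>
#include "dl_list.h"

typedef struct my_node {
	int value;
	/* "list_" is the prefix handed to the macros below */
	struct my_node *list_next, *list_prev;
} my_node;

static my_node *sHead = NULL;	/* head of a plain (non-cyclic) list */

static void
push_node(my_node *node)
{
	/* link node in front of the current head */
	ADD_DL_LIST_HEAD(node, sHead, list_);
}

static void
unlink_node(my_node *node)
{
	/* unlink node from wherever it sits in the list */
	REMOVE_DL_LIST(node, sHead, list_);
}

The circular variants (ADD_CDL_LIST_HEAD, ADD_CDL_LIST_TAIL, REMOVE_CDL_LIST) follow the same field-naming convention, with head->prefix##prev pointing at the tail.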


@@ -1,553 +0,0 @@
/*
* Copyright 2008, Axel Dörfler, axeld@pinc-software.de.
* Copyright 2002/03, Thomas Kurschel. All rights reserved.
*
* Distributed under the terms of the MIT License.
*/
/*! Deadlock-safe allocation of locked memory.
Allocation/freeing is optimized for speed. Count of <sem>
is the number of free blocks and thus should be modified
by each alloc() and free(). As this count is only crucial if
an allocation is waiting for a free block, <sem> is only
updated on demand - the correct number of free blocks is
stored in <free_blocks>. There are only three cases where
<sem> is updated:
- if an allocation fails because there is no free block left;
in this case, <num_waiting> is increased by one and then the
thread makes <sem> up-to-date and waits for a free block
via <sem> in one step; finally, <num_waiting> is decreased
by one
- if a block is freed and <num_waiting> is non-zero;
here, count of <sem> is updated to release threads waiting
for allocation
- if a new chunk of blocks is allocated;
same as previous case
*/
#include <KernelExport.h>
#include <drivers/locked_pool.h>
#include <lock.h>
#include "dl_list.h"
#include <string.h>
#include <module.h>
#include <malloc.h>
//#define TRACE_LOCKED_POOL
#ifdef TRACE_LOCKED_POOL
# define TRACE(x) dprintf x
#else
# define TRACE(x) ;
#endif
// info about pool
typedef struct locked_pool {
struct mutex mutex; // to be used whenever some variable of the first
// block of this structure is read or modified
int free_blocks; // # free blocks
int num_waiting; // # waiting allocations
void *free_list; // list of free blocks
int next_ofs; // offset of next-pointer in block
sem_id sem; // count=number of free blocks
char *name;
size_t header_size; // effective size of chunk header
struct chunk_header *chunks;// list of chunks
size_t block_size; // size of memory block
uint32 lock_flags; // flags for lock_memory()
int min_free_blocks; // min. number of free blocks
int num_blocks; // cur. number of blocks
int max_blocks; // maximum number of blocks
int enlarge_by; // number of blocks to enlarge pool by
size_t alignment; // block alignment restrictions
locked_pool_add_hook add_hook; // user hooks
locked_pool_remove_hook remove_hook;
void *hook_arg; // arg for user hooks
struct locked_pool *prev, *next; // global cyclic list
} locked_pool;
// header of memory chunk
typedef struct chunk_header {
struct chunk_header *next; // free-list
area_id area; // underlying area
int num_blocks; // size in blocks
} chunk_header;
// global list of pools
static locked_pool *sLockedPools;
// mutex to protect sLockedPools
static mutex sLockedPoolsLock;
// true, if thread should shut down
static bool sShuttingDown;
// background thread to enlarge pools
static thread_id sEnlargerThread;
// semaphore to wake up enlarger thread
static sem_id sEnlargerSemaphore;
// macro to access next-pointer in free block
#define NEXT_PTR(pool, a) ((void **)(((char *)a) + pool->next_ofs))
/*! Enlarge memory pool by <numBlocks> blocks */
static status_t
enlarge_pool(locked_pool *pool, int numBlocks)
{
void **next;
int i;
int numWaiting;
status_t status;
area_id area;
chunk_header *chunk;
size_t chunkSize;
void *block, *lastBlock;
TRACE(("enlarge_pool()\n"));
// get memory directly from VM; we don't let user code access memory
chunkSize = numBlocks * pool->block_size + pool->header_size;
chunkSize = (chunkSize + B_PAGE_SIZE - 1) & ~(B_PAGE_SIZE - 1);
status = area = create_area(pool->name,
(void **)&chunk, B_ANY_KERNEL_ADDRESS, chunkSize,
pool->lock_flags, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
if (status < B_OK) {
dprintf("cannot enlarge pool (%s)\n", strerror(status));
// TODO: we should wait a bit and try again!
return status;
}
chunk->area = area;
chunk->num_blocks = numBlocks;
// create free_list and call add-hook
// very important: we first create a freelist within the chunk,
// going from lower to higher addresses; at the end of the loop,
// "next" points to the head of the list and "lastBlock" to the
// last list node!
next = NULL;
lastBlock = (char *)chunk + pool->header_size +
(numBlocks-1) * pool->block_size;
for (i = 0, block = lastBlock; i < numBlocks;
++i, block = (char *)block - pool->block_size)
{
if (pool->add_hook) {
if ((status = pool->add_hook(block, pool->hook_arg)) < B_OK)
break;
}
*NEXT_PTR(pool, block) = next;
next = block;
}
if (i < numBlocks) {
// oops - pre-init failed somehow
// call remove-hook for blocks that we called add-hook for
int j;
for (block = lastBlock, j = 0; j < i; ++j,
block = (char *)block - pool->block_size) {
if (pool->remove_hook)
pool->remove_hook(block, pool->hook_arg);
}
// destroy area and give up
delete_area(chunk->area);
return status;
}
// add new blocks to pool
mutex_lock(&pool->mutex);
// see remarks about initialising list within chunk
*NEXT_PTR(pool, lastBlock) = pool->free_list;
pool->free_list = next;
chunk->next = pool->chunks;
pool->chunks = chunk;
pool->num_blocks += numBlocks;
pool->free_blocks += numBlocks;
TRACE(("done - num_blocks=%d, free_blocks=%d, num_waiting=%d\n",
pool->num_blocks, pool->free_blocks, pool->num_waiting));
numWaiting = min_c(pool->num_waiting, numBlocks);
pool->num_waiting -= numWaiting;
mutex_unlock(&pool->mutex);
// release threads that wait for empty blocks
release_sem_etc(pool->sem, numWaiting, 0);
return B_OK;
}
/*! Background thread that adjusts pool size */
static int32
enlarger_thread(void *arg)
{
while (1) {
locked_pool *pool;
acquire_sem(sEnlargerSemaphore);
if (sShuttingDown)
break;
// protect traversing of global list and
// block destroy_pool() to not clean up a pool we are enlarging
mutex_lock(&sLockedPoolsLock);
for (pool = sLockedPools; pool; pool = pool->next) {
int num_free;
// this mutex is probably not necessary (at least on 80x86)
// but I'm not sure about atomicity of other architectures
// (anyway - this routine is not performance critical)
mutex_lock(&pool->mutex);
num_free = pool->free_blocks;
mutex_unlock(&pool->mutex);
// perhaps blocks got freed meanwhile, i.e. pool is large enough
if (num_free > pool->min_free_blocks)
continue;
// enlarge pool as much as possible
// never create more blocks than defined - the caller may have
// a good reason for choosing the limit
if (pool->num_blocks < pool->max_blocks) {
enlarge_pool(pool,
min(pool->enlarge_by, pool->max_blocks - pool->num_blocks));
}
}
mutex_unlock(&sLockedPoolsLock);
}
return 0;
}
/*! Free all chunks belonging to pool */
static void
free_chunks(locked_pool *pool)
{
chunk_header *chunk, *next;
for (chunk = pool->chunks; chunk; chunk = next) {
int i;
void *block, *lastBlock;
next = chunk->next;
lastBlock = (char *)chunk + pool->header_size +
(chunk->num_blocks-1) * pool->block_size;
// don't forget to call remove-hook
for (i = 0, block = lastBlock; i < chunk->num_blocks;
++i, block = (char *)block - pool->block_size) {
if (pool->remove_hook)
pool->remove_hook(block, pool->hook_arg);
}
delete_area(chunk->area);
}
pool->chunks = NULL;
}
/*! Global init, executed when module is loaded */
static status_t
init_locked_pool(void)
{
status_t status;
mutex_init(&sLockedPoolsLock, "locked_pool_global_list");
status = sEnlargerSemaphore = create_sem(0,
"locked_pool_enlarger");
if (status < B_OK)
goto err2;
sLockedPools = NULL;
sShuttingDown = false;
status = sEnlargerThread = spawn_kernel_thread(enlarger_thread,
"locked_pool_enlarger", B_NORMAL_PRIORITY, NULL);
if (status < B_OK)
goto err3;
resume_thread(sEnlargerThread);
return B_OK;
err3:
delete_sem(sEnlargerSemaphore);
err2:
mutex_destroy(&sLockedPoolsLock);
return status;
}
/*! Global uninit, executed before module is unloaded */
static status_t
uninit_locked_pool(void)
{
sShuttingDown = true;
release_sem(sEnlargerSemaphore);
wait_for_thread(sEnlargerThread, NULL);
delete_sem(sEnlargerSemaphore);
mutex_destroy(&sLockedPoolsLock);
return B_OK;
}
// #pragma mark - Module API
/*! Alloc memory from pool */
static void *
pool_alloc(locked_pool *pool)
{
void *block;
TRACE(("pool_alloc()\n"));
mutex_lock(&pool->mutex);
--pool->free_blocks;
if (pool->free_blocks > 0) {
// there are free blocks - grab one
TRACE(("freeblocks=%d, free_list=%p\n",
pool->free_blocks, pool->free_list));
block = pool->free_list;
pool->free_list = *NEXT_PTR(pool, block);
TRACE(("new free_list=%p\n", pool->free_list));
mutex_unlock(&pool->mutex);
return block;
}
// entire pool is in use
// we should do a ++free_blocks here, but this can lead to a race
// condition: when we wait for <sem> and a block gets released
// and another thread calls alloc() before we grab the freshly freed
// block, the other thread could overtake us and grab the free block
// instead! By leaving free_blocks at a negative value, the other
// thread cannot see the free block and thus will leave it for us
// tell them we are waiting on semaphore
++pool->num_waiting;
TRACE(("%d waiting allocs\n", pool->num_waiting));
mutex_unlock(&pool->mutex);
// awake background thread
release_sem_etc(sEnlargerSemaphore, 1, B_DO_NOT_RESCHEDULE);
// make semaphore up-to-date and wait until a block is available
acquire_sem(pool->sem);
mutex_lock(&pool->mutex);
TRACE(("continuing alloc (%d free blocks)\n", pool->free_blocks));
block = pool->free_list;
pool->free_list = *NEXT_PTR(pool, block);
mutex_unlock(&pool->mutex);
return block;
}
static void
pool_free(locked_pool *pool, void *block)
{
TRACE(("pool_free()\n"));
mutex_lock(&pool->mutex);
// add to free list
*NEXT_PTR(pool, block) = pool->free_list;
pool->free_list = block;
++pool->free_blocks;
TRACE(("freeblocks=%d, free_list=%p\n", pool->free_blocks,
pool->free_list));
if (pool->num_waiting == 0) {
// if no one is waiting, this is it
mutex_unlock(&pool->mutex);
return;
}
// someone is waiting on the semaphore
TRACE(("%d waiting allocs\n", pool->num_waiting));
pool->num_waiting--;
mutex_unlock(&pool->mutex);
// now it is up-to-date and waiting allocations can be continued
release_sem(pool->sem);
return;
}
static locked_pool *
create_pool(int block_size, int alignment, int next_ofs,
int chunkSize, int max_blocks, int min_free_blocks,
const char *name, uint32 lock_flags,
locked_pool_add_hook add_hook,
locked_pool_remove_hook remove_hook, void *hook_arg)
{
locked_pool *pool;
status_t status;
TRACE(("create_pool()\n"));
pool = (locked_pool *)malloc(sizeof(*pool));
if (pool == NULL)
return NULL;
memset(pool, 0, sizeof(*pool));
mutex_init(&pool->mutex, "locked_pool");
if ((status = pool->sem = create_sem(0, "locked_pool")) < 0)
goto err1;
if ((pool->name = strdup(name)) == NULL) {
status = B_NO_MEMORY;
goto err3;
}
pool->alignment = alignment;
// take care that there is always enough space to fulfill alignment
pool->block_size = (block_size + pool->alignment) & ~pool->alignment;
pool->next_ofs = next_ofs;
pool->lock_flags = lock_flags;
pool->header_size = max((sizeof( chunk_header ) + pool->alignment) & ~pool->alignment,
pool->alignment + 1);
pool->enlarge_by = (((chunkSize + B_PAGE_SIZE - 1) & ~(B_PAGE_SIZE - 1)) - pool->header_size)
/ pool->block_size;
pool->max_blocks = max_blocks;
pool->min_free_blocks = min_free_blocks;
pool->free_blocks = 0;
pool->num_blocks = 0;
pool->num_waiting = 0;
pool->free_list = NULL;
pool->add_hook = add_hook;
pool->remove_hook = remove_hook;
pool->hook_arg = hook_arg;
pool->chunks = NULL;
TRACE(("block_size=%d, alignment=%d, next_ofs=%d, wiring_flags=%d, header_size=%d, enlarge_by=%d\n",
(int)pool->block_size, (int)pool->alignment, (int)pool->next_ofs,
(int)pool->lock_flags, (int)pool->header_size, pool->enlarge_by));
// if there is a minimum size, enlarge pool right now
if (min_free_blocks > 0) {
if ((status = enlarge_pool(pool, min(pool->enlarge_by, pool->max_blocks))) < 0)
goto err4;
}
// add to global list, so enlarger thread takes care of pool
mutex_lock(&sLockedPoolsLock);
ADD_DL_LIST_HEAD(pool, sLockedPools, );
mutex_unlock(&sLockedPoolsLock);
return pool;
err4:
free(pool->name);
err3:
delete_sem(pool->sem);
err1:
mutex_destroy(&pool->mutex);
free(pool);
return NULL;
}
static void
destroy_pool(locked_pool *pool)
{
TRACE(("destroy_pool()\n"));
// first, remove from global list, so enlarger thread
// won't touch this pool anymore
mutex_lock(&sLockedPoolsLock);
REMOVE_DL_LIST(pool, sLockedPools, );
mutex_unlock(&sLockedPoolsLock);
// then cleanup pool
free_chunks(pool);
free(pool->name);
delete_sem(pool->sem);
mutex_destroy(&pool->mutex);
free(pool);
}
static status_t
std_ops(int32 op, ...)
{
switch (op) {
case B_MODULE_INIT:
return init_locked_pool();
case B_MODULE_UNINIT:
return uninit_locked_pool();
default:
return B_ERROR;
}
}
locked_pool_interface interface = {
{
LOCKED_POOL_MODULE_NAME,
0,
std_ops
},
pool_alloc,
pool_free,
create_pool,
destroy_pool
};
module_info *modules[] = {
&interface.minfo,
NULL
};