network: Overhaul TUN/TAP subsystem.

* Rename the "tun" network device to "tunnel". FreeBSD calls theirs
   "tuntap" but speaks of both TUN and TAP devices as interfaces for
   tunnels. The other BSDs seem to do likewise.

 * Fold the "tun" driver into the "tunnel" network device. The
   network device now publishes entries in devfs when interfaces
   are created, and unpublishes them when interfaces are destroyed.

   This removes the need for the driver and device to communicate
   through a file descriptor, and thus allows the receive queue
   to be totally eliminated, massively simplifying that logic.

 * Use standard net-stack FIFOs instead of TCP BufferQueue, which is
   specialized to TCP's needs in far too many ways. Thanks to the
   previous commit adding support for interrupting semaphore waits,
   we can use the FIFO wait mechanisms, too.

 * Restructure the TAP logic, and generate MAC addresses more like
   Linux does.

 * Actually set type = IFT_TUN, and use the "loopback" frame handler
   instead of the "ethernet" frame handler. This allows significant
   cleanup of the header handling logic.

 * In TUN mode, reject packets that don't look like IP packets.

 * Delete "tunconfig"; it was mostly stubs and is now unnecessary.

TUN mode tested and confirmed as working by kallisti5 with OpenVPN.
TAP mode partially tested, but not yet confirmed as working.

Fixes #18673.

Change-Id: Ibd803139474e8db556a4f567901da15ee4083621
Reviewed-on: https://review.haiku-os.org/c/haiku/+/7143
Reviewed-by: Alex von Gluck IV <kallisti5@unixzen.com>
This commit is contained in:
Augustin Cavalier 2023-11-24 21:01:51 -05:00 committed by Alex von Gluck IV
parent daf1dd9c40
commit b6c24e6b40
17 changed files with 586 additions and 927 deletions

View File

@ -717,7 +717,6 @@ rule ArchitectureSetupWarnings architecture
EnableWerror src add-ons kernel drivers network ether virtio ;
# EnableWerror src add-ons kernel drivers network ether vt612x ;
EnableWerror src add-ons kernel drivers network ether wb840 ;
EnableWerror src add-ons kernel drivers network tun ;
# EnableWerror src add-ons kernel drivers network wlan ;
EnableWerror src add-ons kernel drivers network wwan ;
EnableWerror src add-ons kernel drivers ports ;

View File

@ -125,7 +125,7 @@ SYSTEM_SERVERS = [ FFilterByBuildFeatures
SYSTEM_NETWORK_DEVICES =
ethernet
loopback
tun
tunnel
;
SYSTEM_NETWORK_DATALINK_PROTOCOLS =
@ -206,7 +206,6 @@ SYSTEM_ADD_ONS_DRIVERS_NET = [ FFilterByBuildFeatures
rtl8125 rtl8139 rtl81xx
sis19x sis900 syskonnect
via_rhine vt612x
tun_driver
}@ # x86,x86_64,riscv64
etherpci

View File

@ -1,64 +1,9 @@
/*
* Copyright 2003-2018 Haiku, Inc. All Rights Reserved.
* Copyright 2023, Haiku, Inc. All rights reserved.
* Distributed under the terms of the MIT License.
*/
#ifndef _NET_IF_TUN_H
#define _NET_IF_TUN_H
#include <sys/sockio.h>
/*
* public API for tun/tap device.
* This API is compatible with the Linux tun/tap driver from
* http://vtun.sourceforge.net/tun/index.html
*/
/* max of each type */
#define TUN_MAX_DEV 10
/*255*/
/* TX queue size */
#define TUN_TXQ_SIZE 10
/* Max frame size */
#define TUN_MAX_FRAME 4096
/* TUN device flags */
#define TUN_TUN_DEV 0x0001
#define TUN_TAP_DEV 0x0002
#define TUN_TYPE_MASK 0x000f
/* not yet*//*#define TUN_FASYNC 0x0010*/
#define TUN_NOCHECKSUM 0x0020
#define TUN_NO_PI 0x0040
#define TUN_IFF_SET 0x1000
/* Ioctl defines */
/* XXX: NOT OFFICIAL */
#define TUNSETNOCSUM (B_DEVICE_OP_CODES_END+0x90)
#define TUNSETDEBUG (B_DEVICE_OP_CODES_END+0x91)
#define TUNSETIFF (B_DEVICE_OP_CODES_END+0x92)
/* get/set MAC address */
//#define SIOCGIFHWADDR (BONE_SOCKIO_IOCTL_BASE+0x95)
//#define SIOCSIFHWADDR (BONE_SOCKIO_IOCTL_BASE+0x96)
/* TUNSETIFF ifr flags */
#define IFF_TUN 0x0001
#define IFF_TAP 0x0002
#define IFF_NO_PI 0x1000
/* XXX: fix the confusion about which *NO_PI go where ! */
struct tun_pi {
unsigned short flags;
unsigned short proto;
};
/* tun_pi::flags */
#define TUN_PKT_STRIP 0x0001
#define TUN_DEVICE "/dev/config/tun"
#endif /* __IF_TUN_H */
#endif /* _NET_IF_TUN_H */

View File

@ -1,24 +0,0 @@
/*
* Copyright 2008-2019, Haiku, Inc. All Rights Reserved.
* Distributed under the terms of the MIT license.
*/
#ifndef NET_TUN_H
#define NET_TUN_H
#include <sys/socket.h>
#include <net_device.h>
// name of the kernel tun interface
#define NET_TUN_MODULE_NAME "network/devices/tun/v1"
struct tun_module_info {
struct net_device_module_info;
status_t (*tun_read)(net_device* device, net_buffer* buffer);
status_t (*tun_write)(net_device* device, net_buffer** _buffer);
};
#endif // NET_TUN_H

View File

@ -1,6 +1,5 @@
SubDir HAIKU_TOP src add-ons kernel drivers network ;
HaikuSubInclude ether ;
HaikuSubInclude tun ;
HaikuSubInclude wlan ;
HaikuSubInclude wwan ;

View File

@ -1,15 +0,0 @@
SubDir HAIKU_TOP src add-ons kernel drivers network tun ;
SubDirHdrs [ FDirName $(HAIKU_TOP) src add-ons kernel network protocols tcp ] ;
UsePrivateHeaders drivers kernel net ;
UsePrivateKernelHeaders ;
UsePrivateSystemHeaders ;
KernelAddon tun_driver :
BufferQueue.cpp
driver.cpp
;
SEARCH on [ FGristFiles
BufferQueue.cpp
] = [ FDirName $(HAIKU_TOP) src add-ons kernel network protocols tcp ] ;

View File

@ -1,313 +0,0 @@
/*
* TUN/TAP Network Tunnel Driver for Haiku
* Copyright 2003 mmu_man, revol@free.fr
* Copyright 2023 Sean Brady, swangeon@gmail.com
*
* All rights reserved. Distributed under the terms of the MIT License.
*/
#include <Drivers.h>
#include <KernelExport.h>
#include <OS.h>
#include <Select.h>
#include "BufferQueue.h"
#include <net_buffer.h>
#include <condition_variable.h>
#include <fs/select_sync_pool.h>
#include <net/if_tun.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <sys/param.h>
#include <sys/types.h>
#include <unistd.h>
#include <util/AutoLock.h>
// #define TUN_DRIVER_NAME "tun/0"
#define TAP_DRIVER_NAME "tap/0"
#define NET_TUN_MODULE_NAME "network/devices/tun/v1"
#define BUFFER_QUEUE_MAX 30000
const char* device_names[] = {TAP_DRIVER_NAME, NULL};
typedef struct tun_struct {
uint32_t name[3];
unsigned long flags;
BufferQueue* sendQueue;
BufferQueue* recvQueue;
ConditionVariable* readWait;
mutex readLock;
union {
struct {
mutex selectLock;
} app;
};
} tun_struct;
int32 api_version = B_CUR_DRIVER_API_VERSION;
static BufferQueue gAppQueue(BUFFER_QUEUE_MAX);
static BufferQueue gIntQueue(BUFFER_QUEUE_MAX);
static ConditionVariable gIntWait;
static select_sync_pool* gSelectPool = NULL;
struct net_buffer_module_info* gBufferModule;
static net_buffer*
create_filled_buffer(uint8* data, size_t bytes)
{
	// Allocate a net_buffer (with 256 bytes of header space) and copy the
	// caller's data into it. Returns NULL on allocation or append failure;
	// the half-built buffer is freed before returning.
	net_buffer* buffer = gBufferModule->create(256);
	if (buffer == NULL)
		return NULL;
	status_t status = gBufferModule->append(buffer, data, bytes);
	if (status != B_OK) {
		gBufferModule->free(buffer);
		return NULL;
	}
	return buffer;
}
// Pop one buffer from `queueToUse` and copy up to *numbytes of it into
// `data`. On success *numbytes is set to the packet size (0 when the queue
// was empty). The dequeued buffer is always freed.
static status_t
retrieve_packet(BufferQueue* queueToUse, void* data, size_t* numbytes)
{
	net_buffer* buffer;
	status_t status = B_OK;

	if (queueToUse->Used() > 0) {
		// Previously a second, shadowing `status` was declared here, so a
		// failure from the copy-out below was silently discarded and the
		// caller always saw B_OK. Reuse the outer variable instead.
		status = queueToUse->Get(*numbytes, true, &buffer);
		if (status != B_OK)
			return status;

		*numbytes = buffer->size;
		status = gBufferModule->read(buffer, 0, data, *numbytes);
		gBufferModule->free(buffer);
	} else
		*numbytes = 0;

	return status;
}
static void
notify_select_helper(bool readable, bool writable)
{
if (readable)
notify_select_event_pool(gSelectPool, B_SELECT_READ);
if (writable)
notify_select_event_pool(gSelectPool, B_SELECT_WRITE);
}
static void
tun_notify(tun_struct* tun)
{
	/* This function is just for the APP side but both sides need to call it */
	bool select_pool_check = gSelectPool != NULL;
	if (select_pool_check) {
		bool readable = tun->recvQueue->Used() > 0;
		bool writable = tun->sendQueue->Used() < BUFFER_QUEUE_MAX;
		// NOTE(review): O_NONBLOCK in tun->flags appears to double as an
		// "this cookie is the APP side" marker (it is only ever set via
		// B_SET_NONBLOCKING_IO) — an overloaded meaning; confirm intent.
		if ((tun->flags & O_NONBLOCK) != 0) // APP Side
			notify_select_helper(readable, writable);
		else /* IFACE side switches since its queues are flipped from APP*/
			notify_select_helper(writable, readable);
	}
}
status_t
init_hardware(void)
{
/* No Hardware */
dprintf("tun:init_hardware()\n");
return B_NO_ERROR;
}
status_t
init_driver(void)
{
/* Init driver */
dprintf("tun:init_driver()\n");
status_t status = get_module(NET_BUFFER_MODULE_NAME, (module_info**)&gBufferModule);
if (status != B_OK)
return status;
return B_OK;
}
void
uninit_driver(void)
{
dprintf("tun:uninit_driver()\n");
put_module(NET_BUFFER_MODULE_NAME);
}
status_t
tun_open(const char* name, uint32 flags, void** cookie)
{
/* Setup driver for interface here */
tun_struct* tun = new tun_struct();
memcpy(tun->name, "app", sizeof(tun->name));
tun->sendQueue = &gIntQueue;
tun->recvQueue = &gAppQueue;
tun->flags = 0;
tun->readWait = &gIntWait;
mutex_init(&tun->readLock, "read_avail");
mutex_init(&tun->app.selectLock, "select_lock");
*cookie = static_cast<void*>(tun);
return B_OK;
}
status_t
tun_close(void* cookie)
{
/* Close interface here */
tun_struct* tun = static_cast<tun_struct*>(cookie);
if ((tun->flags & O_NONBLOCK) == 0) {
tun->readWait->NotifyAll(B_ERROR);
snooze(10); // Due to a lock timing issue, we sleep for 10ms
}
return B_OK;
}
status_t
tun_free(void* cookie)
{
tun_struct* tun = static_cast<tun_struct*>(cookie);
mutex_destroy(&tun->readLock);
if ((tun->flags & O_NONBLOCK) != 0)
mutex_destroy(&tun->app.selectLock);
delete tun;
return B_OK;
}
status_t
tun_ioctl(void* cookie, uint32 op, void* data, size_t len)
{
/* IOCTL for driver */
tun_struct* tun = static_cast<tun_struct*>(cookie);
switch (op) {
case TUNSETIFF: // Reconfigures tun_struct to interface settings
memcpy(tun->name, "int", sizeof(tun->name));
tun->sendQueue = &gAppQueue;
tun->recvQueue = &gIntQueue;
mutex_destroy(&tun->app.selectLock);
memset(&tun->app, 0, sizeof(tun->app));
return B_OK;
case B_SET_NONBLOCKING_IO:
tun->flags |= O_NONBLOCK;
return B_OK;
default:
return B_DEV_INVALID_IOCTL;
};
return B_OK;
}
status_t
tun_read(void* cookie, off_t position, void* data, size_t* numbytes)
{
tun_struct* tun = static_cast<tun_struct*>(cookie);
status_t status;
MutexLocker _(tun->readLock); // released on exit
/* IFACE side is blocking I/O */
if ((tun->flags & O_NONBLOCK) == 0) {
while (tun->recvQueue->Used() == 0) {
status = tun->readWait->Wait(&tun->readLock, B_CAN_INTERRUPT);
if (status != B_OK)
return status;
}
}
status = retrieve_packet(tun->recvQueue, data, numbytes);
tun_notify(tun);
return status;
}
status_t
tun_write(void* cookie, off_t position, const void* data, size_t* numbytes)
{
tun_struct* tun = static_cast<tun_struct*>(cookie);
size_t used = tun->sendQueue->Used();
// Buffer is full or will be so we have to drop the packet
if ((used + *numbytes) >= BUFFER_QUEUE_MAX)
return B_WOULD_BLOCK;
net_buffer* packet = create_filled_buffer((uint8*)data, *numbytes);
if (packet == NULL)
return B_ERROR;
tun->sendQueue->Add(packet);
if ((tun->flags & O_NONBLOCK) != 0)
tun->readWait->NotifyOne();
tun_notify(tun);
return B_OK;
}
status_t
tun_select(void* cookie, uint8 event, uint32 ref, selectsync* sync)
{
tun_struct* tun = static_cast<tun_struct*>(cookie);
MutexLocker _(tun->app.selectLock);
bool isReadable = tun->recvQueue->Used() > 0;
bool isWritable = tun->sendQueue->Used() < BUFFER_QUEUE_MAX;
if (event != B_SELECT_READ && event != B_SELECT_WRITE)
return B_BAD_VALUE;
status_t status = add_select_sync_pool_entry(&gSelectPool, sync, event);
if (status != B_OK)
return B_BAD_VALUE;
if (event == B_SELECT_READ && isReadable)
notify_select_event(sync, event);
if (event == B_SELECT_WRITE && isWritable)
notify_select_event(sync, event);
return status;
}
status_t
tun_deselect(void* cookie, uint8 event, selectsync* sync)
{
tun_struct* tun = static_cast<tun_struct*>(cookie);
MutexLocker _(tun->app.selectLock);
if (event != B_SELECT_READ && event != B_SELECT_WRITE)
return B_BAD_VALUE;
return remove_select_sync_pool_entry(&gSelectPool, sync, event);
}
const char**
publish_devices()
{
return device_names;
}
device_hooks tun_hooks = {
tun_open,
tun_close,
tun_free,
tun_ioctl,
tun_read,
tun_write,
tun_select,
tun_deselect,
NULL,
NULL
};
device_hooks*
find_device(const char* name)
{
return &tun_hooks;
}

View File

@ -50,8 +50,7 @@ status_t
loopback_frame_init(struct net_interface*interface, net_domain* domain,
net_datalink_protocol** _protocol)
{
// We only support a single type!
if (interface->device->type != IFT_LOOP)
if (interface->device->type != IFT_LOOP && interface->device->type != IFT_TUN)
return B_BAD_TYPE;
loopback_frame_protocol* protocol;
@ -65,8 +64,15 @@ loopback_frame_init(struct net_interface*interface, net_domain* domain,
if (status != B_OK)
goto err1;
// Locally received buffers don't need a domain device handler, as the
// buffer reception is handled internally.
if (interface->device->type == IFT_LOOP) {
// Locally received buffers don't need a domain device handler, as the
// buffer reception is handled internally.
} else if (interface->device->type == IFT_TUN) {
status = stack->register_domain_device_handler(
interface->device, B_NET_FRAME_TYPE(IFT_ETHER, ETHER_TYPE_IP), domain);
if (status != B_OK)
return status;
}
protocol = new(std::nothrow) loopback_frame_protocol;
if (protocol == NULL) {
@ -105,6 +111,9 @@ loopback_frame_uninit(net_datalink_protocol* protocol)
status_t
loopback_frame_send_data(net_datalink_protocol* protocol, net_buffer* buffer)
{
// Packet capture expects ethernet frames, so we apply framing
// (and deframing) even for loopback packets.
NetBufferPrepend<ether_header> bufferHeader(buffer);
if (bufferHeader.Status() != B_OK)
return bufferHeader.Status();

View File

@ -3,5 +3,4 @@ SubDir HAIKU_TOP src add-ons kernel network devices ;
SubInclude HAIKU_TOP src add-ons kernel network devices dialup ;
SubInclude HAIKU_TOP src add-ons kernel network devices ethernet ;
SubInclude HAIKU_TOP src add-ons kernel network devices loopback ;
SubInclude HAIKU_TOP src add-ons kernel network devices tun ;
SubInclude HAIKU_TOP src add-ons kernel network devices tunnel ;

View File

@ -1,13 +0,0 @@
SubDir HAIKU_TOP src add-ons kernel network devices tun ;
UsePrivateKernelHeaders ;
UsePrivateHeaders net ;
KernelAddon tun :
tun.cpp
;
# Installation
HaikuInstall install-tun
: /boot/home/config/non-packaged/add-ons/kernel/network/devices/
: tun ;

View File

@ -1,398 +0,0 @@
/*
* Copyright 2023 Haiku, Inc. All rights reserved.
* Distributed under the terms of the MIT License.
*
* Authors:
* Axel Dörfler, axeld@pinc-software.de
* Sean Brady, swangeon@gmail.com
*/
#include <ethernet.h>
#include <net_buffer.h>
#include <net_datalink.h>
#include <net_device.h>
#include <net_stack.h>
#include <net_tun.h>
#include <ByteOrder.h>
#include <Drivers.h>
#include <lock.h>
#include <util/AutoLock.h>
#include <util/DoublyLinkedList.h>
#include <KernelExport.h>
#include <NetBufferUtilities.h>
#include <debug.h>
#include <errno.h>
#include <net/if.h>
#include <net/if_media.h>
#include <net/if_tun.h>
#include <net/if_types.h>
#include <new>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <util/Random.h>
struct tun_device : net_device, DoublyLinkedListLinkImpl<tun_device>
{
~tun_device()
{
free(read_buffer);
free(write_buffer);
}
int fd;
uint32 frame_size;
void *read_buffer, *write_buffer;
mutex read_buffer_lock, write_buffer_lock;
};
struct net_buffer_module_info* gBufferModule;
static struct net_stack_module_info* sStackModule;
// #pragma mark -
static status_t
prepend_ethernet_frame(net_buffer* buffer)
{
NetBufferPrepend<ether_header> bufferHeader(buffer);
if (bufferHeader.Status() != B_OK)
return bufferHeader.Status();
ether_header &header = bufferHeader.Data();
header.type = B_HOST_TO_BENDIAN_INT16(ETHER_TYPE_IP);
memset(header.source, 0, ETHER_ADDRESS_LENGTH);
memset(header.destination, 0, ETHER_ADDRESS_LENGTH);
bufferHeader.Sync();
return B_OK;
}
static status_t
ethernet_header_deframe(net_buffer* buffer)
{
NetBufferHeaderRemover<ether_header> bufferHeader(buffer);
if (bufferHeader.Status() != B_OK)
return bufferHeader.Status();
return B_OK;
}
status_t
tun_init(const char* name, net_device** _device)
{
if (strncmp(name, "tun", 3)
&& strncmp(name, "tap", 3)
&& strncmp(name, "dns", 3)) /* iodine uses that */
return B_BAD_VALUE;
tun_device* device = new (std::nothrow) tun_device;
if (device == NULL)
return B_NO_MEMORY;
memset(device, 0, sizeof(tun_device));
strcpy(device->name, name);
if (strncmp(name, "tun", 3) == 0) {
device->flags = IFF_POINTOPOINT | IFF_LINK;
device->type = IFT_ETHER;
} else if (strncmp(name, "tap", 3) == 0) {
device->flags = IFF_BROADCAST | IFF_ALLMULTI | IFF_LINK;
device->type = IFT_ETHER;
/* Set the first two bits to prevent it from becoming a multicast address */
device->address.data[0] = 0x00;
device->address.data[1] = 0xFF;
for (int i = 2; i < ETHER_ADDRESS_LENGTH; i++) {
int val = random_value();
device->address.data[i] = val * 0x11;
}
device->address.length = ETHER_ADDRESS_LENGTH;
} else
return B_BAD_VALUE;
device->mtu = 1500;
device->media = IFM_ACTIVE | IFM_ETHER;
device->frame_size = ETHER_MAX_FRAME_SIZE;
device->header_length = ETHER_HEADER_LENGTH;
device->fd = -1;
mutex_init(&device->read_buffer_lock, "tun read_buffer");
mutex_init(&device->write_buffer_lock, "tun write_buffer");
*_device = device;
return B_OK;
}
status_t
tun_uninit(net_device* _device)
{
tun_device* device = (tun_device*)_device;
close(device->fd);
device->fd = -1;
mutex_destroy(&device->read_buffer_lock);
mutex_destroy(&device->write_buffer_lock);
delete device;
return B_OK;
}
status_t
tun_up(net_device* _device)
{
tun_device* device = (tun_device*)_device;
device->fd = open("/dev/tap/0", O_RDWR);
if (device->fd < 0)
return errno;
ioctl(device->fd, TUNSETIFF);
return B_OK;
}
void
tun_down(net_device* _device)
{
tun_device* device = (tun_device*)_device;
close(device->fd);
device->fd = -1;
}
status_t
tun_control(net_device* device, int32 op, void* argument, size_t length)
{
return B_BAD_VALUE;
}
status_t
tun_send_data(net_device* _device, net_buffer* buffer)
{
tun_device* device = (tun_device*)_device;
status_t status;
if (strncmp(device->name, "tun", 3) == 0) {
status = ethernet_header_deframe(buffer);
if (status != B_OK)
return B_ERROR;
}
if (buffer->size > device->mtu)
return B_BAD_VALUE;
net_buffer* allocated = NULL;
net_buffer* original = buffer;
MutexLocker bufferLocker;
struct iovec iovec;
if (gBufferModule->count_iovecs(buffer) > 1) {
if (device->write_buffer != NULL) {
bufferLocker.SetTo(device->write_buffer_lock, false);
status_t status = gBufferModule->read(buffer, 0,
device->write_buffer, buffer->size);
if (status != B_OK)
return status;
iovec.iov_base = device->write_buffer;
iovec.iov_len = buffer->size;
} else {
// Fall back to creating a new buffer.
allocated = gBufferModule->duplicate(original);
if (allocated == NULL)
return ENOBUFS;
buffer = allocated;
if (gBufferModule->count_iovecs(allocated) > 1) {
dprintf("tun_send_data: no write buffer, cannot perform scatter I/O\n");
gBufferModule->free(allocated);
device->stats.send.errors++;
return B_NOT_SUPPORTED;
}
gBufferModule->get_iovecs(buffer, &iovec, 1);
}
} else {
gBufferModule->get_iovecs(buffer, &iovec, 1);
}
ssize_t bytesWritten = write(device->fd, iovec.iov_base, iovec.iov_len);
if (bytesWritten < 0) {
device->stats.send.errors++;
if (allocated)
gBufferModule->free(allocated);
return errno;
}
device->stats.send.packets++;
device->stats.send.bytes += bytesWritten;
gBufferModule->free(original);
if (allocated)
gBufferModule->free(allocated);
return B_OK;
}
status_t
tun_receive_data(net_device* _device, net_buffer** _buffer)
{
tun_device* device = (tun_device*)_device;
if (device->fd == -1)
return B_FILE_ERROR;
// TODO: better header space
net_buffer* buffer = gBufferModule->create(256);
if (buffer == NULL)
return ENOBUFS;
MutexLocker bufferLocker;
struct iovec iovec;
size_t bytesRead;
status_t status;
if (device->read_buffer != NULL) {
bufferLocker.SetTo(device->read_buffer_lock, false);
iovec.iov_base = device->read_buffer;
iovec.iov_len = device->frame_size;
} else {
void* data;
status = gBufferModule->append_size(buffer, device->mtu, &data);
if (status == B_OK && data == NULL) {
dprintf("tun_receive_data: no read buffer, cannot perform scattered I/O!\n");
status = B_NOT_SUPPORTED;
}
if (status < B_OK) {
gBufferModule->free(buffer);
return status;
}
iovec.iov_base = data;
iovec.iov_len = device->frame_size;
}
bytesRead = read(device->fd, iovec.iov_base, iovec.iov_len);
if (bytesRead < 0 || iovec.iov_base == NULL) {
device->stats.receive.errors++;
status = errno;
gBufferModule->free(buffer);
return status;
}
if (strncmp(device->name, "tun", 3) == 0) {
status = prepend_ethernet_frame(buffer);
if (status != B_OK)
return status;
}
if (iovec.iov_base == device->read_buffer)
status = gBufferModule->append(buffer, iovec.iov_base, buffer->size);
else
status = gBufferModule->trim(buffer, buffer->size);
if (status < B_OK) {
device->stats.receive.dropped++;
gBufferModule->free(buffer);
return status;
}
device->stats.receive.bytes += bytesRead;
device->stats.receive.packets++;
*_buffer = buffer;
return B_OK;
}
status_t
tun_set_mtu(net_device* device, size_t mtu)
{
if (mtu > 65536 || mtu < 16)
return B_BAD_VALUE;
device->mtu = mtu;
return B_OK;
}
status_t
tun_set_promiscuous(net_device* device, bool promiscuous)
{
return EOPNOTSUPP;
}
status_t
tun_set_media(net_device* device, uint32 media)
{
return EOPNOTSUPP;
}
status_t
tun_add_multicast(net_device* device, const sockaddr* address)
{
// Nothing to do for multicast filters as we always accept all frames.
return B_OK;
}
status_t
tun_remove_multicast(net_device* device, const sockaddr* address)
{
return B_OK;
}
static status_t
tun_std_ops(int32 op, ...)
{
switch (op) {
case B_MODULE_INIT:
{
status_t status = get_module(NET_STACK_MODULE_NAME, (module_info**)&sStackModule);
if (status < B_OK)
return status;
status = get_module(NET_BUFFER_MODULE_NAME, (module_info**)&gBufferModule);
if (status < B_OK) {
put_module(NET_STACK_MODULE_NAME);
return status;
}
return B_OK;
}
case B_MODULE_UNINIT:
put_module(NET_BUFFER_MODULE_NAME);
put_module(NET_STACK_MODULE_NAME);
return B_OK;
default:
return B_ERROR;
}
}
net_device_module_info sTunModule = {
{
"network/devices/tun/v1",
0,
tun_std_ops
},
tun_init,
tun_uninit,
tun_up,
tun_down,
tun_control,
tun_send_data,
tun_receive_data,
tun_set_mtu,
tun_set_promiscuous,
tun_set_media,
tun_add_multicast,
tun_remove_multicast,
};
module_info* modules[] = {
(module_info*)&sTunModule,
NULL
};

View File

@ -0,0 +1,8 @@
SubDir HAIKU_TOP src add-ons kernel network devices tunnel ;
UsePrivateKernelHeaders ;
UsePrivateHeaders net ;
KernelAddon tunnel :
tunnel.cpp
;

View File

@ -0,0 +1,560 @@
/*
* Copyright 2023, Haiku, Inc. All rights reserved.
* Distributed under the terms of the MIT License.
*
* Authors:
* Augustin Cavalier <waddlesplash>
* Axel Dörfler, axeld@pinc-software.de
* Sean Brady, swangeon@gmail.com
*/
#include <new>
#include <string.h>
#include <fs/select_sync_pool.h>
#include <fs/devfs.h>
#include <util/AutoLock.h>
#include <util/Random.h>
#include <net_buffer.h>
#include <net_device.h>
#include <net_stack.h>
#include <NetBufferUtilities.h>
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_tun.h>
#include <netinet/in.h>
#include <ethernet.h>
//#define TRACE_TUNNEL
#ifdef TRACE_TUNNEL
# define TRACE(x...) dprintf("network/tunnel: " x)
#else
# define TRACE(x...)
#endif
#define CALLED(x...) TRACE("CALLED %s\n", __PRETTY_FUNCTION__)
#define TRACE_ALWAYS(x...) dprintf("network/tunnel: " x)
struct tunnel_device : net_device {
bool is_tap;
net_fifo send_queue, receive_queue;
int32 open_count;
mutex select_lock;
select_sync_pool* select_pool;
};
#define TUNNEL_QUEUE_MAX (ETHER_MAX_FRAME_SIZE * 32)
struct net_buffer_module_info* gBufferModule;
static net_stack_module_info* gStackModule;
// #pragma mark - devices array
static tunnel_device* gDevices[10] = {};
static mutex gDevicesLock = MUTEX_INITIALIZER("tunnel devices");
// Look up a tunnel device by its interface name.
// Returns NULL when no device with that name is registered.
// The caller must hold gDevicesLock for the duration of the scan.
static tunnel_device*
find_tunnel_device(const char* name)
{
	ASSERT_LOCKED_MUTEX(&gDevicesLock);

	for (size_t slot = 0; slot < B_COUNT_OF(gDevices); slot++) {
		tunnel_device* candidate = gDevices[slot];
		if (candidate != NULL && strcmp(candidate->name, name) == 0)
			return candidate;
	}

	return NULL;
}
// #pragma mark - devfs device
struct tunnel_cookie {
tunnel_device* device;
uint32 flags;
};
// devfs open hook: attach a cookie to the backing tunnel_device.
// Only a single open() is permitted per device at a time.
status_t
tunnel_open(const char* name, uint32 flags, void** _cookie)
{
	MutexLocker devicesLocker(gDevicesLock);

	tunnel_device* device = find_tunnel_device(name);
	if (device == NULL)
		return ENODEV;

	// Claim the single-open slot atomically.
	if (atomic_or(&device->open_count, 1) != 0)
		return EBUSY;

	tunnel_cookie* cookie = new(std::nothrow) tunnel_cookie;
	if (cookie == NULL) {
		// Release the open slot we just claimed; otherwise the device
		// would remain permanently "busy" after this failure.
		atomic_and(&device->open_count, 0);
		return B_NO_MEMORY;
	}

	cookie->device = device;
	cookie->flags = flags;

	*_cookie = cookie;
	return B_OK;
}
// devfs close hook: interrupt any reader blocked in tunnel_read().
// Resource teardown happens later, in tunnel_free().
status_t
tunnel_close(void* _cookie)
{
	tunnel_cookie* cookie = (tunnel_cookie*)_cookie;
	// Wake up the send queue, so that any threads waiting to read return at once.
	release_sem_etc(cookie->device->send_queue.notify, B_INTERRUPTED, B_RELEASE_ALL);
	return B_OK;
}
// devfs free hook: release the single-open reservation taken in
// tunnel_open() and destroy the cookie.
status_t
tunnel_free(void* _cookie)
{
	tunnel_cookie* cookie = (tunnel_cookie*)_cookie;
	atomic_and(&cookie->device->open_count, 0);
	delete cookie;
	return B_OK;
}
// devfs ioctl hook: only the blocking-mode toggles are supported; the
// blocking state is tracked per open cookie, not per device.
status_t
tunnel_control(void* _cookie, uint32 op, void* data, size_t len)
{
	tunnel_cookie* cookie = (tunnel_cookie*)_cookie;

	if (op == B_SET_NONBLOCKING_IO) {
		cookie->flags |= O_NONBLOCK;
		return B_OK;
	}

	if (op == B_SET_BLOCKING_IO) {
		cookie->flags &= ~O_NONBLOCK;
		return B_OK;
	}

	return B_DEV_INVALID_IOCTL;
}
// devfs read hook: hand the next outbound packet to userland.
// For TUN devices the synthetic ethernet header is stripped so userland
// sees a raw IP packet.
status_t
tunnel_read(void* _cookie, off_t position, void* data, size_t* _length)
{
	tunnel_cookie* cookie = (tunnel_cookie*)_cookie;

	// NOTE(review): O_NONBLOCK (settable via tunnel_control) is not
	// consulted here; the dequeue always waits. Confirm intended semantics.
	net_buffer* buffer = NULL;
	status_t status = gStackModule->fifo_dequeue_buffer(
		&cookie->device->send_queue, 0, B_INFINITE_TIMEOUT, &buffer);
	if (status != B_OK)
		return status;

	size_t offset = 0;
	if (!cookie->device->is_tap) {
		// TUN: Skip ethernet header.
		offset = ETHER_HEADER_LENGTH;
	}

	const size_t length = min_c(*_length, buffer->size - offset);
	status = gBufferModule->read(buffer, offset, data, length);

	// We own the dequeued buffer: free it on the error path too,
	// otherwise it is leaked whenever the copy-out fails.
	gBufferModule->free(buffer);
	if (status != B_OK)
		return status;

	*_length = length;
	return B_OK;
}
// devfs write hook: wrap the userland packet in a net_buffer and queue it
// for the network stack's reader thread (tunnel_receive_data).
// TUN devices receive raw IP packets; a synthetic ethernet header and
// link-level addresses are prepended so the rest of the stack can treat
// the frame uniformly.
status_t
tunnel_write(void* _cookie, off_t position, const void* data, size_t* _length)
{
	tunnel_cookie* cookie = (tunnel_cookie*)_cookie;

	net_buffer* buffer = gBufferModule->create(256);
	if (buffer == NULL)
		return B_NO_MEMORY;

	status_t status = gBufferModule->append(buffer, data, *_length);
	if (status != B_OK) {
		gBufferModule->free(buffer);
		return status;
	}

	if (!cookie->device->is_tap) {
		// TUN: Detect packet type and prepend ethernet header.
		uint8 version;
		status = gBufferModule->read(buffer, 0, &version, 1);
		if (status != B_OK) {
			gBufferModule->free(buffer);
			return status;
		}

		// The IP version lives in the high nibble of the first byte.
		version = (version & 0xF0) >> 4;
		if (version != 4 && version != 6) {
			// Not any IP packet we recognize.
			gBufferModule->free(buffer);
			return B_BAD_DATA;
		}

		buffer->type = (version == 6) ? B_NET_FRAME_TYPE_IPV6
			: B_NET_FRAME_TYPE_IPV4;

		NetBufferPrepend<ether_header> bufferHeader(buffer);
		if (bufferHeader.Status() != B_OK) {
			gBufferModule->free(buffer);
			return bufferHeader.Status();
		}

		ether_header &header = bufferHeader.Data();
		header.type = (version == 6) ? htons(ETHER_TYPE_IPV6)
			: htons(ETHER_TYPE_IP);
		memset(header.source, 0, ETHER_ADDRESS_LENGTH);
		memset(header.destination, 0, ETHER_ADDRESS_LENGTH);
		bufferHeader.Sync();

		// At least sdl_type and sdl_e_type must be set.
		struct sockaddr_dl& src = *(struct sockaddr_dl*)buffer->source;
		struct sockaddr_dl& dst = *(struct sockaddr_dl*)buffer->destination;
		src.sdl_len = dst.sdl_len = sizeof(sockaddr_dl);
		src.sdl_family = dst.sdl_family = AF_LINK;
		src.sdl_index = dst.sdl_index = cookie->device->index;
		src.sdl_type = dst.sdl_type = IFT_ETHER;
		src.sdl_e_type = dst.sdl_e_type = header.type;
		src.sdl_nlen = src.sdl_slen = dst.sdl_nlen = dst.sdl_slen = 0;
		src.sdl_alen = dst.sdl_alen = 0;
	}

	// Snapshot the size now: once the buffer is enqueued, the reader
	// thread may dequeue and free it at any moment, so touching the
	// buffer after a successful enqueue would be a use-after-free.
	const size_t size = buffer->size;

	// We use a queue and the receive_data() hook instead of device_enqueue_buffer()
	// for two reasons: 1. listeners (e.g. packet capture) are only processed by the
	// reader thread that calls receive_data(), and 2. device_enqueue_buffer() has
	// to look up the device interface every time, which is inefficient.
	status = gStackModule->fifo_enqueue_buffer(&cookie->device->receive_queue, buffer);
	if (status == B_OK) {
		atomic_add((int32*)&cookie->device->stats.receive.packets, 1);
		atomic_add64((int64*)&cookie->device->stats.receive.bytes, size);
	} else {
		atomic_add((int32*)&cookie->device->stats.receive.errors, 1);
		gBufferModule->free(buffer);
	}

	return status;
}
// devfs select hook. Registration happens under select_lock; the initial
// readiness check is then made under the FIFO lock so the queued-bytes
// test is consistent with concurrent enqueues.
status_t
tunnel_select(void* _cookie, uint8 event, uint32 ref, selectsync* sync)
{
	tunnel_cookie* cookie = (tunnel_cookie*)_cookie;
	if (event != B_SELECT_READ && event != B_SELECT_WRITE)
		return B_BAD_VALUE;

	MutexLocker selectLocker(cookie->device->select_lock);
	status_t status = add_select_sync_pool_entry(&cookie->device->select_pool, sync, event);
	if (status != B_OK)
		return B_BAD_VALUE;
	// Drop select_lock before taking the FIFO lock (lock ordering).
	selectLocker.Unlock();

	MutexLocker fifoLocker(cookie->device->send_queue.lock);
	// Readable as soon as the stack has queued outbound data.
	if (event == B_SELECT_READ && cookie->device->send_queue.current_bytes != 0)
		notify_select_event(sync, event);
	// The node is always reported writable.
	if (event == B_SELECT_WRITE)
		notify_select_event(sync, event);
	return B_OK;
}
// devfs deselect hook: drop this sync from the device's select pool.
status_t
tunnel_deselect(void* _cookie, uint8 event, selectsync* sync)
{
	tunnel_cookie* cookie = (tunnel_cookie*)_cookie;

	// Reject unsupported events up front; no lock needed for the check.
	if (event != B_SELECT_READ && event != B_SELECT_WRITE)
		return B_BAD_VALUE;

	MutexLocker selectLocker(cookie->device->select_lock);
	return remove_select_sync_pool_entry(&cookie->device->select_pool, sync, event);
}
static device_hooks sDeviceHooks = {
tunnel_open,
tunnel_close,
tunnel_free,
tunnel_control,
tunnel_read,
tunnel_write,
tunnel_select,
tunnel_deselect,
};
// #pragma mark - network stack device
// net_device init hook: create the device for a "tun/" or "tap/" interface
// name and publish the matching devfs node.
status_t
tunnel_init(const char* name, net_device** _device)
{
	const bool isTAP = strncmp(name, "tap/", 4) == 0;
	if (!isTAP && strncmp(name, "tun/", 4) != 0)
		return B_BAD_VALUE;
	if (strlen(name) >= sizeof(tunnel_device::name))
		return ENAMETOOLONG;

	tunnel_device* device = new(std::nothrow) tunnel_device;
	if (device == NULL)
		return B_NO_MEMORY;

	// Fully initialize the structure BEFORE it becomes visible in
	// gDevices: other threads scan that array under gDevicesLock and must
	// never observe a half-initialized entry (the previous code cleared
	// and named the device only after publishing it and dropping the lock).
	memset(device, 0, sizeof(tunnel_device));
	strcpy(device->name, name);

	device->mtu = ETHER_MAX_FRAME_SIZE;
	device->media = IFM_ACTIVE;
	device->is_tap = isTAP;

	if (device->is_tap) {
		device->flags = IFF_BROADCAST | IFF_ALLMULTI | IFF_LINK;
		device->type = IFT_ETHER;

		// Generate a random MAC address.
		for (int i = 0; i < ETHER_ADDRESS_LENGTH; i++)
			device->address.data[i] = secure_get_random<uint8>();
		device->address.data[0] &= 0xFE; // multicast
		device->address.data[0] |= 0x02; // local assignment

		device->address.length = ETHER_ADDRESS_LENGTH;
	} else {
		device->flags = IFF_POINTOPOINT | IFF_LINK;
		device->type = IFT_TUN;
	}

	// Make sure this device doesn't already exist, then claim a slot.
	MutexLocker devicesLocker(gDevicesLock);
	if (find_tunnel_device(name) != NULL) {
		delete device;
		return EEXIST;
	}

	ssize_t index = -1;
	for (size_t i = 0; i < B_COUNT_OF(gDevices); i++) {
		if (gDevices[i] != NULL)
			continue;

		gDevices[i] = device;
		index = i;
		break;
	}
	if (index < 0) {
		delete device;
		return ENOSPC;
	}
	devicesLocker.Unlock();

	status_t status = gStackModule->init_fifo(&device->send_queue,
		"tunnel send queue", TUNNEL_QUEUE_MAX);
	if (status != B_OK)
		goto err_slot;

	status = gStackModule->init_fifo(&device->receive_queue,
		"tunnel receive queue", TUNNEL_QUEUE_MAX);
	if (status != B_OK)
		goto err_send_queue;

	mutex_init(&device->select_lock, "tunnel select lock");

	status = devfs_publish_device(name, &sDeviceHooks);
	if (status != B_OK)
		goto err_fifos;

	*_device = device;
	return B_OK;

err_fifos:
	mutex_destroy(&device->select_lock);
	gStackModule->uninit_fifo(&device->receive_queue);
err_send_queue:
	gStackModule->uninit_fifo(&device->send_queue);
err_slot:
	// Release the claimed slot again; previously these error paths left a
	// dangling pointer in gDevices (and leaked the select mutex).
	{
		MutexLocker slotLocker(gDevicesLock);
		gDevices[index] = NULL;
	}
	delete device;
	return status;
}
// net_device uninit hook: unregister the device, unpublish its devfs node,
// and free all associated resources.
status_t
tunnel_uninit(net_device* _device)
{
	tunnel_device* device = (tunnel_device*)_device;

	// Refuse to tear down while the devfs node is still open.
	MutexLocker devicesLocker(gDevicesLock);
	if (atomic_get(&device->open_count) != 0)
		return EBUSY;

	// Remove ourselves from the global device table.
	for (size_t i = 0; i < B_COUNT_OF(gDevices); i++) {
		if (gDevices[i] != device)
			continue;

		gDevices[i] = NULL;
		break;
	}
	status_t status = devfs_unpublish_device(device->name, false);
	if (status != B_OK)
		panic("devfs_unpublish_device failed: %" B_PRId32, status);

	gStackModule->uninit_fifo(&device->send_queue);
	gStackModule->uninit_fifo(&device->receive_queue);
	mutex_destroy(&device->select_lock);
	delete device;
	return B_OK;
}
// net_device up hook: nothing to do — the devfs node and FIFOs are
// created in tunnel_init() and remain valid for the device's lifetime.
status_t
tunnel_up(net_device* _device)
{
	return B_OK;
}
// net_device down hook: interrupt the stack's reader thread blocked in
// tunnel_receive_data() so it can notice the interface going down.
void
tunnel_down(net_device* _device)
{
	tunnel_device* device = (tunnel_device*)_device;
	// Wake up the receive queue, so that the reader thread returns at once.
	release_sem_etc(device->receive_queue.notify, B_INTERRUPTED, B_RELEASE_ALL);
}
status_t
tunnel_control(net_device* device, int32 op, void* argument, size_t length)
{
	// No device-specific ioctls are supported.
	return B_BAD_VALUE;
}
status_t
tunnel_send_data(net_device* _device, net_buffer* buffer)
{
	tunnel_device* device = (tunnel_device*)_device;

	status_t status = B_OK;
	if (!device->is_tap) {
		// Ensure this is an IP frame.
		// NOTE(review): this reads the frame-type field at the ether_header
		// offset even in TUN mode, which assumes the loopback frame protocol
		// prepends an ether_header-compatible header to outgoing buffers --
		// confirm against the loopback_frame datalink protocol.
		uint16 type;
		status = gBufferModule->read(buffer, offsetof(ether_header, type),
			&type, sizeof(type));
		if (status != B_OK)
			return status;

		if (type != htons(ETHER_TYPE_IP) && type != htons(ETHER_TYPE_IPV6))
			return B_BAD_DATA;
	}

	// Queue the packet for the devfs read hook to pick up.
	status = gStackModule->fifo_enqueue_buffer(
		&device->send_queue, buffer);
	if (status == B_OK) {
		atomic_add((int32*)&device->stats.send.packets, 1);
		atomic_add64((int64*)&device->stats.send.bytes, buffer->size);
	} else {
		atomic_add((int32*)&device->stats.send.errors, 1);
	}

	// Wake any select()ers waiting to read from the control device.
	// (Notified even when the enqueue failed; a spurious wakeup is harmless.)
	MutexLocker selectLocker(device->select_lock);
	notify_select_event_pool(device->select_pool, B_SELECT_READ);

	return status;
}
status_t
tunnel_receive_data(net_device* _device, net_buffer** _buffer)
{
	tunnel_device* device = (tunnel_device*)_device;

	// Block indefinitely until the devfs write hook enqueues a packet;
	// tunnel_down() interrupts the wait by releasing the FIFO's notify
	// semaphore with B_INTERRUPTED.
	return gStackModule->fifo_dequeue_buffer(&device->receive_queue,
		0, B_INFINITE_TIMEOUT, _buffer);
}
status_t
tunnel_set_mtu(net_device* device, size_t mtu)
{
	// Reject nonsensical MTUs: anything smaller than a minimal header,
	// or larger than a maximum-sized IP datagram.
	if (mtu < 16 || mtu > 65536)
		return B_BAD_VALUE;

	device->mtu = mtu;
	return B_OK;
}
status_t
tunnel_set_promiscuous(net_device* device, bool promiscuous)
{
	// Promiscuous mode is meaningless for a virtual point-to-point/TAP
	// device; all traffic is already delivered to the control endpoint.
	return EOPNOTSUPP;
}
status_t
tunnel_set_media(net_device* device, uint32 media)
{
	// There is no physical medium to configure.
	return EOPNOTSUPP;
}
status_t
tunnel_add_multicast(net_device* device, const sockaddr* address)
{
	// Accept silently: no multicast filtering is performed, so nothing
	// needs to be recorded.
	return B_OK;
}
status_t
tunnel_remove_multicast(net_device* device, const sockaddr* address)
{
	// Accept silently: counterpart to tunnel_add_multicast(), which keeps
	// no state to remove.
	return B_OK;
}
// The published net_device module. Entries are positional and must match
// the hook order declared in net_device_module_info.
net_device_module_info sTunModule = {
	{
		"network/devices/tunnel/v1",
		0,
		NULL
	},
	tunnel_init,
	tunnel_uninit,
	tunnel_up,
	tunnel_down,
	tunnel_control,
	tunnel_send_data,
	tunnel_receive_data,
	tunnel_set_mtu,
	tunnel_set_promiscuous,
	tunnel_set_media,
	tunnel_add_multicast,
	tunnel_remove_multicast,
};
// Kernel modules this driver depends on; the loader resolves these and
// fills in the module-info pointers before init.
module_dependency module_dependencies[] = {
	{NET_STACK_MODULE_NAME, (module_info**)&gStackModule},
	{NET_BUFFER_MODULE_NAME, (module_info**)&gBufferModule},
	{}
};
// NULL-terminated list of modules exported by this add-on.
module_info* modules[] = {
	(module_info*)&sTunModule,
	NULL
};

View File

@ -834,7 +834,7 @@ init_stack()
register_domain_datalink_protocols(AF_INET, IFT_LOOP,
"network/datalink_protocols/loopback_frame/v1", NULL);
register_domain_datalink_protocols(AF_INET, IFT_TUN,
"network/datalink_protocols/ethernet_frame/v1", NULL);
"network/datalink_protocols/loopback_frame/v1", NULL);
#if 0 // PPP is not (currently) included in the build
register_domain_datalink_protocols(AF_INET, IFT_PPP,
"network/datalink_protocols/ppp_frame/v1", NULL);

View File

@ -17,4 +17,3 @@ SubInclude HAIKU_TOP src bin network route ;
SubInclude HAIKU_TOP src bin network telnet ;
SubInclude HAIKU_TOP src bin network telnetd ;
SubInclude HAIKU_TOP src bin network traceroute ;
SubInclude HAIKU_TOP src bin network tunconfig ;

View File

@ -1,19 +0,0 @@
SubDir HAIKU_TOP src bin network tunconfig ;
UsePrivateKernelHeaders ;
UsePrivateHeaders net ;
UseHeaders [ FDirName $(HAIKU_TOP) src add-ons kernel network ppp shared libppp
headers ] : true ;
UseHeaders [ FDirName $(HAIKU_TOP) src add-ons kernel network ppp shared
libkernelppp headers ] : true ;
BinCommand tunconfig :
tunconfig.cpp
:
be libppp.a [ TargetLibsupc++ ] libbsd.so $(TARGET_NETWORK_LIBS)
;
# Installation
HaikuInstall install-tun
: /boot/home/config/non-packaged/bin
: tunconfig ;

View File

@ -1,76 +0,0 @@
/*
* Copyright 2006-2019, Haiku, Inc. All rights Reserved.
* Distributed under the terms of the MIT License.
*
* Authors:
* Alexander von Gluck IV <kallisti5@unixzen.com>
*/
#include <stdio.h>
#include <String.h>
#include <NetworkInterface.h>
#include <NetworkRoster.h>
// Print usage information to stderr. Always returns -1 so callers can
// write `return print_help();` as an error exit.
static
status_t
print_help()
{
	fprintf(stderr, "tunconfig\n");
	fprintf(stderr, "With tunconfig you can create and manage tun/tap devices.\n");
	fprintf(stderr, "Usage:\n");
	fprintf(stderr, "  tunconfig show | -a\n");
	fprintf(stderr, "  tunconfig init <name>\n");
	fprintf(stderr, "  tunconfig create <name>\n");
	fprintf(stderr, "  tunconfig delete <name|interface|id>\n");
	fprintf(stderr, "  tunconfig details <name|interface|id>\n");
	fprintf(stderr, "\t<name> must be an interface description file\n");
	return -1;
}
// Print a single interface name; trivial helper for show_all().
static
status_t
show_interface(const char* name)
{
	printf("%s\n", name);
	return B_OK;
}
// Enumerate all network interfaces and print those whose link-level
// type is IFT_TUN. Interfaces whose hardware address cannot be read
// are silently skipped.
static
status_t
show_all()
{
	BNetworkRoster& roster = BNetworkRoster::Default();
	BNetworkInterface interface;
	uint32 cookie = 0;
	while (roster.GetNextInterface(&cookie, interface) == B_OK) {
		BNetworkAddress linkAddress;
		status_t status = interface.GetHardwareAddress(linkAddress);
		if (status == B_OK && linkAddress.LinkLevelType() == IFT_TUN)
			show_interface(interface.Name());
	}
	return B_OK;
}
// Entry point: only "show"/"-a" is implemented; every other invocation
// (including the subcommands listed in the help text) prints usage and
// exits with -1. The trailing `return 0` is unreachable.
int
main(int argc, char *argv[])
{
	if (argc == 2) {
		if (!strcmp(argv[1], "show") || !strcmp(argv[1], "-a"))
			return show_all();
		else
			return print_help();
	} else {
		return print_help();
	}
	return 0;
}