├── tests ├── tcp_peers.txt ├── udp_peers.txt ├── data64.key ├── Makefile ├── qemu │ └── launch_deb2.sh ├── multihome-test.sh ├── float-test.sh └── netns-test.sh ├── .gitignore ├── drivers └── net │ ├── ovpn-dco │ ├── stats.c │ ├── rcu.h │ ├── Makefile │ ├── netlink.h │ ├── udp.h │ ├── crypto_aead.h │ ├── main.h │ ├── addr.h │ ├── tcp.h │ ├── skb.h │ ├── ovpn.h │ ├── sock.h │ ├── ovpnstruct.h │ ├── bind.c │ ├── bind.h │ ├── stats.h │ ├── proto.h │ ├── pktid.c │ ├── pktid.h │ ├── sock.c │ ├── crypto.h │ ├── crypto.c │ ├── peer.h │ ├── main.c │ ├── udp.c │ ├── crypto_aead.c │ ├── tcp.c │ ├── ovpn.c │ ├── peer.c │ └── netlink.c │ └── Kconfig ├── compat-include └── net │ └── gso.h ├── gen-compat-autoconf.sh ├── Makefile ├── README ├── include └── uapi │ └── linux │ └── ovpn_dco.h └── linux-compat.h /tests/tcp_peers.txt: -------------------------------------------------------------------------------- 1 | 1 5.5.5.2 2 | 2 5.5.5.3 3 | 3 5.5.5.4 4 | 4 5.5.5.5 5 | 5 5.5.5.6 6 | -------------------------------------------------------------------------------- /tests/udp_peers.txt: -------------------------------------------------------------------------------- 1 | 1 10.10.1.2 1 5.5.5.2 2 | 2 10.10.2.2 1 5.5.5.3 3 | 3 10.10.3.2 1 5.5.5.4 4 | 4 10.10.4.2 1 5.5.5.5 5 | 5 10.10.5.2 1 5.5.5.6 6 | -------------------------------------------------------------------------------- /tests/data64.key: -------------------------------------------------------------------------------- 1 | jRqMACN7d7/aFQNT8S7jkrBD8uwrgHbG5OQZP2eu4R1Y7tfpS2bf5RHv06Vi163CGoaIiTX99R3B 2 | ia9ycAH8Wz1+9PWv51dnBLur9jbShlgZ2QHLtUc4a/gfT7zZwULXuuxdLnvR21DDeMBaTbkgbai9 3 | uvAa7ne1liIgGFzbv+Bas4HDVrygxIxuAnP5Qgc3648IJkZ0QEXPF+O9f0n5+QIvGCxkAUVx+5K6 4 | KIs+SoeWXnAopELmoGSjUpFtJbagXK82HfdqpuUxT2Tnuef0/14SzVE/vNleBNu2ZbyrSAaah8tE 5 | BofkPJUBFY+YQcfZNM5Dgrw3i+Bpmpq/gpdg5w== 6 | -------------------------------------------------------------------------------- /tests/Makefile: 
-------------------------------------------------------------------------------- 1 | # SPDX-License-Identifier: GPL-2.0 2 | # Copyright (C) 2020- OpenVPN, Inc. 3 | # 4 | # Author: Antonio Quartulli 5 | 6 | RM ?= rm -f 7 | CFLAGS = -Wall 8 | 9 | 10 | ovpn-cli: ovpn-cli.c 11 | $(CC) $(CFLAGS) $@.c -I../include/uapi \ 12 | `pkg-config --cflags --libs libnl-3.0 libnl-genl-3.0` \ 13 | -lmbedtls -lmbedcrypto -Wall -o $@ 14 | 15 | clean: 16 | $(RM) ovpn-cli 17 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /.cache.mk 2 | /compat-autoconf.h 3 | /compat-autoconf.h.tmp 4 | /modules.order 5 | /Module.symvers 6 | /drivers/net/ovpn-dco/ovpn-dco.ko 7 | /drivers/net/ovpn-dco/.ovpn-dco.ko.cmd 8 | /drivers/net/ovpn-dco/ovpn-dco.mod.c 9 | /drivers/net/ovpn-dco/.ovpn-dco.mod.cmd 10 | /drivers/net/ovpn-dco/ovpn-dco.mod 11 | /drivers/net/ovpn-dco/modules.order 12 | /drivers/net/ovpn-dco/*.o 13 | /drivers/net/ovpn-dco/.*.cmd 14 | /.tmp_versions 15 | /tests/ovpn-cli 16 | /.*.cmd 17 | -------------------------------------------------------------------------------- /drivers/net/ovpn-dco/stats.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-2.0 2 | /* OpenVPN data channel accelerator 3 | * 4 | * Copyright (C) 2020- OpenVPN, Inc. 
5 | * 6 | * Author: James Yonan 7 | * Antonio Quartulli 8 | */ 9 | 10 | #include "main.h" 11 | #include "stats.h" 12 | 13 | void ovpn_peer_stats_init(struct ovpn_peer_stats *ps) 14 | { 15 | atomic64_set(&ps->rx.bytes, 0); 16 | atomic_set(&ps->rx.packets, 0); 17 | 18 | atomic64_set(&ps->tx.bytes, 0); 19 | atomic_set(&ps->tx.packets, 0); 20 | } 21 | -------------------------------------------------------------------------------- /drivers/net/Kconfig: -------------------------------------------------------------------------------- 1 | # SPDX-License-Identifier: GPL-2.0 2 | # Copyright (C) 2020- OpenVPN, Inc. 3 | # 4 | # Author: Antonio Quartulli 5 | 6 | config OVPN_DCO_V2 7 | tristate "OpenVPN data channel offload (reloaded)" 8 | depends on NET && INET 9 | select NET_UDP_TUNNEL 10 | select DST_CACHE 11 | select CRYPTO 12 | select CRYPTO_AES 13 | select CRYPTO_GCM 14 | select CRYPTO_CHACHA20POLY1305 15 | help 16 | This module enhances the performance of the OpenVPN userspace software 17 | by offloading the data channel processing to kernelspace. 18 | -------------------------------------------------------------------------------- /drivers/net/ovpn-dco/rcu.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* OpenVPN data channel accelerator 3 | * 4 | * Copyright (C) 2019-2023 OpenVPN, Inc. 
5 | * 6 | * Author: James Yonan 7 | * Antonio Quartulli 8 | */ 9 | 10 | #ifndef _NET_OVPN_DCO_OVPNRCU_H_ 11 | #define _NET_OVPN_DCO_OVPNRCU_H_ 12 | 13 | static inline void ovpn_rcu_lockdep_assert_held(void) 14 | { 15 | #ifdef CONFIG_PROVE_RCU 16 | RCU_LOCKDEP_WARN(!rcu_read_lock_held(), 17 | "ovpn-dco RCU read lock not held"); 18 | #endif 19 | } 20 | 21 | #endif /* _NET_OVPN_DCO_OVPNRCU_H_ */ 22 | -------------------------------------------------------------------------------- /drivers/net/ovpn-dco/Makefile: -------------------------------------------------------------------------------- 1 | # SPDX-License-Identifier: GPL-2.0 2 | # 3 | # ovpn-dco -- OpenVPN data channel offload in kernel space 4 | # 5 | # Copyright (C) 2020- OpenVPN, Inc. 6 | # 7 | # Author: Antonio Quartulli 8 | 9 | obj-$(CONFIG_OVPN_DCO_V2) += ovpn-dco-v2.o 10 | ovpn-dco-v2-y += main.o 11 | ovpn-dco-v2-y += bind.o 12 | ovpn-dco-v2-y += crypto.o 13 | ovpn-dco-v2-y += ovpn.o 14 | ovpn-dco-v2-y += peer.o 15 | ovpn-dco-v2-y += sock.o 16 | ovpn-dco-v2-y += stats.o 17 | ovpn-dco-v2-y += netlink.o 18 | ovpn-dco-v2-y += crypto_aead.o 19 | ovpn-dco-v2-y += pktid.o 20 | ovpn-dco-v2-y += tcp.o 21 | ovpn-dco-v2-y += udp.o 22 | -------------------------------------------------------------------------------- /compat-include/net/gso.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* OpenVPN data channel accelerator 3 | * 4 | * Copyright (C) 2023-2024 OpenVPN, Inc. 
5 | * 6 | * Author: Antonio Quartulli 7 | */ 8 | 9 | #ifndef _NET_OVPN_COMPAT_NET_GSO_H 10 | #define _NET_OVPN_COMPAT_NET_GSO_H 11 | 12 | #include 13 | 14 | #if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 4, 10) || \ 15 | SUSE_PRODUCT_CODE >= SUSE_PRODUCT(1, 15, 6, 0) || \ 16 | RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(9, 4) 17 | #include_next 18 | #else 19 | #include 20 | #endif 21 | 22 | #endif /* _NET_OVPN_COMPAT_NET_GSO_H */ 23 | -------------------------------------------------------------------------------- /tests/qemu/launch_deb2.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | DISK=emulation/debian2.img 4 | BZIMAGE=linux-kernel/arch/x86/boot/bzImage 5 | SHARE=/home/user/share 6 | 7 | sudo qemu-system-x86_64 \ 8 | -device virtio-balloon \ 9 | -enable-kvm \ 10 | -accel kvm \ 11 | -kernel ${BZIMAGE} \ 12 | -append 'console=ttyS0 root=/dev/vda1 pci=noacpi' \ 13 | -drive file=${DISK},if=virtio,cache=writeback \ 14 | -virtfs local,path=${SHARE},security_model=passthrough,mount_tag=k1 \ 15 | -nic tap,ifname=tap0,script=no,downscript=no,model=virtio-net-pci \ 16 | -display none \ 17 | -nographic \ 18 | -serial mon:stdio \ 19 | -m 2048 -boot c \ 20 | -snapshot -s 21 | -------------------------------------------------------------------------------- /drivers/net/ovpn-dco/netlink.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* OpenVPN data channel accelerator 3 | * 4 | * Copyright (C) 2020- OpenVPN, Inc. 
5 | * 6 | * Author: Antonio Quartulli 7 | */ 8 | 9 | #ifndef _NET_OVPN_DCO_NETLINK_H_ 10 | #define _NET_OVPN_DCO_NETLINK_H_ 11 | 12 | struct ovpn_struct; 13 | struct ovpn_peer; 14 | 15 | int ovpn_netlink_init(struct ovpn_struct *ovpn); 16 | int ovpn_netlink_register(void); 17 | void ovpn_netlink_unregister(void); 18 | int ovpn_netlink_send_packet(struct ovpn_struct *ovpn, const struct ovpn_peer *peer, 19 | const u8 *buf, size_t len); 20 | int ovpn_netlink_notify_del_peer(struct ovpn_peer *peer); 21 | 22 | #endif /* _NET_OVPN_DCO_NETLINK_H_ */ 23 | -------------------------------------------------------------------------------- /drivers/net/ovpn-dco/udp.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* OpenVPN data channel accelerator 3 | * 4 | * Copyright (C) 2019-2023 OpenVPN, Inc. 5 | * 6 | * Author: Antonio Quartulli 7 | */ 8 | 9 | #ifndef _NET_OVPN_DCO_UDP_H_ 10 | #define _NET_OVPN_DCO_UDP_H_ 11 | 12 | #include "peer.h" 13 | #include "ovpnstruct.h" 14 | 15 | #include 16 | #include 17 | #include 18 | #include 19 | 20 | int ovpn_udp_socket_attach(struct socket *sock, struct ovpn_struct *ovpn); 21 | void ovpn_udp_socket_detach(struct socket *sock); 22 | void ovpn_udp_send_skb(struct ovpn_struct *ovpn, struct ovpn_peer *peer, 23 | struct sk_buff *skb); 24 | 25 | #endif /* _NET_OVPN_DCO_UDP_H_ */ 26 | -------------------------------------------------------------------------------- /drivers/net/ovpn-dco/crypto_aead.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* OpenVPN data channel accelerator 3 | * 4 | * Copyright (C) 2020- OpenVPN, Inc. 
5 | * 6 | * Author: James Yonan 7 | * Antonio Quartulli 8 | */ 9 | 10 | #ifndef _NET_OVPN_DCO_OVPNAEAD_H_ 11 | #define _NET_OVPN_DCO_OVPNAEAD_H_ 12 | 13 | #include "crypto.h" 14 | 15 | #include 16 | #include 17 | 18 | struct crypto_aead *ovpn_aead_init(const char *title, const char *alg_name, 19 | const unsigned char *key, unsigned int keylen); 20 | 21 | int ovpn_aead_encrypt(struct ovpn_crypto_key_slot *ks, struct sk_buff *skb, u32 peer_id); 22 | int ovpn_aead_decrypt(struct ovpn_crypto_key_slot *ks, struct sk_buff *skb); 23 | 24 | struct ovpn_crypto_key_slot *ovpn_aead_crypto_key_slot_new(const struct ovpn_key_config *kc); 25 | void ovpn_aead_crypto_key_slot_destroy(struct ovpn_crypto_key_slot *ks); 26 | 27 | #endif /* _NET_OVPN_DCO_OVPNAEAD_H_ */ 28 | -------------------------------------------------------------------------------- /drivers/net/ovpn-dco/main.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* OpenVPN data channel accelerator 3 | * 4 | * Copyright (C) 2019-2023 OpenVPN, Inc. 
5 | * 6 | * Author: James Yonan 7 | * Antonio Quartulli 8 | */ 9 | 10 | #ifndef _NET_OVPN_DCO_MAIN_H_ 11 | #define _NET_OVPN_DCO_MAIN_H_ 12 | 13 | #include 14 | #include 15 | #include 16 | #include 17 | 18 | #ifndef OVPN_DCO_VERSION 19 | #define OVPN_DCO_VERSION "2.0.0" 20 | #endif 21 | 22 | struct net_device; 23 | bool ovpn_dev_is_valid(const struct net_device *dev); 24 | 25 | #define SKB_HEADER_LEN \ 26 | (max(sizeof(struct iphdr), sizeof(struct ipv6hdr)) + \ 27 | sizeof(struct udphdr) + NET_SKB_PAD) 28 | 29 | #define OVPN_HEAD_ROOM ALIGN(16 + SKB_HEADER_LEN, 4) 30 | #define OVPN_MAX_PADDING 16 31 | #define OVPN_QUEUE_LEN 1024 32 | #define OVPN_MAX_TUN_QUEUE_LEN 0x10000 33 | 34 | #endif /* _NET_OVPN_DCO_OVPN_DCO_H_ */ 35 | -------------------------------------------------------------------------------- /drivers/net/ovpn-dco/addr.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* OpenVPN data channel accelerator 3 | * 4 | * Copyright (C) 2020- OpenVPN, Inc. 
5 | * 6 | * Author: James Yonan 7 | * Antonio Quartulli 8 | */ 9 | 10 | #ifndef _NET_OVPN_DCO_OVPNADDR_H_ 11 | #define _NET_OVPN_DCO_OVPNADDR_H_ 12 | 13 | #include "crypto.h" 14 | 15 | #include 16 | #include 17 | #include 18 | #include 19 | 20 | /* our basic transport layer address */ 21 | struct ovpn_sockaddr { 22 | union { 23 | struct sockaddr_in in4; 24 | struct sockaddr_in6 in6; 25 | }; 26 | }; 27 | 28 | /* Translate skb->protocol value to AF_INET or AF_INET6 */ 29 | static inline unsigned short skb_protocol_to_family(const struct sk_buff *skb) 30 | { 31 | switch (skb->protocol) { 32 | case htons(ETH_P_IP): 33 | return AF_INET; 34 | case htons(ETH_P_IPV6): 35 | return AF_INET6; 36 | default: 37 | return 0; 38 | } 39 | } 40 | 41 | #endif /* _NET_OVPN_DCO_OVPNADDR_H_ */ 42 | -------------------------------------------------------------------------------- /gen-compat-autoconf.sh: -------------------------------------------------------------------------------- 1 | #! /bin/sh 2 | # SPDX-License-Identifier: GPL-2.0 3 | # Copyright (C) 2020- OpenVPN, Inc. 
4 | # 5 | # Author: Antonio Quartulli 6 | 7 | set -e 8 | 9 | TARGET=${1:="compat-autoconf.h"} 10 | TMP="${TARGET}.tmp" 11 | 12 | echo > "${TMP}" 13 | 14 | gen_config() { 15 | KEY="${1}" 16 | VALUE="${2}" 17 | 18 | echo "#undef ${KEY}" 19 | echo "#undef __enabled_${KEY}" 20 | echo "#undef __enabled_${KEY}_MODULE" 21 | case "${VALUE}" in 22 | y) 23 | echo "#define ${KEY} 1" 24 | echo "#define __enabled_${KEY} 1" 25 | echo "#define __enabled_${KEY}_MODULE 0" 26 | ;; 27 | m) 28 | echo "#define ${KEY} 1" 29 | echo "#define __enabled_${KEY} 0" 30 | echo "#define __enabled_${KEY}_MODULE 1" 31 | ;; 32 | n) 33 | echo "#define __enabled_${KEY} 0" 34 | echo "#define __enabled_${KEY}_MODULE 0" 35 | ;; 36 | *) 37 | echo "#define ${KEY} \"${VALUE}\"" 38 | ;; 39 | esac 40 | } 41 | 42 | gen_config 'CONFIG_OVPN_DCO_DEBUG' ${CONFIG_OVPN_DCO_DEBUG:="n"} >> "${TMP}" 43 | 44 | # only regenerate compat-autoconf.h when config was changed 45 | diff "${TMP}" "${TARGET}" > /dev/null 2>&1 || cp "${TMP}" "${TARGET}" 46 | -------------------------------------------------------------------------------- /drivers/net/ovpn-dco/tcp.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* OpenVPN data channel accelerator 3 | * 4 | * Copyright (C) 2019-2023 OpenVPN, Inc. 5 | * 6 | * Author: Antonio Quartulli 7 | */ 8 | 9 | #ifndef _NET_OVPN_DCO_TCP_H_ 10 | #define _NET_OVPN_DCO_TCP_H_ 11 | 12 | #include "peer.h" 13 | 14 | #include 15 | #include 16 | #include 17 | #include 18 | 19 | /* Initialize TCP static objects */ 20 | int __init ovpn_tcp_init(void); 21 | 22 | void ovpn_queue_tcp_skb(struct ovpn_peer *peer, struct sk_buff *skb); 23 | 24 | int ovpn_tcp_socket_attach(struct socket *sock, struct ovpn_peer *peer); 25 | void ovpn_tcp_socket_detach(struct socket *sock); 26 | 27 | /* Prepare skb and enqueue it for sending to peer. 28 | * 29 | * Preparation consist in prepending the skb payload with its size. 
30 | * Required by the OpenVPN protocol in order to extract packets from 31 | * the TCP stream on the receiver side. 32 | */ 33 | static inline void ovpn_tcp_send_skb(struct ovpn_peer *peer, struct sk_buff *skb) 34 | { 35 | u16 len = skb->len; 36 | 37 | *(__be16 *)__skb_push(skb, sizeof(u16)) = htons(len); 38 | ovpn_queue_tcp_skb(peer, skb); 39 | } 40 | 41 | #endif /* _NET_OVPN_DCO_TCP_H_ */ 42 | -------------------------------------------------------------------------------- /drivers/net/ovpn-dco/skb.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* OpenVPN data channel accelerator 3 | * 4 | * Copyright (C) 2020- OpenVPN, Inc. 5 | * 6 | * Author: Antonio Quartulli 7 | * James Yonan 8 | */ 9 | 10 | #ifndef _NET_OVPN_DCO_SKB_H_ 11 | #define _NET_OVPN_DCO_SKB_H_ 12 | 13 | #include 14 | #include 15 | #include 16 | #include 17 | #include 18 | #include 19 | 20 | #define OVPN_SKB_CB(skb) ((struct ovpn_skb_cb *)&((skb)->cb)) 21 | 22 | struct ovpn_skb_cb { 23 | union { 24 | struct in_addr ipv4; 25 | struct in6_addr ipv6; 26 | } local; 27 | sa_family_t sa_fam; 28 | }; 29 | 30 | /* Return IP protocol version from skb header. 31 | * Return 0 if protocol is not IPv4/IPv6 or cannot be read. 
32 | */ 33 | static inline __be16 ovpn_ip_check_protocol(struct sk_buff *skb) 34 | { 35 | __be16 proto = 0; 36 | 37 | /* skb could be non-linear, 38 | * make sure IP header is in non-fragmented part 39 | */ 40 | if (!pskb_network_may_pull(skb, sizeof(struct iphdr))) 41 | return 0; 42 | 43 | if (ip_hdr(skb)->version == 4) 44 | proto = htons(ETH_P_IP); 45 | else if (ip_hdr(skb)->version == 6) 46 | proto = htons(ETH_P_IPV6); 47 | 48 | return proto; 49 | } 50 | 51 | #endif /* _NET_OVPN_DCO_SKB_H_ */ 52 | -------------------------------------------------------------------------------- /drivers/net/ovpn-dco/ovpn.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* OpenVPN data channel accelerator 3 | * 4 | * Copyright (C) 2019-2023 OpenVPN, Inc. 5 | * 6 | * Author: James Yonan 7 | * Antonio Quartulli 8 | */ 9 | 10 | #ifndef _NET_OVPN_DCO_OVPN_H_ 11 | #define _NET_OVPN_DCO_OVPN_H_ 12 | 13 | #include "main.h" 14 | #include "peer.h" 15 | #include "sock.h" 16 | #include "ovpnstruct.h" 17 | 18 | #include 19 | #include 20 | #include 21 | 22 | struct ovpn_struct; 23 | struct net_device; 24 | 25 | int ovpn_struct_init(struct net_device *dev); 26 | 27 | u16 ovpn_select_queue(struct net_device *dev, struct sk_buff *skb, 28 | struct net_device *sb_dev); 29 | 30 | void ovpn_keepalive_xmit(struct ovpn_peer *peer); 31 | void ovpn_explicit_exit_notify_xmit(struct ovpn_peer *peer); 32 | 33 | netdev_tx_t ovpn_net_xmit(struct sk_buff *skb, struct net_device *dev); 34 | 35 | int ovpn_recv(struct ovpn_struct *ovpn, struct ovpn_peer *peer, struct sk_buff *skb); 36 | 37 | void ovpn_encrypt_work(struct work_struct *work); 38 | void ovpn_decrypt_work(struct work_struct *work); 39 | int ovpn_napi_poll(struct napi_struct *napi, int budget); 40 | 41 | int ovpn_send_data(struct ovpn_struct *ovpn, u32 peer_id, const u8 *data, size_t len); 42 | 43 | #endif /* _NET_OVPN_DCO_OVPN_H_ */ 44 | 
-------------------------------------------------------------------------------- /drivers/net/ovpn-dco/sock.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* OpenVPN data channel accelerator 3 | * 4 | * Copyright (C) 2020- OpenVPN, Inc. 5 | * 6 | * Author: James Yonan 7 | * Antonio Quartulli 8 | */ 9 | 10 | #ifndef _NET_OVPN_DCO_SOCK_H_ 11 | #define _NET_OVPN_DCO_SOCK_H_ 12 | 13 | #include 14 | #include 15 | #include 16 | #include 17 | 18 | #include "peer.h" 19 | 20 | struct ovpn_struct; 21 | 22 | /** 23 | * struct ovpn_socket - a kernel socket referenced in the ovpn-dco code 24 | */ 25 | struct ovpn_socket { 26 | union { 27 | /** @ovpn: the VPN session object owning this socket (UDP only) */ 28 | struct ovpn_struct *ovpn; 29 | 30 | /* TCP only */ 31 | struct { 32 | /** @peer: the unique peer transmitting over this socket (TCP only) */ 33 | struct ovpn_peer *peer; 34 | struct ptr_ring recv_ring; 35 | }; 36 | }; 37 | 38 | /** @sock: the kernel socket */ 39 | struct socket *sock; 40 | 41 | /** @refcount: amount of contexts currently referencing this object */ 42 | struct kref refcount; 43 | 44 | /** @rcu: member used to schedule RCU destructor callback */ 45 | struct rcu_head rcu; 46 | }; 47 | 48 | struct ovpn_struct *ovpn_from_udp_sock(struct sock *sk); 49 | 50 | void ovpn_socket_release_kref(struct kref *kref); 51 | 52 | static inline void ovpn_socket_put(struct ovpn_socket *sock) 53 | { 54 | kref_put(&sock->refcount, ovpn_socket_release_kref); 55 | } 56 | 57 | struct ovpn_socket *ovpn_socket_new(struct socket *sock, struct ovpn_peer *peer); 58 | 59 | #endif /* _NET_OVPN_DCO_SOCK_H_ */ 60 | -------------------------------------------------------------------------------- /drivers/net/ovpn-dco/ovpnstruct.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* OpenVPN data channel accelerator 3 | * 
4 | * Copyright (C) 2019-2023 OpenVPN, Inc. 5 | * 6 | * Author: James Yonan 7 | * Antonio Quartulli 8 | */ 9 | 10 | #ifndef _NET_OVPN_DCO_OVPNSTRUCT_H_ 11 | #define _NET_OVPN_DCO_OVPNSTRUCT_H_ 12 | 13 | #include "peer.h" 14 | 15 | #include 16 | #include 17 | #include 18 | 19 | /* Our state per ovpn interface */ 20 | struct ovpn_struct { 21 | /* read-mostly objects in this section */ 22 | struct net_device *dev; 23 | 24 | /* device operation mode (i.e. P2P, MP) */ 25 | enum ovpn_mode mode; 26 | 27 | /* protect writing to the ovpn_struct object */ 28 | spinlock_t lock; 29 | 30 | /* workqueue used to schedule crypto work that may sleep */ 31 | struct workqueue_struct *crypto_wq; 32 | /* workqueue used to schedule generic event that may sleep or that need 33 | * to be performed out of softirq context 34 | */ 35 | struct workqueue_struct *events_wq; 36 | 37 | /* list of known peers */ 38 | struct { 39 | DECLARE_HASHTABLE(by_id, 12); 40 | DECLARE_HASHTABLE(by_transp_addr, 12); 41 | DECLARE_HASHTABLE(by_vpn_addr, 12); 42 | /* protects write access to any of the hashtables above */ 43 | spinlock_t lock; 44 | } peers; 45 | 46 | /* for p2p mode */ 47 | struct ovpn_peer __rcu *peer; 48 | 49 | unsigned int max_tun_queue_len; 50 | 51 | netdev_features_t set_features; 52 | 53 | void *security; 54 | 55 | u32 registered_nl_portid; 56 | bool registered_nl_portid_set; 57 | }; 58 | 59 | #endif /* _NET_OVPN_DCO_OVPNSTRUCT_H_ */ 60 | -------------------------------------------------------------------------------- /drivers/net/ovpn-dco/bind.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-2.0 2 | /* OpenVPN data channel accelerator 3 | * 4 | * Copyright (C) 2012-2023 OpenVPN, Inc. 
5 | * 6 | * Author: James Yonan 7 | * Antonio Quartulli 8 | */ 9 | 10 | #include "ovpn.h" 11 | #include "bind.h" 12 | #include "peer.h" 13 | 14 | #include 15 | #include 16 | #include 17 | #include 18 | 19 | /* Given a remote sockaddr, compute the skb hash 20 | * and get a dst_entry so we can send packets to the remote. 21 | * Called from process context or softirq (must be indicated with 22 | * process_context bool). 23 | */ 24 | struct ovpn_bind *ovpn_bind_from_sockaddr(const struct sockaddr_storage *ss) 25 | { 26 | struct ovpn_bind *bind; 27 | size_t sa_len; 28 | 29 | if (ss->ss_family == AF_INET) 30 | sa_len = sizeof(struct sockaddr_in); 31 | else if (ss->ss_family == AF_INET6) 32 | sa_len = sizeof(struct sockaddr_in6); 33 | else 34 | return ERR_PTR(-EAFNOSUPPORT); 35 | 36 | bind = kzalloc(sizeof(*bind), GFP_ATOMIC); 37 | if (unlikely(!bind)) 38 | return ERR_PTR(-ENOMEM); 39 | 40 | memcpy(&bind->sa, ss, sa_len); 41 | 42 | return bind; 43 | } 44 | 45 | static void ovpn_bind_release_rcu(struct rcu_head *head) 46 | { 47 | struct ovpn_bind *bind = container_of(head, struct ovpn_bind, rcu); 48 | 49 | kfree(bind); 50 | } 51 | 52 | void ovpn_bind_reset(struct ovpn_peer *peer, struct ovpn_bind *new) 53 | { 54 | struct ovpn_bind *old; 55 | 56 | spin_lock_bh(&peer->lock); 57 | old = rcu_replace_pointer(peer->bind, new, true); 58 | spin_unlock_bh(&peer->lock); 59 | 60 | if (old) 61 | call_rcu(&old->rcu, ovpn_bind_release_rcu); 62 | } 63 | -------------------------------------------------------------------------------- /drivers/net/ovpn-dco/bind.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* OVPN -- OpenVPN protocol accelerator for Linux 3 | * Copyright (C) 2012-2023 OpenVPN, Inc. 4 | * All rights reserved. 
5 | * Author: James Yonan 6 | */ 7 | 8 | #ifndef _NET_OVPN_DCO_OVPNBIND_H_ 9 | #define _NET_OVPN_DCO_OVPNBIND_H_ 10 | 11 | #include "addr.h" 12 | #include "rcu.h" 13 | 14 | #include 15 | #include 16 | #include 17 | 18 | struct ovpn_peer; 19 | 20 | struct ovpn_bind { 21 | struct ovpn_sockaddr sa; /* remote sockaddr */ 22 | 23 | union { 24 | struct in_addr ipv4; 25 | struct in6_addr ipv6; 26 | } local; 27 | 28 | struct rcu_head rcu; 29 | }; 30 | 31 | static inline bool ovpn_bind_skb_src_match(const struct ovpn_bind *bind, struct sk_buff *skb) 32 | { 33 | const unsigned short family = skb_protocol_to_family(skb); 34 | const struct ovpn_sockaddr *sa = &bind->sa; 35 | 36 | if (unlikely(!bind)) 37 | return false; 38 | 39 | if (unlikely(sa->in4.sin_family != family)) 40 | return false; 41 | 42 | switch (family) { 43 | case AF_INET: 44 | if (unlikely(sa->in4.sin_addr.s_addr != ip_hdr(skb)->saddr)) 45 | return false; 46 | 47 | if (unlikely(sa->in4.sin_port != udp_hdr(skb)->source)) 48 | return false; 49 | break; 50 | case AF_INET6: 51 | if (unlikely(!ipv6_addr_equal(&sa->in6.sin6_addr, &ipv6_hdr(skb)->saddr))) 52 | return false; 53 | 54 | if (unlikely(sa->in6.sin6_port != udp_hdr(skb)->source)) 55 | return false; 56 | break; 57 | default: 58 | return false; 59 | } 60 | 61 | return true; 62 | } 63 | 64 | struct ovpn_bind *ovpn_bind_from_sockaddr(const struct sockaddr_storage *sa); 65 | void ovpn_bind_reset(struct ovpn_peer *peer, struct ovpn_bind *bind); 66 | 67 | #endif /* _NET_OVPN_DCO_OVPNBIND_H_ */ 68 | -------------------------------------------------------------------------------- /drivers/net/ovpn-dco/stats.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* OpenVPN data channel accelerator 3 | * 4 | * Copyright (C) 2020- OpenVPN, Inc. 
5 | * 6 | * Author: James Yonan 7 | * Antonio Quartulli 8 | * Lev Stipakov 9 | */ 10 | 11 | #ifndef _NET_OVPN_DCO_OVPNSTATS_H_ 12 | #define _NET_OVPN_DCO_OVPNSTATS_H_ 13 | 14 | #include 15 | #include 16 | 17 | struct ovpn_struct; 18 | 19 | /* per-peer stats, measured on transport layer */ 20 | 21 | /* one stat */ 22 | struct ovpn_peer_stat { 23 | atomic64_t bytes; 24 | atomic_t packets; 25 | }; 26 | 27 | /* rx and tx stats, enabled by notify_per != 0 or period != 0 */ 28 | struct ovpn_peer_stats { 29 | struct ovpn_peer_stat rx; 30 | struct ovpn_peer_stat tx; 31 | }; 32 | 33 | /* struct for OVPN_ERR_STATS */ 34 | 35 | struct ovpn_err_stat { 36 | unsigned int category; 37 | int errcode; 38 | u64 count; 39 | }; 40 | 41 | struct ovpn_err_stats { 42 | /* total stats, returned by kovpn */ 43 | unsigned int total_stats; 44 | /* number of stats dimensioned below */ 45 | unsigned int n_stats; 46 | struct ovpn_err_stat stats[]; 47 | }; 48 | 49 | void ovpn_peer_stats_init(struct ovpn_peer_stats *ps); 50 | 51 | static inline void ovpn_peer_stats_increment(struct ovpn_peer_stat *stat, const unsigned int n) 52 | { 53 | atomic64_add(n, &stat->bytes); 54 | atomic_inc(&stat->packets); 55 | } 56 | 57 | static inline void ovpn_peer_stats_increment_rx(struct ovpn_peer_stats *stats, const unsigned int n) 58 | { 59 | ovpn_peer_stats_increment(&stats->rx, n); 60 | } 61 | 62 | static inline void ovpn_peer_stats_increment_tx(struct ovpn_peer_stats *stats, const unsigned int n) 63 | { 64 | ovpn_peer_stats_increment(&stats->tx, n); 65 | } 66 | 67 | #endif /* _NET_OVPN_DCO_OVPNSTATS_H_ */ 68 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | # SPDX-License-Identifier: GPL-2.0 2 | # Copyright (C) 2020- OpenVPN, Inc. 
3 | # 4 | # Author: Antonio Quartulli 5 | 6 | PWD:=$(shell pwd) 7 | KERNEL_SRC ?= /lib/modules/$(shell uname -r)/build 8 | ifeq ($(shell cd $(KERNEL_SRC) && pwd),) 9 | $(warning $(KERNEL_SRC) is missing, please set KERNEL_SRC) 10 | endif 11 | 12 | export KERNEL_SRC 13 | RM ?= rm -f 14 | CP := cp -fpR 15 | LN := ln -sf 16 | DEPMOD := depmod -a 17 | 18 | REVISION= $(shell if [ -d "$(PWD)/.git" ]; then \ 19 | echo $$(git --git-dir="$(PWD)/.git" describe --always --dirty --match "v*" |sed 's/^v//' 2> /dev/null || echo "[unknown]"); \ 20 | fi) 21 | ifneq ("$(wildcard $(KERNEL_SRC)/include/generated/uapi/linux/suse_version.h)","") 22 | VERSION_INCLUDE = -include linux/suse_version.h 23 | endif 24 | 25 | NOSTDINC_FLAGS += \ 26 | -I$(PWD)/include/ \ 27 | $(CFLAGS) \ 28 | $(VERSION_INCLUDE) \ 29 | -include $(PWD)/linux-compat.h \ 30 | -I$(PWD)/compat-include/ 31 | 32 | ifneq ($(REVISION),) 33 | NOSTDINC_FLAGS += -DOVPN_DCO_VERSION=\"$(REVISION)\" 34 | endif 35 | 36 | ifeq ($(DEBUG),1) 37 | NOSTDINC_FLAGS += -DDEBUG=1 38 | endif 39 | 40 | obj-y += drivers/net/ovpn-dco/ 41 | export ovpn-dco-v2-y 42 | 43 | BUILD_FLAGS := \ 44 | M=$(PWD) \ 45 | PWD=$(PWD) \ 46 | REVISION=$(REVISION) \ 47 | CONFIG_OVPN_DCO_V2=m \ 48 | INSTALL_MOD_DIR=updates/ 49 | 50 | all: config 51 | $(MAKE) -C $(KERNEL_SRC) $(BUILD_FLAGS) modules 52 | 53 | clean: 54 | $(RM) psk_client 55 | $(RM) compat-autoconf.h* 56 | $(MAKE) -C $(KERNEL_SRC) $(BUILD_FLAGS) clean 57 | $(MAKE) -C tests clean 58 | 59 | install: config 60 | $(MAKE) -C $(KERNEL_SRC) $(BUILD_FLAGS) modules_install 61 | $(DEPMOD) 62 | 63 | config: 64 | $(PWD)/gen-compat-autoconf.sh $(PWD)/compat-autoconf.h 65 | 66 | tests: 67 | $(MAKE) -C tests 68 | 69 | .PHONY: all clean install config tests 70 | -------------------------------------------------------------------------------- /tests/multihome-test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # SPDX-License-Identifier: GPL-2.0 3 | # 
Copyright (C) 2021-2023 OpenVPN, Inc. 4 | # 5 | # Author: Antonio Quartulli 6 | 7 | #set -x 8 | set -e 9 | 10 | UDP_PEERS_FILE=${UDP_PEERS_FILE:-udp_peers.txt} 11 | TCP_PEERS_FILE=${TCP_PEERS_FILE:-tcp_peers.txt} 12 | OVPN_CLI=${OVPN_CLI:-./ovpn-cli} 13 | ALG=${ALG:-aes} 14 | 15 | function create_ns() { 16 | ip netns add peer$1 17 | } 18 | 19 | function setup_ns() { 20 | if [ $1 -eq 0 ]; then 21 | ip -n peer0 link add br0 type bridge 22 | ip -n peer0 link set br0 up 23 | 24 | for p in $(seq 1 $NUM_PEERS); do 25 | ip link add veth${p} netns peer0 type veth peer name veth${p} netns peer${p} 26 | ip -n peer0 addr add 10.10.0.${p}/16 dev br0 27 | ip -n peer0 link set veth${p} master br0 28 | ip -n peer0 link set veth${p} up 29 | 30 | ip -n peer${p} addr add 10.10.${p}.2/16 dev veth${p} 31 | ip -n peer${p} link set veth${p} up 32 | done 33 | fi 34 | 35 | ip -n peer$1 link add tun0 type ovpn-dco 36 | ip -n peer$1 addr add $2 dev tun0 37 | ip -n peer$1 link set tun0 up 38 | } 39 | 40 | function add_peer() { 41 | if [ $tcp -eq 0 ]; then 42 | if [ $1 -eq 0 ]; then 43 | ip netns exec peer0 $OVPN_CLI tun0 new_multi_peer 1 $UDP_PEERS_FILE 44 | 45 | for p in $(seq 1 $NUM_PEERS); do 46 | # ip netns exec peer0 $OVPN_CLI tun0 new_peer ${p} ${p} 10.10.${p}.2 1 5.5.5.$((${p} + 1)) 47 | ip netns exec peer0 $OVPN_CLI tun0 new_key ${p} $ALG 0 data64.key 48 | done 49 | else 50 | ip netns exec peer${1} $OVPN_CLI tun0 new_peer 1 ${1} 10.10.0.${1} 1 5.5.5.1 51 | ip netns exec peer${1} $OVPN_CLI tun0 new_key ${1} $ALG 1 data64.key 52 | fi 53 | else 54 | if [ $1 -eq 0 ]; then 55 | (ip netns exec peer$1 $OVPN_CLI tun0 listen 1 $TCP_PEERS_FILE && { 56 | for p in $(seq 1 $NUM_PEERS); do 57 | ip netns exec peer0 $OVPN_CLI tun0 new_key ${p} $ALG 0 data64.key 58 | done 59 | }) & 60 | sleep 5 61 | else 62 | ip netns exec peer${1} $OVPN_CLI tun0 connect ${1} 10.10.0.${1} 1 5.5.5.1 63 | ip netns exec peer${1} $OVPN_CLI tun0 new_key ${1} $ALG 1 data64.key 64 | fi 65 | fi 66 | } 67 | 68 | 69 | # clean 
up 70 | for p in $(seq 1 10); do 71 | ip -n peer0 link del veth${p} 2>/dev/null || true 72 | done 73 | for p in $(seq 0 10); do 74 | ip -n peer${p} link del tun0 2>/dev/null || true 75 | ip netns del peer${p} 2>/dev/null || true 76 | done 77 | 78 | tcp=0 79 | if [ "$1" == "-t" ]; then 80 | shift 81 | tcp=1 82 | NUM_PEERS=${NUM_PEERS:-$(wc -l $TCP_PEERS_FILE | awk '{print $1}')} 83 | else 84 | NUM_PEERS=${NUM_PEERS:-$(wc -l $UDP_PEERS_FILE | awk '{print $1}')} 85 | fi 86 | 87 | for p in $(seq 0 $NUM_PEERS); do 88 | create_ns ${p} 89 | done 90 | 91 | for p in $(seq 0 $NUM_PEERS); do 92 | setup_ns ${p} 5.5.5.$((${p} + 1))/24 93 | done 94 | 95 | for p in $(seq 0 $NUM_PEERS); do 96 | add_peer ${p} 97 | done 98 | 99 | for p in $(seq 1 $NUM_PEERS); do 100 | ip netns exec peer${p} ping -qfc 2000 -w 5 5.5.5.1 101 | done 102 | -------------------------------------------------------------------------------- /drivers/net/ovpn-dco/proto.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* OpenVPN data channel accelerator 3 | * 4 | * Copyright (C) 2020- OpenVPN, Inc. 5 | * 6 | * Author: Antonio Quartulli 7 | * James Yonan 8 | */ 9 | 10 | #ifndef _NET_OVPN_DCO_OVPNPROTO_H_ 11 | #define _NET_OVPN_DCO_OVPNPROTO_H_ 12 | 13 | #include "main.h" 14 | 15 | #include 16 | 17 | /* Methods for operating on the initial command 18 | * byte of the OpenVPN protocol. 
19 | */ 20 | 21 | /* packet opcode (high 5 bits) and key-id (low 3 bits) are combined in 22 | * one byte 23 | */ 24 | #define OVPN_KEY_ID_MASK 0x07 25 | #define OVPN_OPCODE_SHIFT 3 26 | #define OVPN_OPCODE_MASK 0x1F 27 | /* upper bounds on opcode and key ID */ 28 | #define OVPN_KEY_ID_MAX (OVPN_KEY_ID_MASK + 1) 29 | #define OVPN_OPCODE_MAX (OVPN_OPCODE_MASK + 1) 30 | /* packet opcodes of interest to us */ 31 | #define OVPN_DATA_V1 6 /* data channel V1 packet */ 32 | #define OVPN_DATA_V2 9 /* data channel V2 packet */ 33 | /* size of initial packet opcode */ 34 | #define OVPN_OP_SIZE_V1 1 35 | #define OVPN_OP_SIZE_V2 4 36 | #define OVPN_PEER_ID_MASK 0x00FFFFFF 37 | #define OVPN_PEER_ID_UNDEF 0x00FFFFFF 38 | /* first byte of keepalive message */ 39 | #define OVPN_KEEPALIVE_FIRST_BYTE 0x2a 40 | /* first byte of exit message */ 41 | #define OVPN_EXPLICIT_EXIT_NOTIFY_FIRST_BYTE 0x28 42 | 43 | /** 44 | * Extract the OP code from the specified byte 45 | * 46 | * Return the OP code 47 | */ 48 | static inline u8 ovpn_opcode_from_byte(u8 byte) 49 | { 50 | return byte >> OVPN_OPCODE_SHIFT; 51 | } 52 | 53 | /** 54 | * Extract the OP code from the skb head. 55 | * 56 | * Note: this function assumes that the skb head was pulled enough 57 | * to access the first byte. 58 | * 59 | * Return the OP code 60 | */ 61 | static inline u8 ovpn_opcode_from_skb(const struct sk_buff *skb, u16 offset) 62 | { 63 | return ovpn_opcode_from_byte(*(skb->data + offset)); 64 | } 65 | 66 | /** 67 | * Extract the key ID from the skb head. 68 | * 69 | * Note: this function assumes that the skb head was pulled enough 70 | * to access the first byte. 71 | * 72 | * Return the key ID 73 | */ 74 | 75 | static inline u8 ovpn_key_id_from_skb(const struct sk_buff *skb) 76 | { 77 | return *skb->data & OVPN_KEY_ID_MASK; 78 | } 79 | 80 | /** 81 | * Extract the peer ID from the skb head. 82 | * 83 | * Note: this function assumes that the skb head was pulled enough 84 | * to access the first 4 bytes. 
85 | * 86 | * Return the peer ID. 87 | */ 88 | 89 | static inline u32 ovpn_peer_id_from_skb(const struct sk_buff *skb, u16 offset) 90 | { 91 | return ntohl(*(__be32 *)(skb->data + offset)) & OVPN_PEER_ID_MASK; 92 | } 93 | 94 | static inline u32 ovpn_opcode_compose(u8 opcode, u8 key_id, u32 peer_id) 95 | { 96 | const u8 op = (opcode << OVPN_OPCODE_SHIFT) | (key_id & OVPN_KEY_ID_MASK); 97 | 98 | return (op << 24) | (peer_id & OVPN_PEER_ID_MASK); 99 | } 100 | 101 | #endif /* _NET_OVPN_DCO_OVPNPROTO_H_ */ 102 | -------------------------------------------------------------------------------- /drivers/net/ovpn-dco/pktid.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-2.0 2 | /* OpenVPN data channel accelerator 3 | * 4 | * Copyright (C) 2020- OpenVPN, Inc. 5 | * 6 | * Author: Antonio Quartulli 7 | * James Yonan 8 | */ 9 | 10 | #include "pktid.h" 11 | 12 | #include 13 | #include 14 | 15 | void ovpn_pktid_xmit_init(struct ovpn_pktid_xmit *pid) 16 | { 17 | atomic64_set(&pid->seq_num, 1); 18 | pid->tcp_linear = NULL; 19 | } 20 | 21 | void ovpn_pktid_recv_init(struct ovpn_pktid_recv *pr) 22 | { 23 | memset(pr, 0, sizeof(*pr)); 24 | spin_lock_init(&pr->lock); 25 | } 26 | 27 | /* Packet replay detection. 28 | * Allows ID backtrack of up to REPLAY_WINDOW_SIZE - 1. 29 | */ 30 | int ovpn_pktid_recv(struct ovpn_pktid_recv *pr, u32 pkt_id, u32 pkt_time) 31 | { 32 | const unsigned long now = jiffies; 33 | int ret; 34 | 35 | spin_lock(&pr->lock); 36 | 37 | /* expire backtracks at or below pr->id after PKTID_RECV_EXPIRE time */ 38 | if (unlikely(time_after_eq(now, pr->expire))) 39 | pr->id_floor = pr->id; 40 | 41 | /* ID must not be zero */ 42 | if (unlikely(pkt_id == 0)) { 43 | ret = -EINVAL; 44 | goto out; 45 | } 46 | 47 | /* time changed? 
*/ 48 | if (unlikely(pkt_time != pr->time)) { 49 | if (pkt_time > pr->time) { 50 | /* time moved forward, accept */ 51 | pr->base = 0; 52 | pr->extent = 0; 53 | pr->id = 0; 54 | pr->time = pkt_time; 55 | pr->id_floor = 0; 56 | } else { 57 | /* time moved backward, reject */ 58 | ret = -ETIME; 59 | goto out; 60 | } 61 | } 62 | 63 | if (likely(pkt_id == pr->id + 1)) { 64 | /* well-formed ID sequence (incremented by 1) */ 65 | pr->base = REPLAY_INDEX(pr->base, -1); 66 | pr->history[pr->base / 8] |= (1 << (pr->base % 8)); 67 | if (pr->extent < REPLAY_WINDOW_SIZE) 68 | ++pr->extent; 69 | pr->id = pkt_id; 70 | } else if (pkt_id > pr->id) { 71 | /* ID jumped forward by more than one */ 72 | const unsigned int delta = pkt_id - pr->id; 73 | 74 | if (delta < REPLAY_WINDOW_SIZE) { 75 | unsigned int i; 76 | 77 | pr->base = REPLAY_INDEX(pr->base, -delta); 78 | pr->history[pr->base / 8] |= (1 << (pr->base % 8)); 79 | pr->extent += delta; 80 | if (pr->extent > REPLAY_WINDOW_SIZE) 81 | pr->extent = REPLAY_WINDOW_SIZE; 82 | for (i = 1; i < delta; ++i) { 83 | unsigned int newb = REPLAY_INDEX(pr->base, i); 84 | 85 | pr->history[newb / 8] &= ~BIT(newb % 8); 86 | } 87 | } else { 88 | pr->base = 0; 89 | pr->extent = REPLAY_WINDOW_SIZE; 90 | memset(pr->history, 0, sizeof(pr->history)); 91 | pr->history[0] = 1; 92 | } 93 | pr->id = pkt_id; 94 | } else { 95 | /* ID backtrack */ 96 | const unsigned int delta = pr->id - pkt_id; 97 | 98 | if (delta > pr->max_backtrack) 99 | pr->max_backtrack = delta; 100 | if (delta < pr->extent) { 101 | if (pkt_id > pr->id_floor) { 102 | const unsigned int ri = REPLAY_INDEX(pr->base, 103 | delta); 104 | u8 *p = &pr->history[ri / 8]; 105 | const u8 mask = (1 << (ri % 8)); 106 | 107 | if (*p & mask) { 108 | ret = -EINVAL; 109 | goto out; 110 | } 111 | *p |= mask; 112 | } else { 113 | ret = -EINVAL; 114 | goto out; 115 | } 116 | } else { 117 | ret = -EINVAL; 118 | goto out; 119 | } 120 | } 121 | 122 | pr->expire = now + PKTID_RECV_EXPIRE; 123 | ret = 0; 124 | 
out: 125 | spin_unlock(&pr->lock); 126 | return ret; 127 | } 128 | -------------------------------------------------------------------------------- /tests/float-test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # SPDX-License-Identifier: GPL-2.0 3 | # Copyright (C) 2020- OpenVPN, Inc. 4 | # 5 | # Author: Antonio Quartulli 6 | 7 | #set -x 8 | set -e 9 | 10 | UDP_PEERS_FILE=${UDP_PEERS_FILE:-udp_peers.txt} 11 | TCP_PEERS_FILE=${TCP_PEERS_FILE:-tcp_peers.txt} 12 | OVPN_CLI=${OVPN_CLI:-./ovpn-cli} 13 | ALG=${ALG:-aes} 14 | 15 | function create_ns() { 16 | ip netns add peer$1 17 | } 18 | 19 | function setup_ns() { 20 | if [ $1 -eq 0 ]; then 21 | for p in $(seq 1 $NUM_PEERS); do 22 | ip link add veth${p} netns peer0 type veth peer name veth${p} netns peer${p} 23 | 24 | ip -n peer0 addr add 10.10.${p}.1/24 dev veth${p} 25 | ip -n peer0 link set veth${p} up 26 | 27 | ip -n peer${p} addr add 10.10.${p}.2/24 dev veth${p} 28 | ip -n peer${p} link set veth${p} up 29 | done 30 | fi 31 | 32 | if [ $ipv6 -eq 1 ]; then 33 | sleep 5 34 | fi 35 | 36 | ip -n peer$1 link add tun0 type ovpn-dco 37 | ip -n peer$1 addr add $2 dev tun0 38 | ip -n peer$1 link set tun0 up 39 | } 40 | 41 | function add_peer() { 42 | if [ $tcp -eq 0 ]; then 43 | if [ $1 -eq 0 ]; then 44 | ip netns exec peer0 $OVPN_CLI tun0 new_multi_peer 1 $UDP_PEERS_FILE 45 | 46 | for p in $(seq 1 $NUM_PEERS); do 47 | # ip netns exec peer0 $OVPN_CLI tun0 new_peer ${p} ${p} 10.10.${p}.2 1 5.5.5.$((${p} + 1)) 48 | ip netns exec peer0 $OVPN_CLI tun0 new_key ${p} $ALG 0 data64.key 49 | done 50 | else 51 | ip netns exec peer${1} $OVPN_CLI tun0 new_peer 1 ${1} 10.10.${1}.1 1 5.5.5.1 52 | ip netns exec peer${1} $OVPN_CLI tun0 new_key ${1} $ALG 1 data64.key 53 | fi 54 | else 55 | if [ $1 -eq 0 ]; then 56 | (ip netns exec peer$1 $OVPN_CLI tun0 listen 1 $TCP_PEERS_FILE && { 57 | for p in $(seq 1 $NUM_PEERS); do 58 | ip netns exec peer0 $OVPN_CLI tun0 new_key ${p} $ALG 0 
data64.key 59 | done 60 | }) & 61 | sleep 5 62 | else 63 | ip netns exec peer${1} $OVPN_CLI tun0 connect ${1} 10.10.${1}.1 1 5.5.5.1 64 | ip netns exec peer${1} $OVPN_CLI tun0 new_key ${1} $ALG 1 data64.key 65 | fi 66 | fi 67 | } 68 | 69 | 70 | # clean up 71 | for p in $(seq 1 10); do 72 | ip -n peer0 link del veth${p} 2>/dev/null || true 73 | done 74 | for p in $(seq 0 10); do 75 | ip -n peer${p} link del tun0 2>/dev/null || true 76 | ip netns del peer${p} 2>/dev/null || true 77 | done 78 | 79 | ipv6=0 80 | if [ "$1" == "-6" ]; then 81 | ipv6=1 82 | shift 83 | fi 84 | 85 | tcp=0 86 | if [ "$1" == "-t" ]; then 87 | shift 88 | tcp=1 89 | NUM_PEERS=${NUM_PEERS:-$(wc -l $TCP_PEERS_FILE | awk '{print $1}')} 90 | else 91 | NUM_PEERS=${NUM_PEERS:-$(wc -l $UDP_PEERS_FILE | awk '{print $1}')} 92 | fi 93 | 94 | for p in $(seq 0 $NUM_PEERS); do 95 | create_ns ${p} 96 | done 97 | 98 | if [ $ipv6 -eq 1 ]; then 99 | setup_ns 0 fc00::1 64 5.5.5.1/24 1 fc00::2 2 5.5.5.2 ipv6 100 | setup_ns 1 fc00::2 64 5.5.5.2/24 2 fc00::1 1 5.5.5.1 ipv6 101 | setup_ns 2 fc00::2 64 5.5.5.2/24 2 fc00::1 1 5.5.5.1 ipv6 102 | else 103 | for p in $(seq 0 $NUM_PEERS); do 104 | setup_ns ${p} 5.5.5.$((${p} + 1))/24 105 | done 106 | 107 | for p in $(seq 0 $NUM_PEERS); do 108 | add_peer ${p} 109 | done 110 | fi 111 | 112 | for p in $(seq 1 $NUM_PEERS); do 113 | ip netns exec peer0 ping -qfc 2000 -w 5 5.5.5.$((${p} + 1)) 114 | done 115 | # make clients float.. 
116 | for p in $(seq 1 $NUM_PEERS); do 117 | ip -n peer${p} addr del 10.10.${p}.2/24 dev veth${p} 118 | ip -n peer${p} addr add 10.10.${p}.3/24 dev veth${p} 119 | done 120 | for p in $(seq 1 $NUM_PEERS); do 121 | ip netns exec peer${p} ping -qfc 2000 -w 5 5.5.5.1 122 | done 123 | -------------------------------------------------------------------------------- /tests/netns-test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # SPDX-License-Identifier: GPL-2.0 3 | # Copyright (C) 2020- OpenVPN, Inc. 4 | # 5 | # Author: Antonio Quartulli 6 | 7 | #set -x 8 | set -e 9 | 10 | UDP_PEERS_FILE=${UDP_PEERS_FILE:-udp_peers.txt} 11 | TCP_PEERS_FILE=${TCP_PEERS_FILE:-tcp_peers.txt} 12 | OVPN_CLI=${OVPN_CLI:-./ovpn-cli} 13 | ALG=${ALG:-aes} 14 | 15 | function create_ns() { 16 | ip netns add peer$1 17 | } 18 | 19 | function setup_ns() { 20 | if [ $1 -eq 0 ]; then 21 | for p in $(seq 1 $NUM_PEERS); do 22 | ip link add veth${p} netns peer0 type veth peer name veth${p} netns peer${p} 23 | 24 | ip -n peer0 addr add 10.10.${p}.1/24 dev veth${p} 25 | ip -n peer0 link set veth${p} up 26 | 27 | ip -n peer${p} addr add 10.10.${p}.2/24 dev veth${p} 28 | ip -n peer${p} link set veth${p} up 29 | done 30 | fi 31 | 32 | if [ $ipv6 -eq 1 ]; then 33 | sleep 5 34 | fi 35 | 36 | ip -n peer$1 link add tun0 type ovpn-dco 37 | ip -n peer$1 addr add $2 dev tun0 38 | ip -n peer$1 link set tun0 up 39 | } 40 | 41 | function add_peer() { 42 | if [ $tcp -eq 0 ]; then 43 | if [ $1 -eq 0 ]; then 44 | ip netns exec peer0 $OVPN_CLI tun0 new_multi_peer 1 $UDP_PEERS_FILE 45 | 46 | for p in $(seq 1 $NUM_PEERS); do 47 | # ip netns exec peer0 $OVPN_CLI tun0 new_peer ${p} ${p} 10.10.${p}.2 1 5.5.5.$((${p} + 1)) 48 | ip netns exec peer0 $OVPN_CLI tun0 new_key ${p} $ALG 0 data64.key 49 | done 50 | else 51 | ip netns exec peer${1} $OVPN_CLI tun0 new_peer 1 ${1} 10.10.${1}.1 1 5.5.5.1 52 | ip netns exec peer${1} $OVPN_CLI tun0 new_key ${1} $ALG 1 data64.key 53 | 
fi 54 | else 55 | if [ $1 -eq 0 ]; then 56 | (ip netns exec peer$1 $OVPN_CLI tun0 listen 1 $TCP_PEERS_FILE && { 57 | for p in $(seq 1 $NUM_PEERS); do 58 | ip netns exec peer0 $OVPN_CLI tun0 new_key ${p} $ALG 0 data64.key 59 | done 60 | }) & 61 | sleep 5 62 | else 63 | ip netns exec peer${1} $OVPN_CLI tun0 connect ${1} 10.10.${1}.1 1 5.5.5.1 64 | ip netns exec peer${1} $OVPN_CLI tun0 new_key ${1} $ALG 1 data64.key 65 | fi 66 | fi 67 | } 68 | 69 | 70 | # clean up 71 | for p in $(seq 1 10); do 72 | ip -n peer0 link del veth${p} 2>/dev/null || true 73 | done 74 | for p in $(seq 0 10); do 75 | ip -n peer${p} link del tun0 2>/dev/null || true 76 | ip netns del peer${p} 2>/dev/null || true 77 | done 78 | 79 | ipv6=0 80 | if [ "$1" == "-6" ]; then 81 | ipv6=1 82 | shift 83 | fi 84 | 85 | tcp=0 86 | if [ "$1" == "-t" ]; then 87 | shift 88 | tcp=1 89 | NUM_PEERS=${NUM_PEERS:-$(wc -l $TCP_PEERS_FILE | awk '{print $1}')} 90 | else 91 | NUM_PEERS=${NUM_PEERS:-$(wc -l $UDP_PEERS_FILE | awk '{print $1}')} 92 | fi 93 | 94 | for p in $(seq 0 $NUM_PEERS); do 95 | create_ns ${p} 96 | done 97 | 98 | if [ $ipv6 -eq 1 ]; then 99 | setup_ns 0 fc00::1 64 5.5.5.1/24 1 fc00::2 2 5.5.5.2 ipv6 100 | setup_ns 1 fc00::2 64 5.5.5.2/24 2 fc00::1 1 5.5.5.1 ipv6 101 | setup_ns 2 fc00::2 64 5.5.5.2/24 2 fc00::1 1 5.5.5.1 ipv6 102 | else 103 | for p in $(seq 0 $NUM_PEERS); do 104 | setup_ns ${p} 5.5.5.$((${p} + 1))/24 105 | done 106 | 107 | for p in $(seq 0 $NUM_PEERS); do 108 | add_peer ${p} 109 | done 110 | fi 111 | 112 | for p in $(seq 1 $NUM_PEERS); do 113 | ip netns exec peer0 ping -qfc 2000 -w 5 5.5.5.$((${p} + 1)) 114 | done 115 | 116 | echo "Querying all peers:" 117 | ip netns exec peer0 $OVPN_CLI tun0 get_peer 118 | 119 | echo "Querying peer 1:" 120 | ip netns exec peer0 $OVPN_CLI tun0 get_peer 1 121 | 122 | echo "Querying non-existent peer 10:" 123 | ip netns exec peer0 $OVPN_CLI tun0 get_peer 10 || true 124 | 125 | #ip netns exec peer0 $OVPN_CLI tun0 del_peer 1 126 | 
-------------------------------------------------------------------------------- /README: -------------------------------------------------------------------------------- 1 | == OpenVPN Data Channel Offload in the linux kernel (ovpn-dco) == 2 | 3 | ** MAINTENANCE MODE ** 4 | This repository is currently in maintenance mode and we only accept important 5 | and meaningful bugfixes. 6 | 7 | For the new version of the DCO linux kernel module, please refer to the following 8 | repository: 9 | 10 | https://github.com/OpenVPN/ovpn-net-next 11 | 12 | == License == 13 | 14 | ovpn-dco is released under the terms of the GPLv2 license. 15 | 16 | 17 | == Submitting patches == 18 | 19 | Patches for ovpn-dco can be submitted to the openvpn-devel mailing list at 20 | openvpn-devel@lists.sourceforge.net 21 | 22 | The patch subject *must* start with "ovpn-dco:". This way patches for this 23 | project can easily be dinstinguished from patches for other projects. 24 | At the same time it is part of the kernel guidelines to have subjects starting 25 | with a prefix identifying the component being modified (ovpn-dco in this case). 26 | 27 | To generate patches, please use git-format-patch and git-send-email. 28 | 29 | 30 | == Building == 31 | 32 | To build the ovpn-dco kernel module, just type: 33 | 34 | $ make 35 | 36 | in the root folder. 37 | The Makefile will autodetect your running kernel and will try to use its 38 | headers to get the code compiled. 39 | 40 | If you want to build ovpn-dco against a kernel different from the one 41 | running on the host, run: 42 | 43 | $ make KERNEL_SRC=/path/to/the/kernel/tree 44 | 45 | The control is passed to the kernel Makefile, therefore any kernel Makefile 46 | argument can be specified on the command line and it will be passed 47 | automatically. 
48 | 49 | Once done building, executing the command: 50 | 51 | $ make install 52 | 53 | will install the ovpn-dco.ko kernel module in the updates/ subfolder of 54 | the kernel modules directory on your system. 55 | It normally means `/lib/modules/$(uname -r)/updates/`. 56 | 57 | 58 | == Testing == 59 | 60 | A basic pre-shared-key client (called ovpn-cli) is also provided in the 61 | tests/ folder. 62 | It can be compiled by typing: 63 | 64 | $ make tests 65 | 66 | One way to test ovpn-dco is to run multiple tun interfaces on the same hosts 67 | associated with different network namespaces. 68 | A script that takes care of setting up 2 NS and 2 interfaces is provided at 69 | `tests/netns-test.sh`. 70 | 71 | By running this script from the tests folder as follows: 72 | 73 | $ cd tests 74 | $ ./netns-test.sh 75 | 76 | the host will configure a basic tunnel using a pre-shared key (the ovpn-cli 77 | binary is used for this). 78 | 79 | The 2 namespaces are named `peer0` and `peer1`. Each interface is respectively 80 | configured with `5.5.5.1/24` and `5.5.5.2/24`. 81 | 82 | At this point it is possible to make a basic ping test by executing: 83 | 84 | $ ip netns exec peer0 ping 5.5.5.2 85 | 86 | If the command above works, it means that the 2 interfaces are exchanging 87 | traffic properly over the ovpn link. 88 | 89 | Note: running kernel must have network namespaces support compiled in, but it 90 | is fairly standard on modern Linux distros. 91 | 92 | For reference, a sample kernel config file is provided in tests/qemu/config.net-next. 93 | This config file is used for compiling a minimal kernel based on the net-next tree. 94 | 95 | 96 | To run net-next, it's better to rely on any VM, so that the developer does not need 97 | to worry about bugs or spurious kernel crashes. For this reason qemu is suggested 98 | (but any other VM is fine too). At `tests/qemu/launch_deb2.sh` a sample script 99 | can be found that shows how qemu can be launched for testing. 
100 | 101 | 102 | == Limitations == 103 | 104 | This is a list of current limitations which are planned to be removed as we move forward: 105 | * Only AEAD mode and 'none' (with no auth) supported 106 | * Only AES-GCM and CHACHA20POLY1305 ciphers supported 107 | -------------------------------------------------------------------------------- /drivers/net/ovpn-dco/pktid.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* OpenVPN data channel accelerator 3 | * 4 | * Copyright (C) 2020- OpenVPN, Inc. 5 | * 6 | * Author: Antonio Quartulli 7 | * James Yonan 8 | */ 9 | 10 | #ifndef _NET_OVPN_DCO_OVPNPKTID_H_ 11 | #define _NET_OVPN_DCO_OVPNPKTID_H_ 12 | 13 | #include "main.h" 14 | 15 | /* When the OpenVPN protocol is run in AEAD mode, use 16 | * the OpenVPN packet ID as the AEAD nonce: 17 | * 18 | * 00000005 521c3b01 4308c041 19 | * [seq # ] [ nonce_tail ] 20 | * [ 12-byte full IV ] -> NONCE_SIZE 21 | * [4-bytes -> NONCE_WIRE_SIZE 22 | * on wire] 23 | */ 24 | 25 | /* OpenVPN nonce size */ 26 | #define NONCE_SIZE 12 27 | /* amount of bytes of the nonce received from user space */ 28 | #define NONCE_TAIL_SIZE 8 29 | 30 | /* OpenVPN nonce size reduced by 8-byte nonce tail -- this is the 31 | * size of the AEAD Associated Data (AD) sent over the wire 32 | * and is normally the head of the IV 33 | */ 34 | #define NONCE_WIRE_SIZE (NONCE_SIZE - sizeof(struct ovpn_nonce_tail)) 35 | 36 | /* If no packets received for this length of time, set a backtrack floor 37 | * at highest received packet ID thus far. 
38 | */ 39 | #define PKTID_RECV_EXPIRE (30 * HZ) 40 | 41 | /* Last 8 bytes of AEAD nonce 42 | * Provided by userspace and usually derived from 43 | * key material generated during TLS handshake 44 | */ 45 | struct ovpn_nonce_tail { 46 | u8 u8[NONCE_TAIL_SIZE]; 47 | }; 48 | 49 | /* Packet-ID state for transmitter */ 50 | struct ovpn_pktid_xmit { 51 | atomic64_t seq_num; 52 | struct ovpn_tcp_linear *tcp_linear; 53 | }; 54 | 55 | /* replay window sizing in bytes = 2^REPLAY_WINDOW_ORDER */ 56 | #define REPLAY_WINDOW_ORDER 8 57 | 58 | #define REPLAY_WINDOW_BYTES BIT(REPLAY_WINDOW_ORDER) 59 | #define REPLAY_WINDOW_SIZE (REPLAY_WINDOW_BYTES * 8) 60 | #define REPLAY_INDEX(base, i) (((base) + (i)) & (REPLAY_WINDOW_SIZE - 1)) 61 | 62 | /* Packet-ID state for receiver. 63 | * Other than lock member, can be zeroed to initialize. 64 | */ 65 | struct ovpn_pktid_recv { 66 | /* "sliding window" bitmask of recent packet IDs received */ 67 | u8 history[REPLAY_WINDOW_BYTES]; 68 | /* bit position of deque base in history */ 69 | unsigned int base; 70 | /* extent (in bits) of deque in history */ 71 | unsigned int extent; 72 | /* expiration of history in jiffies */ 73 | unsigned long expire; 74 | /* highest sequence number received */ 75 | u32 id; 76 | /* highest time stamp received */ 77 | u32 time; 78 | /* we will only accept backtrack IDs > id_floor */ 79 | u32 id_floor; 80 | unsigned int max_backtrack; 81 | /* protects entire pktd ID state */ 82 | spinlock_t lock; 83 | }; 84 | 85 | /* Get the next packet ID for xmit */ 86 | static inline int ovpn_pktid_xmit_next(struct ovpn_pktid_xmit *pid, u32 *pktid) 87 | { 88 | const s64 seq_num = atomic64_fetch_add_unless(&pid->seq_num, 1, 89 | 0x100000000LL); 90 | /* when the 32bit space is over, we return an error because the packet ID is used to create 91 | * the cipher IV and we do not want to re-use the same value more than once 92 | */ 93 | if (unlikely(seq_num == 0x100000000LL)) 94 | return -ERANGE; 95 | 96 | *pktid = (u32)seq_num; 97 | 
98 | return 0; 99 | } 100 | 101 | /* Write 12-byte AEAD IV to dest */ 102 | static inline void ovpn_pktid_aead_write(const u32 pktid, 103 | const struct ovpn_nonce_tail *nt, 104 | unsigned char *dest) 105 | { 106 | *(__force __be32 *)(dest) = htonl(pktid); 107 | BUILD_BUG_ON(4 + sizeof(struct ovpn_nonce_tail) != NONCE_SIZE); 108 | memcpy(dest + 4, nt->u8, sizeof(struct ovpn_nonce_tail)); 109 | } 110 | 111 | void ovpn_pktid_xmit_init(struct ovpn_pktid_xmit *pid); 112 | void ovpn_pktid_recv_init(struct ovpn_pktid_recv *pr); 113 | 114 | int ovpn_pktid_recv(struct ovpn_pktid_recv *pr, u32 pkt_id, u32 pkt_time); 115 | 116 | #endif /* _NET_OVPN_DCO_OVPNPKTID_H_ */ 117 | -------------------------------------------------------------------------------- /drivers/net/ovpn-dco/sock.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-2.0 2 | /* OpenVPN data channel accelerator 3 | * 4 | * Copyright (C) 2020- OpenVPN, Inc. 5 | * 6 | * Author: James Yonan 7 | * Antonio Quartulli 8 | */ 9 | 10 | #include "main.h" 11 | #include "ovpn.h" 12 | #include "peer.h" 13 | #include "sock.h" 14 | #include "rcu.h" 15 | #include "tcp.h" 16 | #include "udp.h" 17 | 18 | #include 19 | #include 20 | 21 | /* Finalize release of socket, called after RCU grace period */ 22 | static void ovpn_socket_detach(struct socket *sock) 23 | { 24 | if (!sock) 25 | return; 26 | 27 | if (sock->sk->sk_protocol == IPPROTO_UDP) 28 | ovpn_udp_socket_detach(sock); 29 | else if (sock->sk->sk_protocol == IPPROTO_TCP) 30 | ovpn_tcp_socket_detach(sock); 31 | 32 | sockfd_put(sock); 33 | } 34 | 35 | void ovpn_socket_release_kref(struct kref *kref) 36 | { 37 | struct ovpn_socket *sock = container_of(kref, struct ovpn_socket, refcount); 38 | 39 | ovpn_socket_detach(sock->sock); 40 | kfree_rcu(sock, rcu); 41 | } 42 | 43 | static bool ovpn_socket_hold(struct ovpn_socket *sock) 44 | { 45 | return kref_get_unless_zero(&sock->refcount); 46 | } 47 | 48 | static 
struct ovpn_socket *ovpn_socket_get(struct socket *sock) 49 | { 50 | struct ovpn_socket *ovpn_sock; 51 | 52 | rcu_read_lock(); 53 | ovpn_sock = rcu_dereference_sk_user_data(sock->sk); 54 | if (!ovpn_socket_hold(ovpn_sock)) { 55 | pr_warn("%s: found ovpn_socket with ref = 0\n", __func__); 56 | ovpn_sock = NULL; 57 | } 58 | rcu_read_unlock(); 59 | 60 | return ovpn_sock; 61 | } 62 | 63 | /* Finalize release of socket, called after RCU grace period */ 64 | static int ovpn_socket_attach(struct socket *sock, struct ovpn_peer *peer) 65 | { 66 | int ret = -EOPNOTSUPP; 67 | 68 | if (!sock || !peer) 69 | return -EINVAL; 70 | 71 | if (sock->sk->sk_protocol == IPPROTO_UDP) 72 | ret = ovpn_udp_socket_attach(sock, peer->ovpn); 73 | else if (sock->sk->sk_protocol == IPPROTO_TCP) 74 | ret = ovpn_tcp_socket_attach(sock, peer); 75 | 76 | return ret; 77 | } 78 | 79 | struct ovpn_struct *ovpn_from_udp_sock(struct sock *sk) 80 | { 81 | struct ovpn_socket *ovpn_sock; 82 | 83 | ovpn_rcu_lockdep_assert_held(); 84 | 85 | if (unlikely(READ_ONCE(udp_sk(sk)->encap_type) != UDP_ENCAP_OVPNINUDP)) 86 | return NULL; 87 | 88 | ovpn_sock = rcu_dereference_sk_user_data(sk); 89 | if (unlikely(!ovpn_sock)) 90 | return NULL; 91 | 92 | /* make sure that sk matches our stored transport socket */ 93 | if (unlikely(!ovpn_sock->sock || sk != ovpn_sock->sock->sk)) 94 | return NULL; 95 | 96 | return ovpn_sock->ovpn; 97 | } 98 | 99 | struct ovpn_socket *ovpn_socket_new(struct socket *sock, struct ovpn_peer *peer) 100 | { 101 | struct ovpn_socket *ovpn_sock; 102 | int ret; 103 | 104 | ret = ovpn_socket_attach(sock, peer); 105 | if (ret < 0 && ret != -EALREADY) 106 | return ERR_PTR(ret); 107 | 108 | /* if this socket is already owned by this interface, just increase the refcounter */ 109 | if (ret == -EALREADY) { 110 | /* caller is expected to increase the sock refcounter before passing it to this 111 | * function. For this reason we drop it if not needed, like when this socket is 112 | * already owned. 
113 | */ 114 | ovpn_sock = ovpn_socket_get(sock); 115 | sockfd_put(sock); 116 | return ovpn_sock; 117 | } 118 | 119 | ovpn_sock = kzalloc(sizeof(*ovpn_sock), GFP_KERNEL); 120 | if (!ovpn_sock) 121 | return ERR_PTR(-ENOMEM); 122 | 123 | ovpn_sock->ovpn = peer->ovpn; 124 | ovpn_sock->sock = sock; 125 | kref_init(&ovpn_sock->refcount); 126 | 127 | /* TCP sockets are per-peer, therefore they are linked to their unique peer */ 128 | if (sock->sk->sk_protocol == IPPROTO_TCP) { 129 | ovpn_sock->peer = peer; 130 | ret = ptr_ring_init(&ovpn_sock->recv_ring, OVPN_QUEUE_LEN, GFP_KERNEL); 131 | if (ret < 0) { 132 | netdev_err(peer->ovpn->dev, "%s: cannot allocate TCP recv ring\n", 133 | __func__); 134 | goto err; 135 | } 136 | } 137 | 138 | rcu_assign_sk_user_data(sock->sk, ovpn_sock); 139 | 140 | return ovpn_sock; 141 | err: 142 | kfree(ovpn_sock); 143 | return ERR_PTR(ret); 144 | } 145 | -------------------------------------------------------------------------------- /drivers/net/ovpn-dco/crypto.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* OpenVPN data channel accelerator 3 | * 4 | * Copyright (C) 2020- OpenVPN, Inc. 
5 | * 6 | * Author: James Yonan 7 | * Antonio Quartulli 8 | */ 9 | 10 | #ifndef _NET_OVPN_DCO_OVPNCRYPTO_H_ 11 | #define _NET_OVPN_DCO_OVPNCRYPTO_H_ 12 | 13 | #include "main.h" 14 | #include "pktid.h" 15 | 16 | #include 17 | #include 18 | 19 | struct ovpn_peer; 20 | struct ovpn_crypto_key_slot; 21 | 22 | /* info needed for both encrypt and decrypt directions */ 23 | struct ovpn_key_direction { 24 | const u8 *cipher_key; 25 | size_t cipher_key_size; 26 | const u8 *nonce_tail; /* only needed for GCM modes */ 27 | size_t nonce_tail_size; /* only needed for GCM modes */ 28 | }; 29 | 30 | /* all info for a particular symmetric key (primary or secondary) */ 31 | struct ovpn_key_config { 32 | enum ovpn_cipher_alg cipher_alg; 33 | u8 key_id; 34 | struct ovpn_key_direction encrypt; 35 | struct ovpn_key_direction decrypt; 36 | }; 37 | 38 | /* used to pass settings from netlink to the crypto engine */ 39 | struct ovpn_peer_key_reset { 40 | enum ovpn_key_slot slot; 41 | struct ovpn_key_config key; 42 | }; 43 | 44 | struct ovpn_crypto_key_slot { 45 | u8 key_id; 46 | 47 | struct crypto_aead *encrypt; 48 | struct crypto_aead *decrypt; 49 | struct ovpn_nonce_tail nonce_tail_xmit; 50 | struct ovpn_nonce_tail nonce_tail_recv; 51 | 52 | struct ovpn_pktid_recv pid_recv ____cacheline_aligned_in_smp; 53 | struct ovpn_pktid_xmit pid_xmit ____cacheline_aligned_in_smp; 54 | struct kref refcount; 55 | struct rcu_head rcu; 56 | }; 57 | 58 | struct ovpn_crypto_state { 59 | struct ovpn_crypto_key_slot __rcu *primary; 60 | struct ovpn_crypto_key_slot __rcu *secondary; 61 | 62 | /* protects primary and secondary slots */ 63 | struct mutex mutex; 64 | }; 65 | 66 | static inline bool ovpn_crypto_key_slot_hold(struct ovpn_crypto_key_slot *ks) 67 | { 68 | return kref_get_unless_zero(&ks->refcount); 69 | } 70 | 71 | static inline void ovpn_crypto_state_init(struct ovpn_crypto_state *cs) 72 | { 73 | RCU_INIT_POINTER(cs->primary, NULL); 74 | RCU_INIT_POINTER(cs->secondary, NULL); 75 | 
mutex_init(&cs->mutex); 76 | } 77 | 78 | static inline struct ovpn_crypto_key_slot * 79 | ovpn_crypto_key_id_to_slot(const struct ovpn_crypto_state *cs, u8 key_id) 80 | { 81 | struct ovpn_crypto_key_slot *ks; 82 | 83 | if (unlikely(!cs)) 84 | return NULL; 85 | 86 | rcu_read_lock(); 87 | ks = rcu_dereference(cs->primary); 88 | if (ks && ks->key_id == key_id) { 89 | if (unlikely(!ovpn_crypto_key_slot_hold(ks))) 90 | ks = NULL; 91 | goto out; 92 | } 93 | 94 | ks = rcu_dereference(cs->secondary); 95 | if (ks && ks->key_id == key_id) { 96 | if (unlikely(!ovpn_crypto_key_slot_hold(ks))) 97 | ks = NULL; 98 | goto out; 99 | } 100 | 101 | /* when both key slots are occupied but no matching key ID is found, ks has to be reset to 102 | * NULL to avoid carrying a stale pointer 103 | */ 104 | ks = NULL; 105 | out: 106 | rcu_read_unlock(); 107 | 108 | return ks; 109 | } 110 | 111 | static inline struct ovpn_crypto_key_slot * 112 | ovpn_crypto_key_slot_primary(const struct ovpn_crypto_state *cs) 113 | { 114 | struct ovpn_crypto_key_slot *ks; 115 | 116 | rcu_read_lock(); 117 | ks = rcu_dereference(cs->primary); 118 | if (unlikely(ks && !ovpn_crypto_key_slot_hold(ks))) 119 | ks = NULL; 120 | rcu_read_unlock(); 121 | 122 | return ks; 123 | } 124 | 125 | void ovpn_crypto_key_slot_release(struct kref *kref); 126 | 127 | static inline void ovpn_crypto_key_slot_put(struct ovpn_crypto_key_slot *ks) 128 | { 129 | kref_put(&ks->refcount, ovpn_crypto_key_slot_release); 130 | } 131 | 132 | int ovpn_crypto_state_reset(struct ovpn_crypto_state *cs, 133 | const struct ovpn_peer_key_reset *pkr); 134 | 135 | void ovpn_crypto_key_slot_delete(struct ovpn_crypto_state *cs, 136 | enum ovpn_key_slot slot); 137 | 138 | void ovpn_crypto_state_release(struct ovpn_crypto_state *cs); 139 | 140 | void ovpn_crypto_key_slots_swap(struct ovpn_crypto_state *cs); 141 | 142 | void ovpn_crypto_kill_primary(struct ovpn_crypto_state *cs); 143 | 144 | #endif /* _NET_OVPN_DCO_OVPNCRYPTO_H_ */ 145 | 
-------------------------------------------------------------------------------- /drivers/net/ovpn-dco/crypto.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-2.0 2 | /* OpenVPN data channel accelerator 3 | * 4 | * Copyright (C) 2020- OpenVPN, Inc. 5 | * 6 | * Author: James Yonan 7 | * Antonio Quartulli 8 | */ 9 | 10 | #include "main.h" 11 | #include "crypto_aead.h" 12 | #include "crypto.h" 13 | 14 | #include 15 | 16 | static void ovpn_ks_destroy_rcu(struct rcu_head *head) 17 | { 18 | struct ovpn_crypto_key_slot *ks; 19 | 20 | ks = container_of(head, struct ovpn_crypto_key_slot, rcu); 21 | ovpn_aead_crypto_key_slot_destroy(ks); 22 | } 23 | 24 | void ovpn_crypto_key_slot_release(struct kref *kref) 25 | { 26 | struct ovpn_crypto_key_slot *ks; 27 | 28 | ks = container_of(kref, struct ovpn_crypto_key_slot, refcount); 29 | call_rcu(&ks->rcu, ovpn_ks_destroy_rcu); 30 | } 31 | 32 | /* can only be invoked when all peer references have been dropped (i.e. RCU 33 | * release routine) 34 | */ 35 | void ovpn_crypto_state_release(struct ovpn_crypto_state *cs) 36 | { 37 | struct ovpn_crypto_key_slot *ks; 38 | 39 | ks = rcu_access_pointer(cs->primary); 40 | if (ks) { 41 | RCU_INIT_POINTER(cs->primary, NULL); 42 | ovpn_crypto_key_slot_put(ks); 43 | } 44 | 45 | ks = rcu_access_pointer(cs->secondary); 46 | if (ks) { 47 | RCU_INIT_POINTER(cs->secondary, NULL); 48 | ovpn_crypto_key_slot_put(ks); 49 | } 50 | 51 | mutex_destroy(&cs->mutex); 52 | } 53 | 54 | /* removes the primary key from the crypto context */ 55 | void ovpn_crypto_kill_primary(struct ovpn_crypto_state *cs) 56 | { 57 | struct ovpn_crypto_key_slot *ks; 58 | 59 | mutex_lock(&cs->mutex); 60 | ks = rcu_replace_pointer(cs->primary, NULL, lockdep_is_held(&cs->mutex)); 61 | ovpn_crypto_key_slot_put(ks); 62 | mutex_unlock(&cs->mutex); 63 | } 64 | 65 | /* Reset the ovpn_crypto_state object in a way that is atomic 66 | * to RCU readers. 
67 | */ 68 | int ovpn_crypto_state_reset(struct ovpn_crypto_state *cs, 69 | const struct ovpn_peer_key_reset *pkr) 70 | __must_hold(cs->mutex) 71 | { 72 | struct ovpn_crypto_key_slot *old = NULL; 73 | struct ovpn_crypto_key_slot *new; 74 | 75 | lockdep_assert_held(&cs->mutex); 76 | 77 | new = ovpn_aead_crypto_key_slot_new(&pkr->key); 78 | if (IS_ERR(new)) 79 | return PTR_ERR(new); 80 | 81 | switch (pkr->slot) { 82 | case OVPN_KEY_SLOT_PRIMARY: 83 | old = rcu_replace_pointer(cs->primary, new, 84 | lockdep_is_held(&cs->mutex)); 85 | break; 86 | case OVPN_KEY_SLOT_SECONDARY: 87 | old = rcu_replace_pointer(cs->secondary, new, 88 | lockdep_is_held(&cs->mutex)); 89 | break; 90 | default: 91 | goto free_key; 92 | } 93 | 94 | if (old) 95 | ovpn_crypto_key_slot_put(old); 96 | 97 | return 0; 98 | free_key: 99 | ovpn_crypto_key_slot_put(new); 100 | return -EINVAL; 101 | } 102 | 103 | void ovpn_crypto_key_slot_delete(struct ovpn_crypto_state *cs, 104 | enum ovpn_key_slot slot) 105 | { 106 | struct ovpn_crypto_key_slot *ks = NULL; 107 | 108 | mutex_lock(&cs->mutex); 109 | switch (slot) { 110 | case OVPN_KEY_SLOT_PRIMARY: 111 | ks = rcu_replace_pointer(cs->primary, NULL, 112 | lockdep_is_held(&cs->mutex)); 113 | break; 114 | case OVPN_KEY_SLOT_SECONDARY: 115 | ks = rcu_replace_pointer(cs->secondary, NULL, 116 | lockdep_is_held(&cs->mutex)); 117 | break; 118 | default: 119 | pr_warn("Invalid slot to release: %u\n", slot); 120 | break; 121 | } 122 | mutex_unlock(&cs->mutex); 123 | 124 | if (!ks) { 125 | pr_debug("Key slot already released: %u\n", slot); 126 | return; 127 | } 128 | pr_debug("deleting key slot %u, key_id=%u\n", slot, ks->key_id); 129 | 130 | ovpn_crypto_key_slot_put(ks); 131 | } 132 | 133 | /* this swap is not atomic, but there will be a very short time frame where the 134 | * old_secondary key won't be available. This should not be a big deal as most 135 | * likely both peers are already using the new primary at this point. 
136 | */ 137 | void ovpn_crypto_key_slots_swap(struct ovpn_crypto_state *cs) 138 | { 139 | const struct ovpn_crypto_key_slot *old_primary, *old_secondary; 140 | 141 | mutex_lock(&cs->mutex); 142 | 143 | old_secondary = rcu_dereference_protected(cs->secondary, 144 | lockdep_is_held(&cs->mutex)); 145 | old_primary = rcu_replace_pointer(cs->primary, old_secondary, 146 | lockdep_is_held(&cs->mutex)); 147 | rcu_assign_pointer(cs->secondary, old_primary); 148 | 149 | pr_debug("key swapped: %u <-> %u\n", 150 | old_primary ? old_primary->key_id : 0, 151 | old_secondary ? old_secondary->key_id : 0); 152 | 153 | mutex_unlock(&cs->mutex); 154 | } 155 | -------------------------------------------------------------------------------- /drivers/net/ovpn-dco/peer.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* OpenVPN data channel accelerator 3 | * 4 | * Copyright (C) 2020- OpenVPN, Inc. 5 | * 6 | * Author: James Yonan 7 | * Antonio Quartulli 8 | */ 9 | 10 | #ifndef _NET_OVPN_DCO_OVPNPEER_H_ 11 | #define _NET_OVPN_DCO_OVPNPEER_H_ 12 | 13 | #include "addr.h" 14 | #include "bind.h" 15 | #include "sock.h" 16 | #include "stats.h" 17 | 18 | #include 19 | #include 20 | #include 21 | 22 | struct ovpn_peer { 23 | struct ovpn_struct *ovpn; 24 | 25 | u32 id; 26 | 27 | struct { 28 | struct in_addr ipv4; 29 | struct in6_addr ipv6; 30 | } vpn_addrs; 31 | 32 | struct hlist_node hash_entry_id; 33 | struct hlist_node hash_entry_addr4; 34 | struct hlist_node hash_entry_addr6; 35 | struct hlist_node hash_entry_transp_addr; 36 | 37 | /* work objects to handle encryption/decryption of packets. 38 | * these works are queued on the ovpn->crypt_wq workqueue. 
39 | */ 40 | struct work_struct encrypt_work; 41 | struct work_struct decrypt_work; 42 | 43 | struct ptr_ring tx_ring; 44 | struct ptr_ring rx_ring; 45 | struct ptr_ring netif_rx_ring; 46 | 47 | struct napi_struct napi; 48 | 49 | struct ovpn_socket *sock; 50 | 51 | /* state of the TCP reading. Needed to keep track of how much of a single packet has already 52 | * been read from the stream and how much is missing 53 | */ 54 | struct { 55 | struct ptr_ring tx_ring; 56 | struct work_struct tx_work; 57 | struct work_struct rx_work; 58 | 59 | u8 raw_len[sizeof(u16)]; 60 | struct sk_buff *skb; 61 | u16 offset; 62 | u16 data_len; 63 | struct { 64 | void (*sk_state_change)(struct sock *sk); 65 | void (*sk_data_ready)(struct sock *sk); 66 | void (*sk_write_space)(struct sock *sk); 67 | struct proto *prot; 68 | } sk_cb; 69 | } tcp; 70 | 71 | struct dst_cache dst_cache; 72 | 73 | /* our crypto state */ 74 | struct ovpn_crypto_state crypto; 75 | 76 | /* our binding to peer, protected by spinlock */ 77 | struct ovpn_bind __rcu *bind; 78 | 79 | /* timer used to send periodic ping messages to the other peer, if no 80 | * other data was sent within the past keepalive_interval seconds 81 | */ 82 | struct timer_list keepalive_xmit; 83 | /* keepalive interval in seconds */ 84 | unsigned long keepalive_interval; 85 | 86 | /* timer used to mark a peer as expired when no data is received for 87 | * keepalive_timeout seconds 88 | */ 89 | struct timer_list keepalive_recv; 90 | /* keepalive timeout in seconds */ 91 | unsigned long keepalive_timeout; 92 | 93 | /* true if ovpn_peer_mark_delete was called */ 94 | bool halt; 95 | 96 | /* per-peer in-VPN rx/tx stats */ 97 | struct ovpn_peer_stats vpn_stats; 98 | 99 | /* per-peer link/transport rx/tx stats */ 100 | struct ovpn_peer_stats link_stats; 101 | 102 | /* why peer was deleted - keepalive timeout, module removed etc */ 103 | enum ovpn_del_peer_reason delete_reason; 104 | 105 | /* protects binding to peer (bind) and timers 106 | * 
(keepalive_xmit, keepalive_expire) 107 | */ 108 | spinlock_t lock; 109 | 110 | /* needed because crypto methods can go async */ 111 | struct kref refcount; 112 | 113 | /* needed to free a peer in an RCU safe way */ 114 | struct rcu_head rcu; 115 | 116 | /* needed to notify userspace about deletion */ 117 | struct work_struct delete_work; 118 | }; 119 | 120 | void ovpn_peer_release_kref(struct kref *kref); 121 | void ovpn_peer_release(struct ovpn_peer *peer); 122 | 123 | static inline bool ovpn_peer_hold(struct ovpn_peer *peer) 124 | { 125 | return kref_get_unless_zero(&peer->refcount); 126 | } 127 | 128 | static inline void ovpn_peer_put(struct ovpn_peer *peer) 129 | { 130 | kref_put(&peer->refcount, ovpn_peer_release_kref); 131 | } 132 | 133 | static inline void ovpn_peer_keepalive_recv_reset(struct ovpn_peer *peer) 134 | { 135 | u32 delta = msecs_to_jiffies(peer->keepalive_timeout * MSEC_PER_SEC); 136 | 137 | if (unlikely(!delta)) 138 | return; 139 | 140 | mod_timer(&peer->keepalive_recv, jiffies + delta); 141 | } 142 | 143 | static inline void ovpn_peer_keepalive_xmit_reset(struct ovpn_peer *peer) 144 | { 145 | u32 delta = msecs_to_jiffies(peer->keepalive_interval * MSEC_PER_SEC); 146 | 147 | if (unlikely(!delta)) 148 | return; 149 | 150 | mod_timer(&peer->keepalive_xmit, jiffies + delta); 151 | } 152 | 153 | struct ovpn_peer *ovpn_peer_new(struct ovpn_struct *ovpn, const struct sockaddr_storage *sa, 154 | struct socket *sock, u32 id, uint8_t *local_ip); 155 | 156 | void ovpn_peer_keepalive_set(struct ovpn_peer *peer, u32 interval, u32 timeout); 157 | 158 | int ovpn_peer_add(struct ovpn_struct *ovpn, struct ovpn_peer *peer); 159 | int ovpn_peer_del(struct ovpn_peer *peer, enum ovpn_del_peer_reason reason); 160 | struct ovpn_peer *ovpn_peer_find(struct ovpn_struct *ovpn, u32 peer_id); 161 | void ovpn_peer_release_p2p(struct ovpn_struct *ovpn); 162 | void ovpn_peers_free(struct ovpn_struct *ovpn); 163 | 164 | struct ovpn_peer *ovpn_peer_lookup_transp_addr(struct 
ovpn_struct *ovpn, struct sk_buff *skb); 165 | struct ovpn_peer *ovpn_peer_lookup_vpn_addr(struct ovpn_struct *ovpn, struct sk_buff *skb, 166 | bool use_src); 167 | struct ovpn_peer *ovpn_peer_lookup_id(struct ovpn_struct *ovpn, u32 peer_id); 168 | 169 | void ovpn_peer_update_local_endpoint(struct ovpn_peer *peer, struct sk_buff *skb); 170 | void ovpn_peer_float(struct ovpn_peer *peer, struct sk_buff *skb); 171 | 172 | #endif /* _NET_OVPN_DCO_OVPNPEER_H_ */ 173 | -------------------------------------------------------------------------------- /include/uapi/linux/ovpn_dco.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: (GPL-2.0-only WITH Linux-syscall-note) OR MIT */ 2 | /* 3 | * OpenVPN data channel accelerator 4 | * 5 | * Copyright (C) 2019-2023 OpenVPN, Inc. 6 | * 7 | * Author: James Yonan 8 | * Antonio Quartulli 9 | */ 10 | 11 | #ifndef _UAPI_LINUX_OVPN_DCO_H_ 12 | #define _UAPI_LINUX_OVPN_DCO_H_ 13 | 14 | #define OVPN_NL_NAME "ovpn-dco-v2" 15 | 16 | #define OVPN_NL_MULTICAST_GROUP_PEERS "peers" 17 | 18 | /** 19 | * enum ovpn_nl_commands - supported netlink commands 20 | */ 21 | enum ovpn_nl_commands { 22 | /** 23 | * @OVPN_CMD_UNSPEC: unspecified command to catch errors 24 | */ 25 | OVPN_CMD_UNSPEC = 0, 26 | 27 | /** 28 | * @OVPN_CMD_NEW_PEER: Configure peer with its crypto keys 29 | */ 30 | OVPN_CMD_NEW_PEER, 31 | 32 | /** 33 | * @OVPN_CMD_SET_PEER: Tweak parameters for an existing peer 34 | */ 35 | OVPN_CMD_SET_PEER, 36 | 37 | /** 38 | * @OVPN_CMD_DEL_PEER: Remove peer from internal table 39 | */ 40 | OVPN_CMD_DEL_PEER, 41 | 42 | OVPN_CMD_NEW_KEY, 43 | 44 | OVPN_CMD_SWAP_KEYS, 45 | 46 | OVPN_CMD_DEL_KEY, 47 | 48 | /** 49 | * @OVPN_CMD_GET_PEER: Retrieve the status of a peer or all peers 50 | */ 51 | OVPN_CMD_GET_PEER, 52 | }; 53 | 54 | enum ovpn_cipher_alg { 55 | /** 56 | * @OVPN_CIPHER_ALG_NONE: No encryption - reserved for debugging only 57 | */ 58 | OVPN_CIPHER_ALG_NONE = 0, 59 | /** 60 | 
* @OVPN_CIPHER_ALG_AES_GCM: AES-GCM AEAD cipher with any allowed key size 61 | */ 62 | OVPN_CIPHER_ALG_AES_GCM, 63 | /** 64 | * @OVPN_CIPHER_ALG_CHACHA20_POLY1305: ChaCha20Poly1305 AEAD cipher 65 | */ 66 | OVPN_CIPHER_ALG_CHACHA20_POLY1305, 67 | }; 68 | 69 | enum ovpn_del_peer_reason { 70 | __OVPN_DEL_PEER_REASON_FIRST, 71 | OVPN_DEL_PEER_REASON_TEARDOWN = __OVPN_DEL_PEER_REASON_FIRST, 72 | OVPN_DEL_PEER_REASON_USERSPACE, 73 | OVPN_DEL_PEER_REASON_EXPIRED, 74 | OVPN_DEL_PEER_REASON_TRANSPORT_ERROR, 75 | OVPN_DEL_PEER_REASON_TRANSPORT_DISCONNECT, 76 | __OVPN_DEL_PEER_REASON_AFTER_LAST 77 | }; 78 | 79 | enum ovpn_key_slot { 80 | __OVPN_KEY_SLOT_FIRST, 81 | OVPN_KEY_SLOT_PRIMARY = __OVPN_KEY_SLOT_FIRST, 82 | OVPN_KEY_SLOT_SECONDARY, 83 | __OVPN_KEY_SLOT_AFTER_LAST, 84 | }; 85 | 86 | enum ovpn_netlink_attrs { 87 | OVPN_ATTR_UNSPEC = 0, 88 | OVPN_ATTR_IFINDEX, 89 | OVPN_ATTR_NEW_PEER, 90 | OVPN_ATTR_SET_PEER, 91 | OVPN_ATTR_DEL_PEER, 92 | OVPN_ATTR_NEW_KEY, 93 | OVPN_ATTR_SWAP_KEYS, 94 | OVPN_ATTR_DEL_KEY, 95 | OVPN_ATTR_GET_PEER, 96 | 97 | __OVPN_ATTR_AFTER_LAST, 98 | OVPN_ATTR_MAX = __OVPN_ATTR_AFTER_LAST - 1, 99 | }; 100 | 101 | enum ovpn_netlink_key_dir_attrs { 102 | OVPN_KEY_DIR_ATTR_UNSPEC = 0, 103 | OVPN_KEY_DIR_ATTR_CIPHER_KEY, 104 | OVPN_KEY_DIR_ATTR_NONCE_TAIL, 105 | 106 | __OVPN_KEY_DIR_ATTR_AFTER_LAST, 107 | OVPN_KEY_DIR_ATTR_MAX = __OVPN_KEY_DIR_ATTR_AFTER_LAST - 1, 108 | }; 109 | 110 | enum ovpn_netlink_new_key_attrs { 111 | OVPN_NEW_KEY_ATTR_UNSPEC = 0, 112 | OVPN_NEW_KEY_ATTR_PEER_ID, 113 | OVPN_NEW_KEY_ATTR_KEY_SLOT, 114 | OVPN_NEW_KEY_ATTR_KEY_ID, 115 | OVPN_NEW_KEY_ATTR_CIPHER_ALG, 116 | OVPN_NEW_KEY_ATTR_ENCRYPT_KEY, 117 | OVPN_NEW_KEY_ATTR_DECRYPT_KEY, 118 | 119 | __OVPN_NEW_KEY_ATTR_AFTER_LAST, 120 | OVPN_NEW_KEY_ATTR_MAX = __OVPN_NEW_KEY_ATTR_AFTER_LAST - 1, 121 | }; 122 | 123 | enum ovpn_netlink_del_key_attrs { 124 | OVPN_DEL_KEY_ATTR_UNSPEC = 0, 125 | OVPN_DEL_KEY_ATTR_PEER_ID, 126 | OVPN_DEL_KEY_ATTR_KEY_SLOT, 127 | 128 | 
__OVPN_DEL_KEY_ATTR_AFTER_LAST, 129 | OVPN_DEL_KEY_ATTR_MAX = __OVPN_DEL_KEY_ATTR_AFTER_LAST - 1, 130 | }; 131 | 132 | enum ovpn_netlink_swap_keys_attrs { 133 | OVPN_SWAP_KEYS_ATTR_UNSPEC = 0, 134 | OVPN_SWAP_KEYS_ATTR_PEER_ID, 135 | 136 | __OVPN_SWAP_KEYS_ATTR_AFTER_LAST, 137 | OVPN_SWAP_KEYS_ATTR_MAX = __OVPN_SWAP_KEYS_ATTR_AFTER_LAST - 1, 138 | 139 | }; 140 | 141 | enum ovpn_netlink_new_peer_attrs { 142 | OVPN_NEW_PEER_ATTR_UNSPEC = 0, 143 | OVPN_NEW_PEER_ATTR_PEER_ID, 144 | OVPN_NEW_PEER_ATTR_SOCKADDR_REMOTE, 145 | OVPN_NEW_PEER_ATTR_SOCKET, 146 | OVPN_NEW_PEER_ATTR_IPV4, 147 | OVPN_NEW_PEER_ATTR_IPV6, 148 | OVPN_NEW_PEER_ATTR_LOCAL_IP, 149 | 150 | __OVPN_NEW_PEER_ATTR_AFTER_LAST, 151 | OVPN_NEW_PEER_ATTR_MAX = __OVPN_NEW_PEER_ATTR_AFTER_LAST - 1, 152 | }; 153 | 154 | enum ovpn_netlink_set_peer_attrs { 155 | OVPN_SET_PEER_ATTR_UNSPEC = 0, 156 | OVPN_SET_PEER_ATTR_PEER_ID, 157 | OVPN_SET_PEER_ATTR_KEEPALIVE_INTERVAL, 158 | OVPN_SET_PEER_ATTR_KEEPALIVE_TIMEOUT, 159 | 160 | __OVPN_SET_PEER_ATTR_AFTER_LAST, 161 | OVPN_SET_PEER_ATTR_MAX = __OVPN_SET_PEER_ATTR_AFTER_LAST - 1, 162 | }; 163 | 164 | enum ovpn_netlink_del_peer_attrs { 165 | OVPN_DEL_PEER_ATTR_UNSPEC = 0, 166 | OVPN_DEL_PEER_ATTR_REASON, 167 | OVPN_DEL_PEER_ATTR_PEER_ID, 168 | 169 | __OVPN_DEL_PEER_ATTR_AFTER_LAST, 170 | OVPN_DEL_PEER_ATTR_MAX = __OVPN_DEL_PEER_ATTR_AFTER_LAST - 1, 171 | }; 172 | 173 | enum ovpn_netlink_get_peer_attrs { 174 | OVPN_GET_PEER_ATTR_UNSPEC = 0, 175 | OVPN_GET_PEER_ATTR_PEER_ID, 176 | 177 | __OVPN_GET_PEER_ATTR_AFTER_LAST, 178 | OVPN_GET_PEER_ATTR_MAX = __OVPN_GET_PEER_ATTR_AFTER_LAST - 1, 179 | }; 180 | 181 | enum ovpn_netlink_get_peer_response_attrs { 182 | OVPN_GET_PEER_RESP_ATTR_UNSPEC = 0, 183 | OVPN_GET_PEER_RESP_ATTR_PEER_ID, 184 | OVPN_GET_PEER_RESP_ATTR_SOCKADDR_REMOTE, 185 | OVPN_GET_PEER_RESP_ATTR_IPV4, 186 | OVPN_GET_PEER_RESP_ATTR_IPV6, 187 | OVPN_GET_PEER_RESP_ATTR_LOCAL_IP, 188 | OVPN_GET_PEER_RESP_ATTR_LOCAL_PORT, 189 | 
OVPN_GET_PEER_RESP_ATTR_KEEPALIVE_INTERVAL, 190 | OVPN_GET_PEER_RESP_ATTR_KEEPALIVE_TIMEOUT, 191 | OVPN_GET_PEER_RESP_ATTR_VPN_RX_BYTES, 192 | OVPN_GET_PEER_RESP_ATTR_VPN_TX_BYTES, 193 | OVPN_GET_PEER_RESP_ATTR_VPN_RX_PACKETS, 194 | OVPN_GET_PEER_RESP_ATTR_VPN_TX_PACKETS, 195 | OVPN_GET_PEER_RESP_ATTR_LINK_RX_BYTES, 196 | OVPN_GET_PEER_RESP_ATTR_LINK_TX_BYTES, 197 | OVPN_GET_PEER_RESP_ATTR_LINK_RX_PACKETS, 198 | OVPN_GET_PEER_RESP_ATTR_LINK_TX_PACKETS, 199 | 200 | __OVPN_GET_PEER_RESP_ATTR_AFTER_LAST, 201 | OVPN_GET_PEER_RESP_ATTR_MAX = __OVPN_GET_PEER_RESP_ATTR_AFTER_LAST - 1, 202 | }; 203 | 204 | enum ovpn_netlink_peer_stats_attrs { 205 | OVPN_PEER_STATS_ATTR_UNSPEC = 0, 206 | OVPN_PEER_STATS_BYTES, 207 | OVPN_PEER_STATS_PACKETS, 208 | 209 | __OVPN_PEER_STATS_ATTR_AFTER_LAST, 210 | OVPN_PEER_STATS_ATTR_MAX = __OVPN_PEER_STATS_ATTR_AFTER_LAST - 1, 211 | }; 212 | 213 | enum ovpn_netlink_peer_attrs { 214 | OVPN_PEER_ATTR_UNSPEC = 0, 215 | OVPN_PEER_ATTR_PEER_ID, 216 | OVPN_PEER_ATTR_SOCKADDR_REMOTE, 217 | OVPN_PEER_ATTR_IPV4, 218 | OVPN_PEER_ATTR_IPV6, 219 | OVPN_PEER_ATTR_LOCAL_IP, 220 | OVPN_PEER_ATTR_KEEPALIVE_INTERVAL, 221 | OVPN_PEER_ATTR_KEEPALIVE_TIMEOUT, 222 | OVPN_PEER_ATTR_ENCRYPT_KEY, 223 | OVPN_PEER_ATTR_DECRYPT_KEY, 224 | OVPN_PEER_ATTR_RX_STATS, 225 | OVPN_PEER_ATTR_TX_STATS, 226 | 227 | __OVPN_PEER_ATTR_AFTER_LAST, 228 | OVPN_PEER_ATTR_MAX = __OVPN_PEER_ATTR_AFTER_LAST - 1, 229 | }; 230 | 231 | enum ovpn_netlink_packet_attrs { 232 | OVPN_PACKET_ATTR_UNSPEC = 0, 233 | OVPN_PACKET_ATTR_PACKET, 234 | OVPN_PACKET_ATTR_PEER_ID, 235 | 236 | __OVPN_PACKET_ATTR_AFTER_LAST, 237 | OVPN_PACKET_ATTR_MAX = __OVPN_PACKET_ATTR_AFTER_LAST - 1, 238 | }; 239 | 240 | #endif /* _UAPI_LINUX_OVPN_DCO_H_ */ 241 | -------------------------------------------------------------------------------- /drivers/net/ovpn-dco/main.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-2.0 2 | /* OpenVPN data channel 
accelerator 3 | * 4 | * Copyright (C) 2020- OpenVPN, Inc. 5 | * 6 | * Author: Antonio Quartulli 7 | * James Yonan 8 | */ 9 | 10 | #include "main.h" 11 | 12 | #include "ovpn.h" 13 | #include "ovpnstruct.h" 14 | #include "netlink.h" 15 | #include "tcp.h" 16 | 17 | #include 18 | #include 19 | #include 20 | #include 21 | #include 22 | #include 23 | #include 24 | #include 25 | #include 26 | #include 27 | #include 28 | #include 29 | 30 | #include 31 | #include 32 | 33 | /* Driver info */ 34 | #define DRV_NAME "ovpn-dco" 35 | #define DRV_VERSION OVPN_DCO_VERSION 36 | #define DRV_DESCRIPTION "OpenVPN data channel offload (ovpn-dco)" 37 | #define DRV_COPYRIGHT "(C) 2020- OpenVPN, Inc." 38 | 39 | static void ovpn_struct_free(struct net_device *net) 40 | { 41 | struct ovpn_struct *ovpn = netdev_priv(net); 42 | 43 | security_tun_dev_free_security(ovpn->security); 44 | free_percpu(net->tstats); 45 | flush_workqueue(ovpn->crypto_wq); 46 | flush_workqueue(ovpn->events_wq); 47 | destroy_workqueue(ovpn->crypto_wq); 48 | destroy_workqueue(ovpn->events_wq); 49 | rcu_barrier(); 50 | } 51 | 52 | /* Net device open */ 53 | static int ovpn_net_open(struct net_device *dev) 54 | { 55 | struct in_device *dev_v4 = __in_dev_get_rtnl(dev); 56 | 57 | if (dev_v4) { 58 | /* disable redirects as Linux gets confused by ovpn-dco handling same-LAN routing */ 59 | IN_DEV_CONF_SET(dev_v4, SEND_REDIRECTS, false); 60 | IPV4_DEVCONF_ALL(dev_net(dev), SEND_REDIRECTS) = false; 61 | } 62 | 63 | netif_tx_start_all_queues(dev); 64 | return 0; 65 | } 66 | 67 | /* Net device stop -- called prior to device unload */ 68 | static int ovpn_net_stop(struct net_device *dev) 69 | { 70 | netif_tx_stop_all_queues(dev); 71 | return 0; 72 | } 73 | 74 | /******************************************* 75 | * ovpn ethtool ops 76 | *******************************************/ 77 | 78 | static int ovpn_get_link_ksettings(struct net_device *dev, 79 | struct ethtool_link_ksettings *cmd) 80 | { 81 | 
ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, 0); 82 | ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, 0); 83 | cmd->base.speed = SPEED_1000; 84 | cmd->base.duplex = DUPLEX_FULL; 85 | cmd->base.port = PORT_TP; 86 | cmd->base.phy_address = 0; 87 | cmd->base.transceiver = XCVR_INTERNAL; 88 | cmd->base.autoneg = AUTONEG_DISABLE; 89 | 90 | return 0; 91 | } 92 | 93 | static void ovpn_get_drvinfo(struct net_device *dev, 94 | struct ethtool_drvinfo *info) 95 | { 96 | strscpy(info->driver, DRV_NAME, sizeof(info->driver)); 97 | strscpy(info->version, DRV_VERSION, sizeof(info->version)); 98 | strscpy(info->bus_info, "ovpn", sizeof(info->bus_info)); 99 | } 100 | 101 | bool ovpn_dev_is_valid(const struct net_device *dev) 102 | { 103 | return dev->netdev_ops->ndo_start_xmit == ovpn_net_xmit; 104 | } 105 | 106 | /******************************************* 107 | * ovpn exported methods 108 | *******************************************/ 109 | 110 | static const struct net_device_ops ovpn_netdev_ops = { 111 | .ndo_open = ovpn_net_open, 112 | .ndo_stop = ovpn_net_stop, 113 | .ndo_start_xmit = ovpn_net_xmit, 114 | .ndo_get_stats64 = dev_get_tstats64, 115 | }; 116 | 117 | static const struct ethtool_ops ovpn_ethtool_ops = { 118 | .get_link_ksettings = ovpn_get_link_ksettings, 119 | .get_drvinfo = ovpn_get_drvinfo, 120 | .get_link = ethtool_op_get_link, 121 | .get_ts_info = ethtool_op_get_ts_info, 122 | }; 123 | 124 | static void ovpn_setup(struct net_device *dev) 125 | { 126 | /* compute the overhead considering AEAD encryption */ 127 | const int overhead = sizeof(u32) + NONCE_WIRE_SIZE + 16 + sizeof(struct udphdr) + 128 | max(sizeof(struct ipv6hdr), sizeof(struct iphdr)); 129 | 130 | netdev_features_t feat = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM | 131 | NETIF_F_GSO | NETIF_F_GSO_SOFTWARE | 132 | NETIF_F_HIGHDMA; 133 | 134 | dev->ethtool_ops = &ovpn_ethtool_ops; 135 | dev->needs_free_netdev = true; 136 | 137 | dev->netdev_ops = 
&ovpn_netdev_ops; 138 | 139 | dev->priv_destructor = ovpn_struct_free; 140 | 141 | /* Point-to-Point TUN Device */ 142 | dev->hard_header_len = 0; 143 | dev->addr_len = 0; 144 | dev->mtu = ETH_DATA_LEN - overhead; 145 | dev->min_mtu = IPV4_MIN_MTU; 146 | dev->max_mtu = IP_MAX_MTU - overhead; 147 | 148 | /* Zero header length */ 149 | dev->type = ARPHRD_NONE; 150 | dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; 151 | dev->lltx = true; 152 | 153 | dev->features |= feat; 154 | dev->hw_features |= feat; 155 | dev->hw_enc_features |= feat; 156 | 157 | dev->needed_headroom = OVPN_HEAD_ROOM; 158 | dev->needed_tailroom = OVPN_MAX_PADDING; 159 | } 160 | 161 | static const struct nla_policy ovpn_policy[IFLA_OVPN_MAX + 1] = { 162 | [IFLA_OVPN_MODE] = NLA_POLICY_RANGE(NLA_U8, __OVPN_MODE_FIRST, 163 | __OVPN_MODE_AFTER_LAST - 1), 164 | }; 165 | 166 | #if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 15, 0) 167 | static int ovpn_newlink(struct net_device *dev, 168 | struct rtnl_newlink_params *params, 169 | struct netlink_ext_ack *extack) 170 | { 171 | struct nlattr **data = params->data; 172 | #else 173 | static int ovpn_newlink(struct net *src_net, struct net_device *dev, 174 | struct nlattr *tb[], struct nlattr *data[], 175 | struct netlink_ext_ack *extack) 176 | { 177 | #endif 178 | struct ovpn_struct *ovpn = netdev_priv(dev); 179 | int ret; 180 | 181 | ret = security_tun_dev_create(); 182 | if (ret < 0) 183 | return ret; 184 | 185 | ret = ovpn_struct_init(dev); 186 | if (ret < 0) 187 | return ret; 188 | 189 | ovpn->mode = OVPN_MODE_P2P; 190 | if (data && data[IFLA_OVPN_MODE]) { 191 | ovpn->mode = nla_get_u8(data[IFLA_OVPN_MODE]); 192 | netdev_dbg(dev, "%s: setting device (%s) mode: %u\n", __func__, dev->name, 193 | ovpn->mode); 194 | } 195 | 196 | return register_netdevice(dev); 197 | } 198 | 199 | static void ovpn_dellink(struct net_device *dev, struct list_head *head) 200 | { 201 | struct ovpn_struct *ovpn = netdev_priv(dev); 202 | 203 | switch (ovpn->mode) { 204 | 
case OVPN_MODE_P2P: 205 | ovpn_peer_release_p2p(ovpn); 206 | break; 207 | default: 208 | ovpn_peers_free(ovpn); 209 | break; 210 | } 211 | 212 | unregister_netdevice_queue(dev, head); 213 | } 214 | 215 | static struct rtnl_link_ops ovpn_link_ops __read_mostly = { 216 | .kind = DRV_NAME, 217 | .priv_size = sizeof(struct ovpn_struct), 218 | .setup = ovpn_setup, 219 | .policy = ovpn_policy, 220 | .maxtype = IFLA_OVPN_MAX, 221 | .newlink = ovpn_newlink, 222 | .dellink = ovpn_dellink, 223 | }; 224 | 225 | static int __init ovpn_init(void) 226 | { 227 | int err = 0; 228 | 229 | pr_info("%s %s -- %s\n", DRV_DESCRIPTION, DRV_VERSION, DRV_COPYRIGHT); 230 | 231 | err = ovpn_tcp_init(); 232 | if (err) { 233 | pr_err("ovpn: can't initialize TCP subsystem\n"); 234 | goto err; 235 | } 236 | 237 | /* init RTNL link ops */ 238 | err = rtnl_link_register(&ovpn_link_ops); 239 | if (err) { 240 | pr_err("ovpn: can't register RTNL link ops\n"); 241 | goto err; 242 | } 243 | 244 | err = ovpn_netlink_register(); 245 | if (err) { 246 | pr_err("ovpn: can't register netlink family\n"); 247 | goto err_rtnl_unregister; 248 | } 249 | 250 | return 0; 251 | 252 | err_rtnl_unregister: 253 | rtnl_link_unregister(&ovpn_link_ops); 254 | err: 255 | pr_err("ovpn: initialization failed, error status=%d\n", err); 256 | return err; 257 | } 258 | 259 | static __exit void ovpn_cleanup(void) 260 | { 261 | rtnl_link_unregister(&ovpn_link_ops); 262 | ovpn_netlink_unregister(); 263 | rcu_barrier(); /* because we use call_rcu */ 264 | } 265 | 266 | module_init(ovpn_init); 267 | module_exit(ovpn_cleanup); 268 | 269 | MODULE_DESCRIPTION(DRV_DESCRIPTION); 270 | MODULE_AUTHOR(DRV_COPYRIGHT); 271 | MODULE_LICENSE("GPL"); 272 | MODULE_VERSION(DRV_VERSION); 273 | MODULE_ALIAS_RTNL_LINK(DRV_NAME); 274 | MODULE_ALIAS_GENL_FAMILY(OVPN_NL_NAME); 275 | -------------------------------------------------------------------------------- /linux-compat.h: 
-------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: GPL-2.0-only */ 2 | /* OpenVPN data channel accelerator 3 | * 4 | * Copyright (C) 2020- OpenVPN, Inc. 5 | * 6 | * Author: Lev Stipakov 7 | * Antonio Quartulli 8 | */ 9 | 10 | #ifndef _NET_OVPN_DCO_LINUX_COMPAT_H_ 11 | #define _NET_OVPN_DCO_LINUX_COMPAT_H_ 12 | 13 | #include 14 | #include 15 | 16 | /* 17 | * Red Hat Enterprise Linux and SUSE Linux Enterprise kernels provide 18 | * helper macros for detecting the distribution version. This is needed 19 | * here as Red Hat and SUSE backport features and changes from newer kernels 20 | * into the older kernel baseline. Therefore the RHEL and SLE kernel 21 | * features may not be correctly identified by the Linux kernel 22 | * version alone. 23 | * 24 | * To be able to build ovpn-dco on non-RHEL/SLE kernels, we need 25 | * these helper macros defined. And we want the result to 26 | * always be true, to not disable the other kernel version 27 | * checks 28 | */ 29 | #ifndef RHEL_RELEASE_CODE 30 | #define RHEL_RELEASE_CODE 0 31 | #endif 32 | #ifndef RHEL_RELEASE_VERSION 33 | #define RHEL_RELEASE_VERSION(m, n) 1 34 | #endif 35 | 36 | #ifndef SUSE_PRODUCT_CODE 37 | #define SUSE_PRODUCT_CODE 0 38 | #endif 39 | #ifndef SUSE_PRODUCT 40 | #define SUSE_PRODUCT(pr, v, pl, aux) 1 41 | #endif 42 | 43 | #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 17, 0) 44 | 45 | #include 46 | 47 | static inline void ovpn_udp_tunnel_xmit_skb(struct rtable *rt, struct sock *sk, 48 | struct sk_buff *skb, __be32 src, 49 | __be32 dst, __u8 tos, __u8 ttl, 50 | __be16 df, __be16 src_port, 51 | __be16 dst_port, bool xnet, 52 | bool nocheck, u16 ipcb_flags) 53 | { 54 | udp_tunnel_xmit_skb(rt, sk, skb, src, dst, tos, ttl, df, src_port, 55 | dst_port, xnet, nocheck); 56 | } 57 | #define udp_tunnel_xmit_skb ovpn_udp_tunnel_xmit_skb 58 | 59 | static inline void ovpn_udp_tunnel6_xmit_skb(struct dst_entry *dst, 60 | struct sock *sk, 61 | struct sk_buff 
*skb, 62 | struct net_device *dev, 63 | struct in6_addr *saddr, 64 | struct in6_addr *daddr, 65 | __u8 prio, __u8 ttl, __be32 label, 66 | __be16 src_port, __be16 dst_port, 67 | bool nocheck, u16 ip6cb_flags) 68 | { 69 | udp_tunnel6_xmit_skb(dst, sk, skb, dev, saddr, daddr, prio, ttl, label, 70 | src_port, dst_port, nocheck); 71 | } 72 | #define udp_tunnel6_xmit_skb ovpn_udp_tunnel6_xmit_skb 73 | 74 | #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(6, 17, 0) */ 75 | 76 | #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 16, 0) 77 | 78 | #ifndef UDP_ENCAP_OVPNINUDP 79 | /* Our UDP encapsulation types, must be unique 80 | * (other values in include/uapi/linux/udp.h) 81 | */ 82 | #define UDP_ENCAP_OVPNINUDP 100 /* transport layer */ 83 | #endif 84 | 85 | #define timer_container_of from_timer 86 | 87 | enum ovpn_ifla_attrs { 88 | IFLA_OVPN_UNSPEC = 0, 89 | IFLA_OVPN_MODE, 90 | 91 | __IFLA_OVPN_AFTER_LAST, 92 | IFLA_OVPN_MAX = __IFLA_OVPN_AFTER_LAST - 1, 93 | }; 94 | 95 | enum ovpn_mode { 96 | __OVPN_MODE_FIRST = 0, 97 | OVPN_MODE_P2P = __OVPN_MODE_FIRST, 98 | OVPN_MODE_MP, 99 | 100 | __OVPN_MODE_AFTER_LAST, 101 | }; 102 | 103 | #else 104 | 105 | #define __OVPN_MODE_FIRST 0 106 | #define __OVPN_MODE_AFTER_LAST (OVPN_MODE_MP + 1) 107 | 108 | #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(6, 16, 0) */ 109 | 110 | #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 2, 0) 111 | 112 | #include 113 | #define timer_delete del_timer 114 | #define timer_delete_sync del_timer_sync 115 | 116 | #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(6, 2, 0) */ 117 | 118 | #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 13, 0) 119 | 120 | #ifndef NLA_POLICY_MAX_LEN 121 | #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0) 122 | #define NLA_POLICY_MAX_LEN(_len) { .type = NLA_BINARY, .len = _len } 123 | #else 124 | #define NLA_POLICY_MAX_LEN(_len) NLA_POLICY_MAX(NLA_BINARY, _len) 125 | #endif 126 | #endif 127 | 128 | #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(6, 13, 0) */ 129 | 130 | #if LINUX_VERSION_CODE < 
KERNEL_VERSION(6, 12, 0) && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(9, 6) 131 | 132 | #include 133 | #undef NETIF_F_SG 134 | #define NETIF_F_SG (__NETIF_F(SG) | NETIF_F_LLTX) 135 | 136 | #define lltx needs_free_netdev 137 | 138 | #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(6, 12, 0) */ 139 | 140 | #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 2, 0) && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(9, 3) 141 | 142 | #define genl_split_ops genl_ops 143 | 144 | #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(6, 2, 0) && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(9, 3) */ 145 | 146 | #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 19, 0) && \ 147 | SUSE_PRODUCT_CODE < SUSE_PRODUCT(1, 15, 5, 0) && \ 148 | RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8, 10) 149 | 150 | /** 151 | * commit 58caed3dacb4 renamed to netif_napi_add_tx_weight, 152 | * commit c3f760ef1287 removed netif_tx_napi_add 153 | */ 154 | #define netif_napi_add_tx_weight netif_tx_napi_add 155 | 156 | #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(5, 19, 0) && SUSE_PRODUCT_CODE < SUSE_PRODUCT(1, 15, 5, 0) && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8, 10) */ 157 | 158 | #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 15, 0) && \ 159 | SUSE_PRODUCT_CODE < SUSE_PRODUCT(1, 15, 5, 0) && \ 160 | RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(9, 6) 161 | 162 | #define sock_is_readable stream_memory_read 163 | 164 | #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(5, 15, 0) && SUSE_PRODUCT_CODE < SUSE_PRODUCT(1, 15, 5, 0) && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(9, 6) */ 165 | 166 | #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 11, 0) && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8, 0) 167 | 168 | #define dev_get_tstats64 ip_tunnel_get_stats64 169 | 170 | #include 171 | 172 | static inline void dev_sw_netstats_tx_add(struct net_device *dev, 173 | unsigned int packets, 174 | unsigned int len) 175 | { 176 | struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats); 177 | 178 | u64_stats_update_begin(&tstats->syncp); 179 | tstats->tx_bytes += len; 
180 | tstats->tx_packets += packets; 181 | u64_stats_update_end(&tstats->syncp); 182 | } 183 | 184 | #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(5, 11, 0) && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8, 0) */ 185 | 186 | #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0) && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8, 0) 187 | 188 | #define genl_small_ops genl_ops 189 | #define small_ops ops 190 | #define n_small_ops n_ops 191 | 192 | #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0) && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8, 0) */ 193 | 194 | #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0) && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8, 0) 195 | 196 | #include 197 | 198 | static inline void dev_sw_netstats_rx_add(struct net_device *dev, unsigned int len) 199 | { 200 | struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats); 201 | 202 | u64_stats_update_begin(&tstats->syncp); 203 | tstats->rx_bytes += len; 204 | tstats->rx_packets++; 205 | u64_stats_update_end(&tstats->syncp); 206 | } 207 | 208 | #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0) && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8, 0) */ 209 | 210 | #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 6, 0) 211 | 212 | /* Iterate through singly-linked GSO fragments of an skb. */ 213 | #define skb_list_walk_safe(first, skb, next_skb) \ 214 | for ((skb) = (first), (next_skb) = (skb) ? (skb)->next : NULL; (skb); \ 215 | (skb) = (next_skb), (next_skb) = (skb) ? 
(skb)->next : NULL) 216 | 217 | #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(5, 6, 0) */ 218 | 219 | #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 5, 0) 220 | /** 221 | * rcu_replace_pointer() - replace an RCU pointer, returning its old value 222 | * @rcu_ptr: RCU pointer, whose old value is returned 223 | * @ptr: regular pointer 224 | * @c: the lockdep conditions under which the dereference will take place 225 | * 226 | * Perform a replacement, where @rcu_ptr is an RCU-annotated 227 | * pointer and @c is the lockdep argument that is passed to the 228 | * rcu_dereference_protected() call used to read that pointer. The old 229 | * value of @rcu_ptr is returned, and @rcu_ptr is set to @ptr. 230 | */ 231 | #undef rcu_replace_pointer 232 | #define rcu_replace_pointer(rcu_ptr, ptr, c) \ 233 | ({ \ 234 | typeof(ptr) __tmp = rcu_dereference_protected((rcu_ptr), (c)); \ 235 | rcu_assign_pointer((rcu_ptr), (ptr)); \ 236 | __tmp; \ 237 | }) 238 | 239 | #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(5, 5, 0) */ 240 | 241 | #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0) && SUSE_PRODUCT_CODE < SUSE_PRODUCT(1, 15, 3, 0) 242 | 243 | /* commit 895b5c9f206e renamed nf_reset to nf_reset_ct */ 244 | #undef nf_reset_ct 245 | #define nf_reset_ct nf_reset 246 | 247 | #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0) && SUSE_PRODUCT_CODE < SUSE_PRODUCT(1, 15, 3, 0) */ 248 | 249 | #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 2, 0) 250 | 251 | /* commit 1550c171935d introduced rt_gw4 and rt_gw6 for IPv6 gateways */ 252 | #define rt_gw4 rt_gateway 253 | 254 | #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(5, 2, 0) */ 255 | 256 | #endif /* _NET_OVPN_DCO_LINUX_COMPAT_H_ */ 257 | -------------------------------------------------------------------------------- /drivers/net/ovpn-dco/udp.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-2.0 2 | /* OpenVPN data channel accelerator 3 | * 4 | * Copyright (C) 2019-2023 OpenVPN, 
Inc. 5 | * 6 | * Author: Antonio Quartulli 7 | */ 8 | 9 | #include "main.h" 10 | #include "bind.h" 11 | #include "ovpn.h" 12 | #include "ovpnstruct.h" 13 | #include "peer.h" 14 | #include "proto.h" 15 | #include "skb.h" 16 | #include "udp.h" 17 | 18 | #include 19 | #include 20 | #include 21 | #include 22 | #include 23 | #include 24 | #include 25 | #include 26 | 27 | /** 28 | * ovpn_udp_encap_recv() - Start processing a received UDP packet. 29 | * If the first byte of the payload is DATA_V2, the packet is further processed, 30 | * otherwise it is forwarded to the UDP stack for delivery to user space. 31 | * 32 | * @sk: the socket the packet was received on 33 | * @skb: the sk_buff containing the actual packet 34 | * 35 | * Return codes: 36 | * 0 : we consumed or dropped packet 37 | * >0 : skb should be passed up to userspace as UDP (packet not consumed) 38 | * <0 : skb should be resubmitted as proto -N (packet not consumed) 39 | */ 40 | static int ovpn_udp_encap_recv(struct sock *sk, struct sk_buff *skb) 41 | { 42 | struct ovpn_peer *peer = NULL; 43 | struct ovpn_struct *ovpn; 44 | u32 peer_id; 45 | u8 opcode; 46 | int ret; 47 | 48 | ovpn = ovpn_from_udp_sock(sk); 49 | if (unlikely(!ovpn)) { 50 | net_err_ratelimited("%s: cannot obtain ovpn object from UDP socket\n", __func__); 51 | goto drop; 52 | } 53 | 54 | /* Make sure the first 4 bytes of the skb data buffer after the UDP header are accessible. 55 | * They are required to fetch the OP code, the key ID and the peer ID. 56 | */ 57 | if (unlikely(!pskb_may_pull(skb, sizeof(struct udphdr) + 4))) { 58 | net_dbg_ratelimited("%s: packet too small\n", __func__); 59 | goto drop; 60 | } 61 | 62 | opcode = ovpn_opcode_from_skb(skb, sizeof(struct udphdr)); 63 | if (likely(opcode == OVPN_DATA_V2)) { 64 | peer_id = ovpn_peer_id_from_skb(skb, sizeof(struct udphdr)); 65 | /* some OpenVPN server implementations send data packets with the peer-id set to 66 | * undef. 
In this case we skip the peer lookup by peer-id and we try with the 67 | * transport address 68 | */ 69 | if (peer_id != OVPN_PEER_ID_UNDEF) { 70 | peer = ovpn_peer_lookup_id(ovpn, peer_id); 71 | if (!peer) { 72 | net_err_ratelimited("%s: received data from unknown peer (id: %d)\n", 73 | __func__, peer_id); 74 | goto drop; 75 | } 76 | 77 | /* check if this peer changed it's IP address and update state */ 78 | ovpn_peer_float(peer, skb); 79 | } 80 | } 81 | 82 | if (!peer) { 83 | /* might be a control packet or a data packet with undef peer-id */ 84 | peer = ovpn_peer_lookup_transp_addr(ovpn, skb); 85 | if (unlikely(!peer)) { 86 | if (opcode != OVPN_DATA_V2) { 87 | netdev_dbg(ovpn->dev, 88 | "%s: control packet from unknown peer, sending to userspace", 89 | __func__); 90 | return 1; 91 | } 92 | 93 | netdev_dbg(ovpn->dev, 94 | "%s: received data with undef peer-id from unknown source\n", 95 | __func__); 96 | goto drop; 97 | } 98 | } 99 | 100 | /* At this point we know the packet is from a configured peer. 101 | * DATA_V2 packets are handled in kernel space, the rest goes to user space. 102 | * 103 | * Return 1 to instruct the stack to let the packet bubble up to userspace 104 | */ 105 | if (unlikely(opcode != OVPN_DATA_V2)) { 106 | ovpn_peer_put(peer); 107 | return 1; 108 | } 109 | 110 | /* pop off outer UDP header */ 111 | __skb_pull(skb, sizeof(struct udphdr)); 112 | 113 | ret = ovpn_recv(ovpn, peer, skb); 114 | if (unlikely(ret < 0)) { 115 | net_err_ratelimited("%s: cannot handle incoming packet from peer %d: %d\n", 116 | __func__, peer->id, ret); 117 | goto drop; 118 | } 119 | 120 | /* should this be a non DATA_V2 packet, ret will be >0 and this will instruct the UDP 121 | * stack to continue processing this packet as usual (i.e. 
deliver to user space) 122 | */ 123 | return ret; 124 | 125 | drop: 126 | if (peer) 127 | ovpn_peer_put(peer); 128 | kfree_skb(skb); 129 | return 0; 130 | } 131 | 132 | static int ovpn_udp4_output(struct ovpn_struct *ovpn, struct ovpn_bind *bind, 133 | struct dst_cache *cache, struct sock *sk, 134 | struct sk_buff *skb) 135 | { 136 | struct rtable *rt; 137 | struct flowi4 fl = { 138 | .saddr = bind->local.ipv4.s_addr, 139 | .daddr = bind->sa.in4.sin_addr.s_addr, 140 | .fl4_sport = inet_sk(sk)->inet_sport, 141 | .fl4_dport = bind->sa.in4.sin_port, 142 | .flowi4_proto = sk->sk_protocol, 143 | .flowi4_mark = sk->sk_mark, 144 | }; 145 | int ret; 146 | 147 | local_bh_disable(); 148 | rt = dst_cache_get_ip4(cache, &fl.saddr); 149 | if (rt) 150 | goto transmit; 151 | 152 | if (unlikely(!inet_confirm_addr(sock_net(sk), NULL, 0, fl.saddr, RT_SCOPE_HOST))) { 153 | /* we may end up here when the cached address is not usable anymore. 154 | * In this case we reset address/cache and perform a new look up 155 | */ 156 | fl.saddr = 0; 157 | bind->local.ipv4.s_addr = 0; 158 | dst_cache_reset(cache); 159 | } 160 | 161 | rt = ip_route_output_flow(sock_net(sk), &fl, sk); 162 | if (IS_ERR(rt) && PTR_ERR(rt) == -EINVAL) { 163 | fl.saddr = 0; 164 | bind->local.ipv4.s_addr = 0; 165 | dst_cache_reset(cache); 166 | 167 | rt = ip_route_output_flow(sock_net(sk), &fl, sk); 168 | } 169 | 170 | if (IS_ERR(rt)) { 171 | ret = PTR_ERR(rt); 172 | net_dbg_ratelimited("%s: no route to host %pISpc: %d\n", ovpn->dev->name, 173 | &bind->sa.in4, ret); 174 | goto err; 175 | } 176 | dst_cache_set_ip4(cache, &rt->dst, fl.saddr); 177 | 178 | transmit: 179 | udp_tunnel_xmit_skb(rt, sk, skb, fl.saddr, fl.daddr, 0, 180 | ip4_dst_hoplimit(&rt->dst), 0, fl.fl4_sport, 181 | fl.fl4_dport, false, sk->sk_no_check_tx, 0); 182 | ret = 0; 183 | err: 184 | local_bh_enable(); 185 | return ret; 186 | } 187 | 188 | #if IS_ENABLED(CONFIG_IPV6) 189 | static int ovpn_udp6_output(struct ovpn_struct *ovpn, struct ovpn_bind *bind, 
190 | struct dst_cache *cache, struct sock *sk, 191 | struct sk_buff *skb) 192 | { 193 | struct dst_entry *dst; 194 | int ret; 195 | 196 | struct flowi6 fl = { 197 | .saddr = bind->local.ipv6, 198 | .daddr = bind->sa.in6.sin6_addr, 199 | .fl6_sport = inet_sk(sk)->inet_sport, 200 | .fl6_dport = bind->sa.in6.sin6_port, 201 | .flowi6_proto = sk->sk_protocol, 202 | .flowi6_mark = sk->sk_mark, 203 | .flowi6_oif = bind->sa.in6.sin6_scope_id, 204 | }; 205 | 206 | local_bh_disable(); 207 | dst = dst_cache_get_ip6(cache, &fl.saddr); 208 | if (dst) 209 | goto transmit; 210 | 211 | if (unlikely(!ipv6_chk_addr(sock_net(sk), &fl.saddr, NULL, 0))) { 212 | /* we may end up here when the cached address is not usable anymore. 213 | * In this case we reset address/cache and perform a new look up 214 | */ 215 | fl.saddr = in6addr_any; 216 | bind->local.ipv6 = in6addr_any; 217 | dst_cache_reset(cache); 218 | } 219 | 220 | dst = ipv6_stub->ipv6_dst_lookup_flow(sock_net(sk), sk, &fl, NULL); 221 | if (IS_ERR(dst)) { 222 | ret = PTR_ERR(dst); 223 | net_dbg_ratelimited("%s: no route to host %pISpc: %d\n", ovpn->dev->name, 224 | &bind->sa.in6, ret); 225 | goto err; 226 | } 227 | dst_cache_set_ip6(cache, dst, &fl.saddr); 228 | 229 | transmit: 230 | udp_tunnel6_xmit_skb(dst, sk, skb, skb->dev, &fl.saddr, &fl.daddr, 0, 231 | ip6_dst_hoplimit(dst), 0, fl.fl6_sport, 232 | fl.fl6_dport, udp_get_no_check6_tx(sk), 0); 233 | ret = 0; 234 | err: 235 | local_bh_enable(); 236 | return ret; 237 | } 238 | #endif 239 | 240 | /* Transmit skb utilizing kernel-provided UDP tunneling framework. 241 | * 242 | * rcu_read_lock should be held on entry. 243 | * On return, the skb is consumed. 
244 | */ 245 | static int ovpn_udp_output(struct ovpn_struct *ovpn, struct ovpn_bind *bind, 246 | struct dst_cache *cache, struct sock *sk, 247 | struct sk_buff *skb) 248 | { 249 | int ret; 250 | 251 | ovpn_rcu_lockdep_assert_held(); 252 | 253 | /* set sk to null if skb is already orphaned */ 254 | if (!skb->destructor) 255 | skb->sk = NULL; 256 | 257 | /* always permit openvpn-created packets to be (outside) fragmented */ 258 | skb->ignore_df = 1; 259 | 260 | switch (bind->sa.in4.sin_family) { 261 | case AF_INET: 262 | ret = ovpn_udp4_output(ovpn, bind, cache, sk, skb); 263 | break; 264 | #if IS_ENABLED(CONFIG_IPV6) 265 | case AF_INET6: 266 | ret = ovpn_udp6_output(ovpn, bind, cache, sk, skb); 267 | break; 268 | #endif 269 | default: 270 | ret = -EAFNOSUPPORT; 271 | break; 272 | } 273 | 274 | return ret; 275 | } 276 | 277 | void ovpn_udp_send_skb(struct ovpn_struct *ovpn, struct ovpn_peer *peer, 278 | struct sk_buff *skb) 279 | { 280 | struct ovpn_bind *bind; 281 | struct socket *sock; 282 | int ret = -1; 283 | 284 | skb->dev = ovpn->dev; 285 | /* no checksum performed at this layer */ 286 | skb->ip_summed = CHECKSUM_NONE; 287 | 288 | /* get socket info */ 289 | sock = peer->sock->sock; 290 | if (unlikely(!sock)) { 291 | net_warn_ratelimited("%s: no sock for remote peer\n", __func__); 292 | goto out; 293 | } 294 | 295 | rcu_read_lock(); 296 | /* get binding */ 297 | bind = rcu_dereference(peer->bind); 298 | if (unlikely(!bind)) { 299 | net_warn_ratelimited("%s: no bind for remote peer\n", __func__); 300 | goto out_unlock; 301 | } 302 | 303 | /* crypto layer -> transport (UDP) */ 304 | ret = ovpn_udp_output(ovpn, bind, &peer->dst_cache, sock->sk, skb); 305 | 306 | out_unlock: 307 | rcu_read_unlock(); 308 | out: 309 | if (ret < 0) 310 | kfree_skb(skb); 311 | } 312 | 313 | /* Set UDP encapsulation callbacks */ 314 | int ovpn_udp_socket_attach(struct socket *sock, struct ovpn_struct *ovpn) 315 | { 316 | struct udp_tunnel_sock_cfg cfg = { 317 | .sk_user_data = ovpn, 
318 | .encap_type = UDP_ENCAP_OVPNINUDP, 319 | .encap_rcv = ovpn_udp_encap_recv, 320 | }; 321 | struct ovpn_socket *old_data; 322 | 323 | /* sanity check */ 324 | if (sock->sk->sk_protocol != IPPROTO_UDP) { 325 | netdev_err(ovpn->dev, "%s: expected UDP socket\n", __func__); 326 | return -EINVAL; 327 | } 328 | 329 | /* make sure no pre-existing encapsulation handler exists */ 330 | rcu_read_lock(); 331 | old_data = rcu_dereference_sk_user_data(sock->sk); 332 | rcu_read_unlock(); 333 | if (old_data) { 334 | if (old_data->ovpn == ovpn) { 335 | netdev_dbg(ovpn->dev, 336 | "%s: provided socket already owned by this interface\n", 337 | __func__); 338 | return -EALREADY; 339 | } 340 | 341 | netdev_err(ovpn->dev, "%s: provided socket already taken by other user\n", 342 | __func__); 343 | return -EBUSY; 344 | } 345 | 346 | setup_udp_tunnel_sock(sock_net(sock->sk), sock, &cfg); 347 | 348 | return 0; 349 | } 350 | 351 | /* Detach socket from encapsulation handler and/or other callbacks */ 352 | void ovpn_udp_socket_detach(struct socket *sock) 353 | { 354 | struct udp_tunnel_sock_cfg cfg = { }; 355 | 356 | setup_udp_tunnel_sock(sock_net(sock->sk), sock, &cfg); 357 | } 358 | -------------------------------------------------------------------------------- /drivers/net/ovpn-dco/crypto_aead.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-2.0 2 | /* OpenVPN data channel accelerator 3 | * 4 | * Copyright (C) 2020- OpenVPN, Inc. 
 *
 * Author: James Yonan
 *         Antonio Quartulli
 */

#include "crypto_aead.h"
#include "crypto.h"
#include "pktid.h"
#include "proto.h"
#include "skb.h"

/* NOTE(review): the original <...> include targets were lost during text
 * extraction - reconstructed from the symbols used below; verify upstream.
 */
#include <crypto/aead.h>
#include <linux/skbuff.h>
#include <linux/printk.h>

#define AUTH_TAG_SIZE	16

/* Number of bytes the data-channel encapsulation adds in front of/behind the
 * payload: 4-byte op word + 4-byte packet ID + AEAD auth tag.
 */
static int ovpn_aead_encap_overhead(const struct ovpn_crypto_key_slot *ks)
{
	return  OVPN_OP_SIZE_V2 +			/* OP header size */
		4 +					/* Packet ID */
		crypto_aead_authsize(ks->encrypt);	/* Auth Tag */
}

/* Encrypt @skb in place for transmission to @peer_id using key slot @ks.
 *
 * On success the skb starts at the op word (fully encapsulated packet) and 0
 * is returned; on failure a negative errno is returned and the skb may have
 * been partially modified (callers drop it).
 *
 * May sleep: uses GFP_KERNEL and a synchronous crypto wait.
 * NOTE(review): this assumes the caller never invokes it from atomic
 * context - confirm against the callers in ovpn.c.
 */
int ovpn_aead_encrypt(struct ovpn_crypto_key_slot *ks, struct sk_buff *skb, u32 peer_id)
{
	const unsigned int tag_size = crypto_aead_authsize(ks->encrypt);
	const unsigned int head_size = ovpn_aead_encap_overhead(ks);
	struct scatterlist sg[MAX_SKB_FRAGS + 2];
	DECLARE_CRYPTO_WAIT(wait);
	struct aead_request *req;
	struct sk_buff *trailer;
	u8 iv[NONCE_SIZE];
	int nfrags, ret;
	u32 pktid, op;

	/* Sample AEAD header format:
	 * 48000001 00000005 7e7046bd 444a7e28 cc6387b1 64a4d6c1 380275a...
	 * [ OP32 ] [seq # ] [             auth tag            ] [ payload ... ]
	 *          [4-byte
	 *          IV head]
	 */

	/* check that there's enough headroom in the skb for packet
	 * encapsulation, after adding network header and encryption overhead
	 */
	if (unlikely(skb_cow_head(skb, OVPN_HEAD_ROOM + head_size)))
		return -ENOBUFS;

	/* get number of skb frags and ensure that packet data is writable */
	nfrags = skb_cow_data(skb, 0, &trailer);
	if (unlikely(nfrags < 0))
		return nfrags;

	/* sg[] has room for AD entry + nfrags payload entries + tag entry */
	if (unlikely(nfrags + 2 > ARRAY_SIZE(sg)))
		return -ENOSPC;

	req = aead_request_alloc(ks->encrypt, GFP_KERNEL);
	if (unlikely(!req))
		return -ENOMEM;

	/* sg table:
	 * 0: op, wire nonce (AD, len=OVPN_OP_SIZE_V2+NONCE_WIRE_SIZE),
	 * 1, 2, 3, ..., n: payload,
	 * n+1: auth_tag (len=tag_size)
	 */
	sg_init_table(sg, nfrags + 2);

	/* build scatterlist to encrypt packet payload */
	ret = skb_to_sgvec_nomark(skb, sg + 1, 0, skb->len);
	if (unlikely(nfrags != ret)) {
		ret = -EINVAL;
		goto free_req;
	}

	/* append auth_tag onto scatterlist */
	__skb_push(skb, tag_size);
	sg_set_buf(sg + nfrags + 1, skb->data, tag_size);

	/* obtain packet ID, which is used both as a first
	 * 4 bytes of nonce and last 4 bytes of associated data.
	 */
	ret = ovpn_pktid_xmit_next(&ks->pid_xmit, &pktid);
	if (unlikely(ret < 0))
		goto free_req;

	/* concat 4 bytes packet id and 8 bytes nonce tail into 12 bytes nonce */
	ovpn_pktid_aead_write(pktid, &ks->nonce_tail_xmit, iv);

	/* make space for packet id and push it to the front */
	__skb_push(skb, NONCE_WIRE_SIZE);
	memcpy(skb->data, iv, NONCE_WIRE_SIZE);

	/* add packet op as head of additional data */
	op = ovpn_opcode_compose(OVPN_DATA_V2, ks->key_id, peer_id);
	__skb_push(skb, OVPN_OP_SIZE_V2);
	BUILD_BUG_ON(sizeof(op) != OVPN_OP_SIZE_V2);
	*((__force __be32 *)skb->data) = htonl(op);

	/* AEAD Additional data */
	sg_set_buf(sg, skb->data, OVPN_OP_SIZE_V2 + NONCE_WIRE_SIZE);

	/* setup async crypto operation */
	aead_request_set_tfm(req, ks->encrypt);
	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				       CRYPTO_TFM_REQ_MAY_SLEEP,
				  crypto_req_done, &wait);
	/* cryptlen excludes head_size: only the original payload is encrypted */
	aead_request_set_crypt(req, sg, sg, skb->len - head_size, iv);
	aead_request_set_ad(req, OVPN_OP_SIZE_V2 + NONCE_WIRE_SIZE);

	/* encrypt it */
	ret = crypto_wait_req(crypto_aead_encrypt(req), &wait);
	if (ret < 0)
		net_err_ratelimited("%s: encrypt failed: %d\n", __func__, ret);

free_req:
	aead_request_free(req);
	return ret;
}

/* Decrypt and authenticate @skb in place using key slot @ks.
 *
 * On success the skb is pulled to the start of the inner (plaintext) packet
 * and 0 is returned. Returns a negative errno on malformed input, crypto
 * failure or packet-ID replay.
 */
int ovpn_aead_decrypt(struct ovpn_crypto_key_slot *ks, struct sk_buff *skb)
{
	const unsigned int tag_size = crypto_aead_authsize(ks->decrypt);
	struct scatterlist sg[MAX_SKB_FRAGS + 2];
	int ret, payload_len, nfrags;
	u8 *sg_data, iv[NONCE_SIZE];
	unsigned int payload_offset;
	DECLARE_CRYPTO_WAIT(wait);
	struct aead_request *req;
	struct sk_buff *trailer;
	unsigned int sg_len;
	__be32 *pid;

	payload_offset = OVPN_OP_SIZE_V2 + NONCE_WIRE_SIZE + tag_size;
	payload_len = skb->len - payload_offset;

	/* sanity check on packet size, payload size must be >= 0 */
	if (unlikely(payload_len < 0))
		return -EINVAL;

	/* Prepare the skb data buffer to be accessed up until the auth tag.
	 * This is required because this area is directly mapped into the sg list.
	 */
	if (unlikely(!pskb_may_pull(skb, payload_offset)))
		return -ENODATA;

	/* get number of skb frags and ensure that packet data is writable */
	nfrags = skb_cow_data(skb, 0, &trailer);
	if (unlikely(nfrags < 0))
		return nfrags;

	if (unlikely(nfrags + 2 > ARRAY_SIZE(sg)))
		return -ENOSPC;

	req = aead_request_alloc(ks->decrypt, GFP_KERNEL);
	if (unlikely(!req))
		return -ENOMEM;

	/* sg table:
	 * 0: op, wire nonce (AD, len=OVPN_OP_SIZE_V2+NONCE_WIRE_SIZE),
	 * 1, 2, 3, ..., n: payload,
	 * n+1: auth_tag (len=tag_size)
	 */
	sg_init_table(sg, nfrags + 2);

	/* packet op is head of additional data */
	sg_data = skb->data;
	sg_len = OVPN_OP_SIZE_V2 + NONCE_WIRE_SIZE;
	sg_set_buf(sg, sg_data, sg_len);

	/* build scatterlist to decrypt packet payload */
	ret = skb_to_sgvec_nomark(skb, sg + 1, payload_offset, payload_len);
	if (unlikely(nfrags != ret)) {
		ret = -EINVAL;
		goto free_req;
	}

	/* append auth_tag onto scatterlist */
	sg_set_buf(sg + nfrags + 1, skb->data + sg_len, tag_size);

	/* copy nonce into IV buffer: 4-byte wire packet-ID followed by the
	 * locally-kept 8-byte nonce tail
	 */
	memcpy(iv, skb->data + OVPN_OP_SIZE_V2, NONCE_WIRE_SIZE);
	memcpy(iv + NONCE_WIRE_SIZE, ks->nonce_tail_recv.u8,
	       sizeof(struct ovpn_nonce_tail));

	/* setup async crypto operation */
	aead_request_set_tfm(req, ks->decrypt);
	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				       CRYPTO_TFM_REQ_MAY_SLEEP,
				  crypto_req_done, &wait);
	aead_request_set_crypt(req, sg, sg, payload_len + tag_size, iv);

	aead_request_set_ad(req, NONCE_WIRE_SIZE + OVPN_OP_SIZE_V2);

	/* decrypt it */
	ret = crypto_wait_req(crypto_aead_decrypt(req), &wait);
	if (ret < 0) {
		net_err_ratelimited("%s: decrypt failed: %d\n", __func__, ret);
		goto free_req;
	}

	/* PID sits after the op; replay check runs only after the tag has
	 * been verified, so a forged PID cannot poison the replay window
	 */
	pid = (__force __be32 *)(skb->data + OVPN_OP_SIZE_V2);
	ret = ovpn_pktid_recv(&ks->pid_recv, ntohl(*pid), 0);
	if (unlikely(ret < 0))
		goto free_req;

	/* point to encapsulated IP packet */
	__skb_pull(skb, payload_offset);

free_req:
	aead_request_free(req);
	return ret;
}

/* Initialize a struct crypto_aead object.
 *
 * @title is only used to tag log messages ("encrypt"/"decrypt").
 * Returns the tfm on success or an ERR_PTR on failure.
 */
struct crypto_aead *ovpn_aead_init(const char *title, const char *alg_name,
				   const unsigned char *key, unsigned int keylen)
{
	struct crypto_aead *aead;
	int ret;

	aead = crypto_alloc_aead(alg_name, 0, 0);
	if (IS_ERR(aead)) {
		ret = PTR_ERR(aead);
		pr_err("%s crypto_alloc_aead failed, err=%d\n", title, ret);
		/* NULLed so the shared error path frees nothing here;
		 * NOTE(review): relies on crypto_free_aead(NULL) being a
		 * no-op (true on kernels where crypto_destroy_tfm checks
		 * IS_ERR_OR_NULL) - confirm for the oldest supported kernel
		 */
		aead = NULL;
		goto error;
	}

	ret = crypto_aead_setkey(aead, key, keylen);
	if (ret) {
		pr_err("%s crypto_aead_setkey size=%u failed, err=%d\n", title, keylen, ret);
		goto error;
	}

	ret = crypto_aead_setauthsize(aead, AUTH_TAG_SIZE);
	if (ret) {
		pr_err("%s crypto_aead_setauthsize failed, err=%d\n", title, ret);
		goto error;
	}

	/* basic AEAD assumption */
	if (crypto_aead_ivsize(aead) != NONCE_SIZE) {
		pr_err("%s IV size must be %d\n", title, NONCE_SIZE);
		ret = -EINVAL;
		goto error;
	}

	pr_debug("********* Cipher %s (%s)\n", alg_name, title);
	pr_debug("*** IV size=%u\n", crypto_aead_ivsize(aead));
	pr_debug("*** req size=%u\n", crypto_aead_reqsize(aead));
	pr_debug("*** block size=%u\n", crypto_aead_blocksize(aead));
	pr_debug("*** auth size=%u\n", crypto_aead_authsize(aead));
	pr_debug("*** alignmask=0x%x\n", crypto_aead_alignmask(aead));

	return aead;

error:
	crypto_free_aead(aead);
	return ERR_PTR(ret);
}

/* Release both tfms and the slot itself; tolerates a NULL @ks */
void ovpn_aead_crypto_key_slot_destroy(struct ovpn_crypto_key_slot *ks)
{
	if (!ks)
		return;

	crypto_free_aead(ks->encrypt);
	crypto_free_aead(ks->decrypt);
	kfree(ks);
}

/* Allocate a key slot and set up both directions (encrypt/decrypt tfms,
 * nonce tails, packet-ID state). Returns the slot or an ERR_PTR.
 */
static struct ovpn_crypto_key_slot *
ovpn_aead_crypto_key_slot_init(enum ovpn_cipher_alg alg,
			       const unsigned char *encrypt_key,
			       unsigned int encrypt_keylen,
			       const unsigned char *decrypt_key,
			       unsigned int decrypt_keylen,
			       const unsigned char *encrypt_nonce_tail,
			       unsigned int encrypt_nonce_tail_len,
			       const unsigned char *decrypt_nonce_tail,
			       unsigned int decrypt_nonce_tail_len,
			       u16 key_id)
{
	struct ovpn_crypto_key_slot *ks = NULL;
	const char *alg_name;
	int ret;

	/* validate crypto alg */
	switch (alg) {
	case OVPN_CIPHER_ALG_AES_GCM:
		alg_name = "gcm(aes)";
		break;
	case OVPN_CIPHER_ALG_CHACHA20_POLY1305:
		alg_name = "rfc7539(chacha20,poly1305)";
		break;
	default:
		return ERR_PTR(-EOPNOTSUPP);
	}

	/* build the key slot */
	ks = kmalloc(sizeof(*ks), GFP_KERNEL);
	if (!ks)
		return ERR_PTR(-ENOMEM);

	/* pre-NULL the tfms so destroy() is safe on any partial-init path */
	ks->encrypt = NULL;
	ks->decrypt = NULL;
	kref_init(&ks->refcount);
	ks->key_id = key_id;

	ks->encrypt = ovpn_aead_init("encrypt", alg_name, encrypt_key,
				     encrypt_keylen);
	if (IS_ERR(ks->encrypt)) {
		ret = PTR_ERR(ks->encrypt);
		ks->encrypt = NULL;
		goto destroy_ks;
	}

	ks->decrypt = ovpn_aead_init("decrypt", alg_name, decrypt_key,
				     decrypt_keylen);
	if (IS_ERR(ks->decrypt)) {
		ret = PTR_ERR(ks->decrypt);
		ks->decrypt = NULL;
		goto destroy_ks;
	}

	/* both nonce tails must be exactly sizeof(struct ovpn_nonce_tail) */
	if (sizeof(struct ovpn_nonce_tail) != encrypt_nonce_tail_len ||
	    sizeof(struct ovpn_nonce_tail) != decrypt_nonce_tail_len) {
		ret = -EINVAL;
		goto destroy_ks;
	}

	memcpy(ks->nonce_tail_xmit.u8, encrypt_nonce_tail,
	       sizeof(struct ovpn_nonce_tail));
	memcpy(ks->nonce_tail_recv.u8, decrypt_nonce_tail,
	       sizeof(struct ovpn_nonce_tail));

	/* init packet ID generation/validation */
	ovpn_pktid_xmit_init(&ks->pid_xmit);
	ovpn_pktid_recv_init(&ks->pid_recv);

	return ks;

destroy_ks:
	ovpn_aead_crypto_key_slot_destroy(ks);
	return ERR_PTR(ret);
}

/* Thin adapter unpacking a struct ovpn_key_config into the long-form init */
struct ovpn_crypto_key_slot *
ovpn_aead_crypto_key_slot_new(const struct ovpn_key_config *kc)
{
	return ovpn_aead_crypto_key_slot_init(kc->cipher_alg,
					      kc->encrypt.cipher_key,
					      kc->encrypt.cipher_key_size,
					      kc->decrypt.cipher_key,
					      kc->decrypt.cipher_key_size,
					      kc->encrypt.nonce_tail,
					      kc->encrypt.nonce_tail_size,
					      kc->decrypt.nonce_tail,
					      kc->decrypt.nonce_tail_size,
					      kc->key_id);
}
--------------------------------------------------------------------------------
/drivers/net/ovpn-dco/tcp.c:
--------------------------------------------------------------------------------
// SPDX-License-Identifier: GPL-2.0
/* OpenVPN data channel accelerator
 *
 * Copyright (C) 2019-2023 OpenVPN, Inc.
5 | * 6 | * Author: Antonio Quartulli 7 | */ 8 | 9 | #include "main.h" 10 | #include "ovpnstruct.h" 11 | #include "ovpn.h" 12 | #include "peer.h" 13 | #include "proto.h" 14 | #include "skb.h" 15 | #include "tcp.h" 16 | 17 | #include 18 | #include 19 | #include 20 | #include 21 | 22 | static struct proto ovpn_tcp_prot; 23 | 24 | static int ovpn_tcp_read_sock(read_descriptor_t *desc, struct sk_buff *in_skb, 25 | unsigned int in_offset, size_t in_len) 26 | { 27 | struct sock *sk = desc->arg.data; 28 | struct ovpn_socket *sock; 29 | struct ovpn_skb_cb *cb; 30 | struct ovpn_peer *peer; 31 | size_t chunk, copied = 0; 32 | int status; 33 | void *data; 34 | u16 len; 35 | 36 | rcu_read_lock(); 37 | sock = rcu_dereference_sk_user_data(sk); 38 | rcu_read_unlock(); 39 | 40 | if (unlikely(!sock || !sock->peer)) { 41 | pr_err("ovpn: read_sock triggered for socket with no metadata\n"); 42 | desc->error = -EINVAL; 43 | return 0; 44 | } 45 | 46 | peer = sock->peer; 47 | 48 | while (in_len > 0) { 49 | /* no skb allocated means that we have to read (or finish reading) the 2 bytes 50 | * prefix containing the actual packet size. 
51 | */ 52 | if (!peer->tcp.skb) { 53 | chunk = min_t(size_t, in_len, sizeof(u16) - peer->tcp.offset); 54 | WARN_ON(skb_copy_bits(in_skb, in_offset, 55 | peer->tcp.raw_len + peer->tcp.offset, chunk) < 0); 56 | peer->tcp.offset += chunk; 57 | 58 | /* keep on reading until we got the whole packet size */ 59 | if (peer->tcp.offset != sizeof(u16)) 60 | goto next_read; 61 | 62 | len = ntohs(*(__be16 *)peer->tcp.raw_len); 63 | /* invalid packet length: this is a fatal TCP error */ 64 | if (!len) { 65 | netdev_err(peer->ovpn->dev, "%s: received invalid packet length: %d\n", 66 | __func__, len); 67 | desc->error = -EINVAL; 68 | goto err; 69 | } 70 | 71 | /* add 2 bytes to allocated space (and immediately reserve them) for packet 72 | * length prepending, in case the skb has to be forwarded to userspace 73 | */ 74 | peer->tcp.skb = netdev_alloc_skb_ip_align(peer->ovpn->dev, 75 | len + sizeof(u16)); 76 | if (!peer->tcp.skb) { 77 | desc->error = -ENOMEM; 78 | goto err; 79 | } 80 | skb_reserve(peer->tcp.skb, sizeof(u16)); 81 | 82 | peer->tcp.offset = 0; 83 | peer->tcp.data_len = len; 84 | } else { 85 | chunk = min_t(size_t, in_len, peer->tcp.data_len - peer->tcp.offset); 86 | 87 | /* extend skb to accommodate the new chunk and copy it from the input skb */ 88 | data = skb_put(peer->tcp.skb, chunk); 89 | WARN_ON(skb_copy_bits(in_skb, in_offset, data, chunk) < 0); 90 | peer->tcp.offset += chunk; 91 | 92 | /* keep on reading until we get the full packet */ 93 | if (peer->tcp.offset != peer->tcp.data_len) 94 | goto next_read; 95 | 96 | /* do not perform IP caching for TCP connections */ 97 | cb = OVPN_SKB_CB(peer->tcp.skb); 98 | cb->sa_fam = AF_UNSPEC; 99 | 100 | /* At this point we know the packet is from a configured peer. 101 | * DATA_V2 packets are handled in kernel space, the rest goes to user space. 
102 | * 103 | * Queue skb for sending to userspace via recvmsg on the socket 104 | */ 105 | if (likely(ovpn_opcode_from_skb(peer->tcp.skb, 0) == OVPN_DATA_V2)) { 106 | /* hold reference to peer as required by ovpn_recv(). 107 | * 108 | * NOTE: in this context we should already be holding a 109 | * reference to this peer, therefore ovpn_peer_hold() is 110 | * not expected to fail 111 | */ 112 | WARN_ON(!ovpn_peer_hold(peer)); 113 | status = ovpn_recv(peer->ovpn, peer, peer->tcp.skb); 114 | if (unlikely(status < 0)) 115 | ovpn_peer_put(peer); 116 | 117 | } else { 118 | /* prepend skb with packet len. this way userspace can parse 119 | * the packet as if it just arrived from the remote endpoint 120 | */ 121 | void *raw_len = __skb_push(peer->tcp.skb, sizeof(u16)); 122 | memcpy(raw_len, peer->tcp.raw_len, sizeof(u16)); 123 | 124 | status = ptr_ring_produce_bh(&peer->sock->recv_ring, peer->tcp.skb); 125 | if (likely(!status)) 126 | peer->tcp.sk_cb.sk_data_ready(sk); 127 | } 128 | 129 | /* skb not consumed - free it now */ 130 | if (unlikely(status < 0)) 131 | kfree_skb(peer->tcp.skb); 132 | 133 | peer->tcp.skb = NULL; 134 | peer->tcp.offset = 0; 135 | peer->tcp.data_len = 0; 136 | } 137 | next_read: 138 | in_len -= chunk; 139 | in_offset += chunk; 140 | copied += chunk; 141 | } 142 | 143 | return copied; 144 | err: 145 | netdev_err(peer->ovpn->dev, "cannot process incoming TCP data: %d\n", desc->error); 146 | ovpn_peer_del(peer, OVPN_DEL_PEER_REASON_TRANSPORT_ERROR); 147 | return 0; 148 | } 149 | 150 | static void ovpn_tcp_data_ready(struct sock *sk) 151 | { 152 | struct socket *sock = sk->sk_socket; 153 | read_descriptor_t desc; 154 | 155 | if (unlikely(!sock || !sock->ops || !sock->ops->read_sock)) 156 | return; 157 | 158 | desc.arg.data = sk; 159 | desc.error = 0; 160 | desc.count = 1; 161 | 162 | sock->ops->read_sock(sk, &desc, ovpn_tcp_read_sock); 163 | } 164 | 165 | static void ovpn_tcp_write_space(struct sock *sk) 166 | { 167 | struct ovpn_socket *sock; 168 | 169 
| rcu_read_lock(); 170 | sock = rcu_dereference_sk_user_data(sk); 171 | rcu_read_unlock(); 172 | 173 | if (!sock || !sock->peer) 174 | return; 175 | 176 | queue_work(sock->peer->ovpn->events_wq, &sock->peer->tcp.tx_work); 177 | } 178 | 179 | static bool ovpn_tcp_sock_is_readable( 180 | #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 15, 0) && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(9, 0) && SUSE_PRODUCT_CODE < SUSE_PRODUCT(1, 15, 5, 0) 181 | const struct sock *sk 182 | #else 183 | struct sock *sk 184 | #endif 185 | ) 186 | 187 | { 188 | struct ovpn_socket *sock; 189 | 190 | rcu_read_lock(); 191 | sock = rcu_dereference_sk_user_data(sk); 192 | rcu_read_unlock(); 193 | 194 | if (!sock || !sock->peer) 195 | return false; 196 | 197 | return !ptr_ring_empty_bh(&sock->recv_ring); 198 | } 199 | 200 | static int ovpn_tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, 201 | #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 19, 0) 202 | int noblock, 203 | #endif 204 | int flags, int *addr_len) 205 | { 206 | bool tmp = flags & MSG_DONTWAIT; 207 | DEFINE_WAIT_FUNC(wait, woken_wake_function); 208 | int ret, chunk, copied = 0; 209 | struct ovpn_socket *sock; 210 | struct sk_buff *skb; 211 | long timeo; 212 | 213 | #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 19, 0) 214 | tmp = noblock; 215 | #endif 216 | 217 | if (unlikely(flags & MSG_ERRQUEUE)) 218 | return sock_recv_errqueue(sk, msg, len, SOL_IP, IP_RECVERR); 219 | 220 | timeo = sock_rcvtimeo(sk, tmp); 221 | 222 | rcu_read_lock(); 223 | sock = rcu_dereference_sk_user_data(sk); 224 | rcu_read_unlock(); 225 | 226 | if (!sock || !sock->peer) { 227 | ret = -EBADF; 228 | goto unlock; 229 | } 230 | 231 | while (ptr_ring_empty_bh(&sock->recv_ring)) { 232 | if (sk->sk_shutdown & RCV_SHUTDOWN) 233 | return 0; 234 | 235 | if (sock_flag(sk, SOCK_DONE)) 236 | return 0; 237 | 238 | if (!timeo) { 239 | ret = -EAGAIN; 240 | goto unlock; 241 | } 242 | 243 | add_wait_queue(sk_sleep(sk), &wait); 244 | sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); 245 | 
sk_wait_event(sk, &timeo, !ptr_ring_empty_bh(&sock->recv_ring), &wait); 246 | sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk); 247 | remove_wait_queue(sk_sleep(sk), &wait); 248 | 249 | /* take care of signals */ 250 | if (signal_pending(current)) { 251 | ret = sock_intr_errno(timeo); 252 | goto unlock; 253 | } 254 | } 255 | 256 | while (len && (skb = __ptr_ring_peek(&sock->recv_ring))) { 257 | chunk = min_t(size_t, len, skb->len); 258 | ret = skb_copy_datagram_msg(skb, 0, msg, chunk); 259 | if (ret < 0) { 260 | pr_err("ovpn: cannot copy TCP data to userspace: %d\n", ret); 261 | kfree_skb(skb); 262 | goto unlock; 263 | } 264 | 265 | __skb_pull(skb, chunk); 266 | 267 | if (!skb->len) { 268 | /* skb was entirely consumed and can now be removed from the ring */ 269 | __ptr_ring_discard_one(&sock->recv_ring); 270 | consume_skb(skb); 271 | } 272 | 273 | len -= chunk; 274 | copied += chunk; 275 | } 276 | ret = copied; 277 | 278 | unlock: 279 | return ret ? : -EAGAIN; 280 | } 281 | 282 | static void ovpn_destroy_skb(void *skb) 283 | { 284 | consume_skb(skb); 285 | } 286 | 287 | void ovpn_tcp_socket_detach(struct socket *sock) 288 | { 289 | struct ovpn_socket *ovpn_sock; 290 | struct ovpn_peer *peer; 291 | 292 | if (!sock) 293 | return; 294 | 295 | rcu_read_lock(); 296 | ovpn_sock = rcu_dereference_sk_user_data(sock->sk); 297 | rcu_read_unlock(); 298 | 299 | if (!ovpn_sock->peer) 300 | return; 301 | 302 | peer = ovpn_sock->peer; 303 | 304 | /* restore CBs that were saved in ovpn_sock_set_tcp_cb() */ 305 | write_lock_bh(&sock->sk->sk_callback_lock); 306 | sock->sk->sk_data_ready = peer->tcp.sk_cb.sk_data_ready; 307 | sock->sk->sk_write_space = peer->tcp.sk_cb.sk_write_space; 308 | sock->sk->sk_prot = peer->tcp.sk_cb.prot; 309 | rcu_assign_sk_user_data(sock->sk, NULL); 310 | write_unlock_bh(&sock->sk->sk_callback_lock); 311 | 312 | /* cancel any ongoing work. 
Done after removing the CBs so that these workers cannot be 313 | * re-armed 314 | */ 315 | cancel_work_sync(&peer->tcp.tx_work); 316 | 317 | ptr_ring_cleanup(&ovpn_sock->recv_ring, ovpn_destroy_skb); 318 | ptr_ring_cleanup(&peer->tcp.tx_ring, ovpn_destroy_skb); 319 | } 320 | 321 | /* Try to send one skb (or part of it) over the TCP stream. 322 | * 323 | * Return 0 on success or a negative error code otherwise. 324 | * 325 | * Note that the skb is modified by putting away the data being sent, therefore 326 | * the caller should check if skb->len is zero to understand if the full skb was 327 | * sent or not. 328 | */ 329 | static int ovpn_tcp_send_one(struct ovpn_peer *peer, struct sk_buff *skb) 330 | { 331 | struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL }; 332 | struct kvec iv = { 0 }; 333 | int ret; 334 | 335 | if (skb_linearize(skb) < 0) { 336 | net_err_ratelimited("%s: can't linearize packet\n", __func__); 337 | return -ENOMEM; 338 | } 339 | 340 | /* initialize iv structure now as skb_linearize() may have changed skb->data */ 341 | iv.iov_base = skb->data; 342 | iv.iov_len = skb->len; 343 | 344 | ret = kernel_sendmsg(peer->sock->sock, &msg, &iv, 1, iv.iov_len); 345 | if (ret > 0) { 346 | __skb_pull(skb, ret); 347 | 348 | /* since we update per-cpu stats in process context, 349 | * we need to disable softirqs 350 | */ 351 | local_bh_disable(); 352 | dev_sw_netstats_tx_add(peer->ovpn->dev, 1, ret); 353 | local_bh_enable(); 354 | 355 | return 0; 356 | } 357 | 358 | return ret; 359 | } 360 | 361 | /* Process packets in TCP TX queue */ 362 | static void ovpn_tcp_tx_work(struct work_struct *work) 363 | { 364 | struct ovpn_peer *peer; 365 | struct sk_buff *skb; 366 | int ret; 367 | 368 | peer = container_of(work, struct ovpn_peer, tcp.tx_work); 369 | while ((skb = __ptr_ring_peek(&peer->tcp.tx_ring))) { 370 | ret = ovpn_tcp_send_one(peer, skb); 371 | if (ret < 0 && ret != -EAGAIN) { 372 | net_warn_ratelimited("%s: cannot send TCP packet to peer %u: 
%d\n", __func__, 373 | peer->id, ret); 374 | /* in case of TCP error stop sending loop and delete peer */ 375 | ovpn_peer_del(peer, OVPN_DEL_PEER_REASON_TRANSPORT_ERROR); 376 | break; 377 | } else if (!skb->len) { 378 | /* skb was entirely consumed and can now be removed from the ring */ 379 | __ptr_ring_discard_one(&peer->tcp.tx_ring); 380 | consume_skb(skb); 381 | } 382 | 383 | /* give a chance to be rescheduled if needed */ 384 | cond_resched(); 385 | } 386 | } 387 | 388 | /* Put packet into TCP TX queue and schedule a consumer */ 389 | void ovpn_queue_tcp_skb(struct ovpn_peer *peer, struct sk_buff *skb) 390 | { 391 | int ret; 392 | 393 | ret = ptr_ring_produce_bh(&peer->tcp.tx_ring, skb); 394 | if (ret < 0) { 395 | kfree_skb_list(skb); 396 | return; 397 | } 398 | 399 | queue_work(peer->ovpn->events_wq, &peer->tcp.tx_work); 400 | } 401 | 402 | /* Set TCP encapsulation callbacks */ 403 | int ovpn_tcp_socket_attach(struct socket *sock, struct ovpn_peer *peer) 404 | { 405 | void *old_data; 406 | int ret; 407 | 408 | INIT_WORK(&peer->tcp.tx_work, ovpn_tcp_tx_work); 409 | 410 | ret = ptr_ring_init(&peer->tcp.tx_ring, OVPN_QUEUE_LEN, GFP_KERNEL); 411 | if (ret < 0) { 412 | netdev_err(peer->ovpn->dev, "cannot allocate TCP TX ring\n"); 413 | return ret; 414 | } 415 | 416 | peer->tcp.skb = NULL; 417 | peer->tcp.offset = 0; 418 | peer->tcp.data_len = 0; 419 | 420 | write_lock_bh(&sock->sk->sk_callback_lock); 421 | 422 | /* make sure no pre-existing encapsulation handler exists */ 423 | rcu_read_lock(); 424 | old_data = rcu_dereference_sk_user_data(sock->sk); 425 | rcu_read_unlock(); 426 | if (old_data) { 427 | netdev_err(peer->ovpn->dev, "provided socket already taken by other user\n"); 428 | ret = -EBUSY; 429 | goto err; 430 | } 431 | 432 | /* sanity check */ 433 | if (sock->sk->sk_protocol != IPPROTO_TCP) { 434 | netdev_err(peer->ovpn->dev, "provided socket is UDP but expected TCP\n"); 435 | ret = -EINVAL; 436 | goto err; 437 | } 438 | 439 | /* only a fully connected 
socket are expected. Connection should be handled in userspace */ 440 | if (sock->sk->sk_state != TCP_ESTABLISHED) { 441 | netdev_err(peer->ovpn->dev, "provided TCP socket is not in ESTABLISHED state: %d\n", 442 | sock->sk->sk_state); 443 | ret = -EINVAL; 444 | goto err; 445 | } 446 | 447 | /* save current CBs so that they can be restored upon socket release */ 448 | peer->tcp.sk_cb.sk_data_ready = sock->sk->sk_data_ready; 449 | peer->tcp.sk_cb.sk_write_space = sock->sk->sk_write_space; 450 | peer->tcp.sk_cb.prot = sock->sk->sk_prot; 451 | 452 | /* assign our static CBs */ 453 | sock->sk->sk_data_ready = ovpn_tcp_data_ready; 454 | sock->sk->sk_write_space = ovpn_tcp_write_space; 455 | sock->sk->sk_prot = &ovpn_tcp_prot; 456 | 457 | write_unlock_bh(&sock->sk->sk_callback_lock); 458 | 459 | return 0; 460 | err: 461 | write_unlock_bh(&sock->sk->sk_callback_lock); 462 | ptr_ring_cleanup(&peer->tcp.tx_ring, NULL); 463 | 464 | return ret; 465 | } 466 | 467 | int __init ovpn_tcp_init(void) 468 | { 469 | /* We need to substitute the recvmsg and the sock_is_readable 470 | * callbacks in the sk_prot member of the sock object for TCP 471 | * sockets. 472 | * 473 | * However sock->sk_prot is a pointer to a static variable and 474 | * therefore we can't directly modify it, otherwise every socket 475 | * pointing to it will be affected. 476 | * 477 | * For this reason we create our own static copy and modify what 478 | * we need. 
Then we make sk_prot point to this copy 479 | * (in ovpn_tcp_socket_attach()) 480 | */ 481 | ovpn_tcp_prot = tcp_prot; 482 | ovpn_tcp_prot.recvmsg = ovpn_tcp_recvmsg; 483 | ovpn_tcp_prot.sock_is_readable = ovpn_tcp_sock_is_readable; 484 | 485 | return 0; 486 | } 487 | -------------------------------------------------------------------------------- /drivers/net/ovpn-dco/ovpn.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-2.0 2 | /* OpenVPN data channel accelerator 3 | * 4 | * Copyright (C) 2019-2023 OpenVPN, Inc. 5 | * 6 | * Author: James Yonan 7 | * Antonio Quartulli 8 | */ 9 | 10 | #include "main.h" 11 | #include "bind.h" 12 | #include "netlink.h" 13 | #include "ovpn.h" 14 | #include "sock.h" 15 | #include "peer.h" 16 | #include "stats.h" 17 | #include "proto.h" 18 | #include "crypto.h" 19 | #include "crypto_aead.h" 20 | #include "skb.h" 21 | #include "tcp.h" 22 | #include "udp.h" 23 | 24 | #include 25 | #include 26 | #include 27 | 28 | static const unsigned char ovpn_keepalive_message[] = { 29 | 0x2a, 0x18, 0x7b, 0xf3, 0x64, 0x1e, 0xb4, 0xcb, 30 | 0x07, 0xed, 0x2d, 0x0a, 0x98, 0x1f, 0xc7, 0x48 31 | }; 32 | 33 | static const unsigned char ovpn_explicit_exit_notify_message[] = { 34 | 0x28, 0x7f, 0x34, 0x6b, 0xd4, 0xef, 0x7a, 0x81, 35 | 0x2d, 0x56, 0xb8, 0xd3, 0xaf, 0xc5, 0x45, 0x9c, 36 | 6 // OCC_EXIT 37 | }; 38 | 39 | /* Is keepalive message? 40 | * Assumes that single byte at skb->data is defined. 
41 | */ 42 | static bool ovpn_is_keepalive(struct sk_buff *skb) 43 | { 44 | if (*skb->data != OVPN_KEEPALIVE_FIRST_BYTE) 45 | return false; 46 | 47 | if (!pskb_may_pull(skb, sizeof(ovpn_keepalive_message))) 48 | return false; 49 | 50 | return !memcmp(skb->data, ovpn_keepalive_message, 51 | sizeof(ovpn_keepalive_message)); 52 | } 53 | 54 | int ovpn_struct_init(struct net_device *dev) 55 | { 56 | struct ovpn_struct *ovpn = netdev_priv(dev); 57 | int err; 58 | 59 | memset(ovpn, 0, sizeof(*ovpn)); 60 | 61 | ovpn->dev = dev; 62 | 63 | err = ovpn_netlink_init(ovpn); 64 | if (err < 0) 65 | return err; 66 | 67 | spin_lock_init(&ovpn->lock); 68 | spin_lock_init(&ovpn->peers.lock); 69 | 70 | ovpn->crypto_wq = alloc_workqueue("ovpn-crypto-wq-%s", 71 | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 0, 72 | dev->name); 73 | if (!ovpn->crypto_wq) 74 | return -ENOMEM; 75 | 76 | ovpn->events_wq = alloc_workqueue("ovpn-event-wq-%s", WQ_MEM_RECLAIM, 0, dev->name); 77 | if (!ovpn->events_wq) 78 | return -ENOMEM; 79 | 80 | dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); 81 | if (!dev->tstats) 82 | return -ENOMEM; 83 | 84 | err = security_tun_dev_alloc_security(&ovpn->security); 85 | if (err < 0) 86 | return err; 87 | 88 | /* kernel -> userspace tun queue length */ 89 | ovpn->max_tun_queue_len = OVPN_MAX_TUN_QUEUE_LEN; 90 | 91 | return 0; 92 | } 93 | 94 | /* Called after decrypt to write IP packet to tun netdev. 95 | * This method is expected to manage/free skb. 
96 | */ 97 | static void tun_netdev_write(struct ovpn_peer *peer, struct sk_buff *skb) 98 | { 99 | /* packet integrity was verified on the VPN layer - no need to perform 100 | * any additional check along the stack 101 | */ 102 | skb->ip_summed = CHECKSUM_UNNECESSARY; 103 | skb->csum_level = ~0; 104 | 105 | /* skb hash for transport packet no longer valid after decapsulation */ 106 | skb_clear_hash(skb); 107 | 108 | /* post-decrypt scrub -- prepare to inject encapsulated packet onto tun 109 | * interface, based on __skb_tunnel_rx() in dst.h 110 | */ 111 | skb->dev = peer->ovpn->dev; 112 | skb_set_queue_mapping(skb, 0); 113 | skb_scrub_packet(skb, true); 114 | 115 | skb_reset_network_header(skb); 116 | skb_reset_transport_header(skb); 117 | skb_probe_transport_header(skb); 118 | skb_reset_inner_headers(skb); 119 | 120 | /* update per-cpu RX stats with the stored size of encrypted packet */ 121 | 122 | /* we are in softirq context - hence no locking nor disable preemption needed */ 123 | dev_sw_netstats_rx_add(peer->ovpn->dev, skb->len); 124 | 125 | /* cause packet to be "received" by tun interface */ 126 | napi_gro_receive(&peer->napi, skb); 127 | } 128 | 129 | int ovpn_napi_poll(struct napi_struct *napi, int budget) 130 | { 131 | struct ovpn_peer *peer = container_of(napi, struct ovpn_peer, napi); 132 | struct sk_buff *skb; 133 | int work_done = 0; 134 | 135 | if (unlikely(budget <= 0)) 136 | return 0; 137 | /* this function should schedule at most 'budget' number of 138 | * packets for delivery to the tun interface. 
139 | * If in the queue we have more packets than what allowed by the 140 | * budget, the next polling will take care of those 141 | */ 142 | while ((work_done < budget) && 143 | (skb = ptr_ring_consume_bh(&peer->netif_rx_ring))) { 144 | tun_netdev_write(peer, skb); 145 | work_done++; 146 | } 147 | 148 | if (work_done < budget) 149 | napi_complete_done(napi, work_done); 150 | 151 | return work_done; 152 | } 153 | 154 | /* Entry point for processing an incoming packet (in skb form) 155 | * 156 | * Enqueue the packet and schedule RX consumer. 157 | * Reference to peer is dropped only in case of success. 158 | * 159 | * Return 0 if the packet was handled (and consumed) 160 | * Return <0 in case of error (return value is error code) 161 | */ 162 | int ovpn_recv(struct ovpn_struct *ovpn, struct ovpn_peer *peer, struct sk_buff *skb) 163 | { 164 | if (unlikely(ptr_ring_produce_bh(&peer->rx_ring, skb) < 0)) 165 | return -ENOSPC; 166 | 167 | if (!queue_work(ovpn->crypto_wq, &peer->decrypt_work)) 168 | ovpn_peer_put(peer); 169 | 170 | return 0; 171 | } 172 | 173 | static int ovpn_decrypt_one(struct ovpn_peer *peer, struct sk_buff *skb) 174 | { 175 | struct ovpn_peer *allowed_peer = NULL; 176 | struct ovpn_crypto_key_slot *ks; 177 | __be16 proto; 178 | int ret = -1; 179 | u8 key_id; 180 | 181 | ovpn_peer_stats_increment_rx(&peer->link_stats, skb->len); 182 | 183 | /* get the key slot matching the key Id in the received packet */ 184 | key_id = ovpn_key_id_from_skb(skb); 185 | ks = ovpn_crypto_key_id_to_slot(&peer->crypto, key_id); 186 | if (unlikely(!ks)) { 187 | net_info_ratelimited("%s: no available key for peer %u, key-id: %u\n", __func__, 188 | peer->id, key_id); 189 | goto drop; 190 | } 191 | 192 | /* decrypt */ 193 | ret = ovpn_aead_decrypt(ks, skb); 194 | 195 | ovpn_crypto_key_slot_put(ks); 196 | 197 | if (unlikely(ret < 0)) { 198 | net_err_ratelimited("%s: error during decryption for peer %u, key-id %u: %d\n", 199 | __func__, peer->id, key_id, ret); 200 | goto drop; 
201 | } 202 | 203 | /* note event of authenticated packet received for keepalive */ 204 | ovpn_peer_keepalive_recv_reset(peer); 205 | 206 | /* update source and destination endpoint for this peer */ 207 | if (peer->sock->sock->sk->sk_protocol == IPPROTO_UDP) 208 | ovpn_peer_update_local_endpoint(peer, skb); 209 | 210 | /* increment RX stats */ 211 | ovpn_peer_stats_increment_rx(&peer->vpn_stats, skb->len); 212 | 213 | /* check if this is a valid datapacket that has to be delivered to the 214 | * tun interface 215 | */ 216 | skb_reset_network_header(skb); 217 | proto = ovpn_ip_check_protocol(skb); 218 | if (unlikely(!proto)) { 219 | /* check if null packet */ 220 | if (unlikely(!pskb_may_pull(skb, 1))) { 221 | ret = -EINVAL; 222 | goto drop; 223 | } 224 | 225 | /* check if special OpenVPN message */ 226 | if (ovpn_is_keepalive(skb)) { 227 | netdev_dbg(peer->ovpn->dev, "%s: ping received from peer with id %u\n", 228 | __func__, peer->id); 229 | /* not an error */ 230 | consume_skb(skb); 231 | /* inform the caller that NAPI should not be scheduled 232 | * for this packet 233 | */ 234 | return -1; 235 | } 236 | 237 | ret = -EPROTONOSUPPORT; 238 | goto drop; 239 | } 240 | skb->protocol = proto; 241 | 242 | /* perform Reverse Path Filtering (RPF) */ 243 | allowed_peer = ovpn_peer_lookup_vpn_addr(peer->ovpn, skb, true); 244 | if (unlikely(allowed_peer != peer)) { 245 | ret = -EPERM; 246 | goto drop; 247 | } 248 | 249 | ret = ptr_ring_produce_bh(&peer->netif_rx_ring, skb); 250 | drop: 251 | if (likely(allowed_peer)) 252 | ovpn_peer_put(allowed_peer); 253 | 254 | if (unlikely(ret < 0)) 255 | kfree_skb(skb); 256 | 257 | return ret; 258 | } 259 | 260 | /* pick packet from RX queue, decrypt and forward it to the tun device */ 261 | void ovpn_decrypt_work(struct work_struct *work) 262 | { 263 | struct ovpn_peer *peer; 264 | struct sk_buff *skb; 265 | 266 | peer = container_of(work, struct ovpn_peer, decrypt_work); 267 | while ((skb = ptr_ring_consume_bh(&peer->rx_ring))) { 268 
| if (likely(ovpn_decrypt_one(peer, skb) == 0)) { 269 | /* if a packet has been enqueued for NAPI, signal 270 | * availability to the networking stack 271 | */ 272 | local_bh_disable(); 273 | napi_schedule(&peer->napi); 274 | local_bh_enable(); 275 | } 276 | 277 | /* give a chance to be rescheduled if needed */ 278 | cond_resched(); 279 | } 280 | ovpn_peer_put(peer); 281 | } 282 | 283 | static bool ovpn_encrypt_one(struct ovpn_peer *peer, struct sk_buff *skb) 284 | { 285 | struct ovpn_crypto_key_slot *ks; 286 | bool success = false; 287 | int ret; 288 | 289 | /* get primary key to be used for encrypting data */ 290 | ks = ovpn_crypto_key_slot_primary(&peer->crypto); 291 | if (unlikely(!ks)) { 292 | net_warn_ratelimited("%s: error while retrieving primary key slot\n", __func__); 293 | return false; 294 | } 295 | 296 | if (unlikely(skb->ip_summed == CHECKSUM_PARTIAL && 297 | skb_checksum_help(skb))) { 298 | net_err_ratelimited("%s: cannot compute checksum for outgoing packet\n", __func__); 299 | goto err; 300 | } 301 | 302 | ovpn_peer_stats_increment_tx(&peer->vpn_stats, skb->len); 303 | 304 | /* encrypt */ 305 | ret = ovpn_aead_encrypt(ks, skb, peer->id); 306 | if (unlikely(ret < 0)) { 307 | /* if we ran out of IVs we must kill the key as it can't be used anymore */ 308 | if (ret == -ERANGE) { 309 | netdev_warn(peer->ovpn->dev, 310 | "%s: killing primary key as we ran out of IVs\n", __func__); 311 | ovpn_crypto_kill_primary(&peer->crypto); 312 | goto err; 313 | } 314 | net_err_ratelimited("%s: error during encryption for peer %u, key-id %u: %d\n", 315 | __func__, peer->id, ks->key_id, ret); 316 | goto err; 317 | } 318 | 319 | success = true; 320 | 321 | ovpn_peer_stats_increment_tx(&peer->link_stats, skb->len); 322 | err: 323 | ovpn_crypto_key_slot_put(ks); 324 | return success; 325 | } 326 | 327 | /* Process packets in TX queue in a transport-specific way. 328 | * 329 | * UDP transport - encrypt and send across the tunnel. 
330 | * TCP transport - encrypt and put into TCP TX queue. 331 | */ 332 | void ovpn_encrypt_work(struct work_struct *work) 333 | { 334 | struct sk_buff *skb, *curr, *next; 335 | struct ovpn_peer *peer; 336 | 337 | peer = container_of(work, struct ovpn_peer, encrypt_work); 338 | while ((skb = ptr_ring_consume_bh(&peer->tx_ring))) { 339 | /* this might be a GSO-segmented skb list: process each skb 340 | * independently 341 | */ 342 | skb_list_walk_safe(skb, curr, next) { 343 | /* if one segment fails encryption, we drop the entire 344 | * packet, because it does not really make sense to send 345 | * only part of it at this point 346 | */ 347 | if (unlikely(!ovpn_encrypt_one(peer, curr))) { 348 | kfree_skb_list(skb); 349 | skb = NULL; 350 | break; 351 | } 352 | } 353 | 354 | /* successful encryption */ 355 | if (skb) { 356 | skb_list_walk_safe(skb, curr, next) { 357 | skb_mark_not_on_list(curr); 358 | 359 | switch (peer->sock->sock->sk->sk_protocol) { 360 | case IPPROTO_UDP: 361 | ovpn_udp_send_skb(peer->ovpn, peer, curr); 362 | break; 363 | case IPPROTO_TCP: 364 | ovpn_tcp_send_skb(peer, curr); 365 | break; 366 | default: 367 | /* no transport configured yet */ 368 | consume_skb(skb); 369 | break; 370 | } 371 | } 372 | 373 | /* note event of authenticated packet xmit for keepalive */ 374 | ovpn_peer_keepalive_xmit_reset(peer); 375 | } 376 | 377 | /* give a chance to be rescheduled if needed */ 378 | cond_resched(); 379 | } 380 | ovpn_peer_put(peer); 381 | } 382 | 383 | /* Put skb into TX queue and schedule a consumer */ 384 | static void ovpn_queue_skb(struct ovpn_struct *ovpn, struct sk_buff *skb, struct ovpn_peer *peer) 385 | { 386 | int ret; 387 | 388 | if (likely(!peer)) 389 | peer = ovpn_peer_lookup_vpn_addr(ovpn, skb, false); 390 | if (unlikely(!peer)) { 391 | net_dbg_ratelimited("%s: no peer to send data to\n", ovpn->dev->name); 392 | goto drop; 393 | } 394 | 395 | ret = ptr_ring_produce_bh(&peer->tx_ring, skb); 396 | if (unlikely(ret < 0)) { 397 | 
net_err_ratelimited("%s: cannot queue packet to TX ring\n", __func__); 398 | goto drop; 399 | } 400 | 401 | if (!queue_work(ovpn->crypto_wq, &peer->encrypt_work)) 402 | ovpn_peer_put(peer); 403 | 404 | return; 405 | drop: 406 | if (peer) 407 | ovpn_peer_put(peer); 408 | kfree_skb_list(skb); 409 | } 410 | 411 | /* Net device start xmit 412 | */ 413 | netdev_tx_t ovpn_net_xmit(struct sk_buff *skb, struct net_device *dev) 414 | { 415 | struct ovpn_struct *ovpn = netdev_priv(dev); 416 | struct sk_buff *segments, *tmp, *curr, *next; 417 | struct sk_buff_head skb_list; 418 | __be16 proto; 419 | int ret; 420 | 421 | /* reset netfilter state */ 422 | nf_reset_ct(skb); 423 | 424 | /* verify IP header size in network packet */ 425 | proto = ovpn_ip_check_protocol(skb); 426 | if (unlikely(!proto || skb->protocol != proto)) { 427 | net_err_ratelimited("%s: dropping malformed payload packet\n", 428 | dev->name); 429 | goto drop; 430 | } 431 | 432 | if (skb_is_gso(skb)) { 433 | segments = skb_gso_segment(skb, 0); 434 | if (IS_ERR(segments)) { 435 | ret = PTR_ERR(segments); 436 | net_err_ratelimited("%s: cannot segment packet: %d\n", dev->name, ret); 437 | goto drop; 438 | } 439 | 440 | consume_skb(skb); 441 | skb = segments; 442 | } 443 | 444 | /* from this moment on, "skb" might be a list */ 445 | 446 | __skb_queue_head_init(&skb_list); 447 | skb_list_walk_safe(skb, curr, next) { 448 | skb_mark_not_on_list(curr); 449 | 450 | tmp = skb_share_check(curr, GFP_ATOMIC); 451 | if (unlikely(!tmp)) { 452 | kfree_skb_list(next); 453 | net_err_ratelimited("%s: skb_share_check failed\n", dev->name); 454 | goto drop_list; 455 | } 456 | 457 | __skb_queue_tail(&skb_list, tmp); 458 | } 459 | skb_list.prev->next = NULL; 460 | 461 | ovpn_queue_skb(ovpn, skb_list.next, NULL); 462 | 463 | return NETDEV_TX_OK; 464 | 465 | drop_list: 466 | skb_queue_walk_safe(&skb_list, curr, next) 467 | kfree_skb(curr); 468 | drop: 469 | skb_tx_error(skb); 470 | kfree_skb_list(skb); 471 | return NET_XMIT_DROP; 472 
| } 473 | 474 | /* Encrypt and transmit a special message to peer, such as keepalive 475 | * or explicit-exit-notify. Called from softirq context. 476 | * Assumes that caller holds a reference to peer. 477 | */ 478 | static void ovpn_xmit_special(struct ovpn_peer *peer, const void *data, 479 | const unsigned int len) 480 | { 481 | struct ovpn_struct *ovpn; 482 | struct sk_buff *skb; 483 | 484 | ovpn = peer->ovpn; 485 | if (unlikely(!ovpn)) 486 | return; 487 | 488 | skb = alloc_skb(256 + len, GFP_ATOMIC); 489 | if (unlikely(!skb)) 490 | return; 491 | 492 | skb_reserve(skb, 128); 493 | skb->priority = TC_PRIO_BESTEFFORT; 494 | memcpy(__skb_put(skb, len), data, len); 495 | 496 | /* increase reference counter when passing peer to sending queue */ 497 | if (!ovpn_peer_hold(peer)) { 498 | netdev_dbg(ovpn->dev, "%s: cannot hold peer reference for sending special packet\n", 499 | __func__); 500 | kfree_skb(skb); 501 | return; 502 | } 503 | 504 | ovpn_queue_skb(ovpn, skb, peer); 505 | } 506 | 507 | void ovpn_keepalive_xmit(struct ovpn_peer *peer) 508 | { 509 | ovpn_xmit_special(peer, ovpn_keepalive_message, 510 | sizeof(ovpn_keepalive_message)); 511 | } 512 | 513 | /* Transmit explicit exit notification. 514 | * Called from process context. 515 | */ 516 | void ovpn_explicit_exit_notify_xmit(struct ovpn_peer *peer) 517 | { 518 | ovpn_xmit_special(peer, ovpn_explicit_exit_notify_message, 519 | sizeof(ovpn_explicit_exit_notify_message)); 520 | } 521 | 522 | /* Copy buffer into skb and send it across the tunnel. 
523 | * 524 | * For UDP transport: just sent the skb to peer 525 | * For TCP transport: put skb into TX queue 526 | */ 527 | int ovpn_send_data(struct ovpn_struct *ovpn, u32 peer_id, const u8 *data, size_t len) 528 | { 529 | u16 skb_len = SKB_HEADER_LEN + len; 530 | struct ovpn_peer *peer; 531 | struct sk_buff *skb; 532 | bool tcp = false; 533 | int ret = 0; 534 | 535 | peer = ovpn_peer_lookup_id(ovpn, peer_id); 536 | if (unlikely(!peer)) { 537 | netdev_dbg(ovpn->dev, "no peer to send data to\n"); 538 | return -EHOSTUNREACH; 539 | } 540 | 541 | if (peer->sock->sock->sk->sk_protocol == IPPROTO_TCP) { 542 | skb_len += sizeof(u16); 543 | tcp = true; 544 | } 545 | 546 | skb = alloc_skb(skb_len, GFP_ATOMIC); 547 | if (unlikely(!skb)) { 548 | ret = -ENOMEM; 549 | goto out; 550 | } 551 | 552 | skb_reserve(skb, SKB_HEADER_LEN); 553 | skb_put_data(skb, data, len); 554 | 555 | /* prepend TCP packet with size, as required by OpenVPN protocol */ 556 | if (tcp) { 557 | *(__be16 *)__skb_push(skb, sizeof(u16)) = htons(len); 558 | ovpn_queue_tcp_skb(peer, skb); 559 | } else { 560 | ovpn_udp_send_skb(ovpn, peer, skb); 561 | } 562 | out: 563 | ovpn_peer_put(peer); 564 | return ret; 565 | } 566 | -------------------------------------------------------------------------------- /drivers/net/ovpn-dco/peer.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-2.0 2 | /* OpenVPN data channel accelerator 3 | * 4 | * Copyright (C) 2020- OpenVPN, Inc. 
5 | * 6 | * Author: James Yonan 7 | * Antonio Quartulli 8 | */ 9 | 10 | #include "ovpn.h" 11 | #include "bind.h" 12 | #include "crypto.h" 13 | #include "peer.h" 14 | #include "netlink.h" 15 | #include "tcp.h" 16 | 17 | #include 18 | #include 19 | #include 20 | #include 21 | #include 22 | 23 | static void ovpn_peer_ping(struct timer_list *t) 24 | { 25 | struct ovpn_peer *peer = timer_container_of(peer, t, keepalive_xmit); 26 | 27 | netdev_dbg(peer->ovpn->dev, "%s: sending ping to peer %u\n", __func__, peer->id); 28 | ovpn_keepalive_xmit(peer); 29 | } 30 | 31 | static void ovpn_peer_expire(struct timer_list *t) 32 | { 33 | struct ovpn_peer *peer = timer_container_of(peer, t, keepalive_recv); 34 | 35 | netdev_dbg(peer->ovpn->dev, "%s: peer %u expired\n", __func__, peer->id); 36 | ovpn_peer_del(peer, OVPN_DEL_PEER_REASON_EXPIRED); 37 | } 38 | 39 | /* Construct a new peer */ 40 | static struct ovpn_peer *ovpn_peer_create(struct ovpn_struct *ovpn, u32 id) 41 | { 42 | struct ovpn_peer *peer; 43 | int ret; 44 | 45 | /* alloc and init peer object */ 46 | peer = kzalloc(sizeof(*peer), GFP_KERNEL); 47 | if (!peer) 48 | return ERR_PTR(-ENOMEM); 49 | 50 | peer->id = id; 51 | peer->halt = false; 52 | peer->ovpn = ovpn; 53 | 54 | peer->vpn_addrs.ipv4.s_addr = htonl(INADDR_ANY); 55 | peer->vpn_addrs.ipv6 = in6addr_any; 56 | 57 | RCU_INIT_POINTER(peer->bind, NULL); 58 | ovpn_crypto_state_init(&peer->crypto); 59 | spin_lock_init(&peer->lock); 60 | kref_init(&peer->refcount); 61 | ovpn_peer_stats_init(&peer->vpn_stats); 62 | ovpn_peer_stats_init(&peer->link_stats); 63 | 64 | INIT_WORK(&peer->encrypt_work, ovpn_encrypt_work); 65 | INIT_WORK(&peer->decrypt_work, ovpn_decrypt_work); 66 | 67 | ret = dst_cache_init(&peer->dst_cache, GFP_KERNEL); 68 | if (ret < 0) { 69 | netdev_err(ovpn->dev, "%s: cannot initialize dst cache\n", __func__); 70 | goto err; 71 | } 72 | 73 | ret = ptr_ring_init(&peer->tx_ring, OVPN_QUEUE_LEN, GFP_KERNEL); 74 | if (ret < 0) { 75 | netdev_err(ovpn->dev, "%s: 
cannot allocate TX ring\n", __func__); 76 | goto err_dst_cache; 77 | } 78 | 79 | ret = ptr_ring_init(&peer->rx_ring, OVPN_QUEUE_LEN, GFP_KERNEL); 80 | if (ret < 0) { 81 | netdev_err(ovpn->dev, "%s: cannot allocate RX ring\n", __func__); 82 | goto err_tx_ring; 83 | } 84 | 85 | ret = ptr_ring_init(&peer->netif_rx_ring, OVPN_QUEUE_LEN, GFP_KERNEL); 86 | if (ret < 0) { 87 | netdev_err(ovpn->dev, "%s: cannot allocate NETIF RX ring\n", __func__); 88 | goto err_rx_ring; 89 | } 90 | 91 | /* configure and start NAPI */ 92 | netif_napi_add_tx_weight(ovpn->dev, &peer->napi, ovpn_napi_poll, 93 | NAPI_POLL_WEIGHT); 94 | napi_enable(&peer->napi); 95 | 96 | dev_hold(ovpn->dev); 97 | 98 | timer_setup(&peer->keepalive_xmit, ovpn_peer_ping, 0); 99 | timer_setup(&peer->keepalive_recv, ovpn_peer_expire, 0); 100 | 101 | return peer; 102 | err_rx_ring: 103 | ptr_ring_cleanup(&peer->rx_ring, NULL); 104 | err_tx_ring: 105 | ptr_ring_cleanup(&peer->tx_ring, NULL); 106 | err_dst_cache: 107 | dst_cache_destroy(&peer->dst_cache); 108 | err: 109 | kfree(peer); 110 | return ERR_PTR(ret); 111 | } 112 | 113 | /* Reset the ovpn_sockaddr associated with a peer */ 114 | static int ovpn_peer_reset_sockaddr(struct ovpn_peer *peer, const struct sockaddr_storage *ss, 115 | const u8 *local_ip) 116 | { 117 | struct ovpn_bind *bind; 118 | size_t ip_len; 119 | 120 | /* create new ovpn_bind object */ 121 | bind = ovpn_bind_from_sockaddr(ss); 122 | if (IS_ERR(bind)) 123 | return PTR_ERR(bind); 124 | 125 | if (local_ip) { 126 | if (ss->ss_family == AF_INET) { 127 | ip_len = sizeof(struct in_addr); 128 | } else if (ss->ss_family == AF_INET6) { 129 | ip_len = sizeof(struct in6_addr); 130 | } else { 131 | netdev_dbg(peer->ovpn->dev, "%s: invalid family for remote endpoint\n", 132 | __func__); 133 | kfree(bind); 134 | return -EINVAL; 135 | } 136 | 137 | memcpy(&bind->local, local_ip, ip_len); 138 | } 139 | 140 | /* set binding */ 141 | ovpn_bind_reset(peer, bind); 142 | 143 | return 0; 144 | } 145 | 146 | void 
ovpn_peer_float(struct ovpn_peer *peer, struct sk_buff *skb) 147 | { 148 | struct sockaddr_storage ss; 149 | const u8 *local_ip = NULL; 150 | struct sockaddr_in6 *sa6; 151 | struct sockaddr_in *sa; 152 | struct ovpn_bind *bind; 153 | sa_family_t family; 154 | 155 | rcu_read_lock(); 156 | bind = rcu_dereference(peer->bind); 157 | if (unlikely(!bind)) 158 | goto unlock; 159 | 160 | if (likely(ovpn_bind_skb_src_match(bind, skb))) 161 | goto unlock; 162 | 163 | family = skb_protocol_to_family(skb); 164 | 165 | if (bind->sa.in4.sin_family == family) 166 | local_ip = (u8 *)&bind->local; 167 | 168 | switch (family) { 169 | case AF_INET: 170 | sa = (struct sockaddr_in *)&ss; 171 | sa->sin_family = AF_INET; 172 | sa->sin_addr.s_addr = ip_hdr(skb)->saddr; 173 | sa->sin_port = udp_hdr(skb)->source; 174 | break; 175 | case AF_INET6: 176 | sa6 = (struct sockaddr_in6 *)&ss; 177 | sa6->sin6_family = AF_INET6; 178 | sa6->sin6_addr = ipv6_hdr(skb)->saddr; 179 | sa6->sin6_port = udp_hdr(skb)->source; 180 | sa6->sin6_scope_id = ipv6_iface_scope_id(&ipv6_hdr(skb)->saddr, skb->skb_iif); 181 | break; 182 | default: 183 | goto unlock; 184 | } 185 | 186 | netdev_dbg(peer->ovpn->dev, "%s: peer %d floated to %pIScp", __func__, peer->id, &ss); 187 | ovpn_peer_reset_sockaddr(peer, (struct sockaddr_storage *)&ss, local_ip); 188 | unlock: 189 | rcu_read_unlock(); 190 | } 191 | 192 | static void ovpn_peer_timer_delete_all(struct ovpn_peer *peer) 193 | { 194 | timer_delete_sync(&peer->keepalive_xmit); 195 | timer_delete_sync(&peer->keepalive_recv); 196 | } 197 | 198 | static void ovpn_peer_free(struct ovpn_peer *peer) 199 | { 200 | ovpn_bind_reset(peer, NULL); 201 | ovpn_peer_timer_delete_all(peer); 202 | 203 | WARN_ON(!__ptr_ring_empty(&peer->tx_ring)); 204 | ptr_ring_cleanup(&peer->tx_ring, NULL); 205 | WARN_ON(!__ptr_ring_empty(&peer->rx_ring)); 206 | ptr_ring_cleanup(&peer->rx_ring, NULL); 207 | WARN_ON(!__ptr_ring_empty(&peer->netif_rx_ring)); 208 | ptr_ring_cleanup(&peer->netif_rx_ring, 
NULL); 209 | 210 | dst_cache_destroy(&peer->dst_cache); 211 | 212 | dev_put(peer->ovpn->dev); 213 | 214 | kfree(peer); 215 | } 216 | 217 | static void ovpn_peer_release_rcu(struct rcu_head *head) 218 | { 219 | struct ovpn_peer *peer = container_of(head, struct ovpn_peer, rcu); 220 | 221 | ovpn_crypto_state_release(&peer->crypto); 222 | ovpn_peer_free(peer); 223 | } 224 | 225 | void ovpn_peer_release(struct ovpn_peer *peer) 226 | { 227 | napi_disable(&peer->napi); 228 | netif_napi_del(&peer->napi); 229 | 230 | if (peer->sock) 231 | ovpn_socket_put(peer->sock); 232 | 233 | call_rcu(&peer->rcu, ovpn_peer_release_rcu); 234 | } 235 | 236 | static void ovpn_peer_delete_work(struct work_struct *work) 237 | { 238 | struct ovpn_peer *peer = container_of(work, struct ovpn_peer, 239 | delete_work); 240 | ovpn_peer_release(peer); 241 | ovpn_netlink_notify_del_peer(peer); 242 | } 243 | 244 | /* Use with kref_put calls, when releasing refcount 245 | * on ovpn_peer objects. This method should only 246 | * be called from process context with config_mutex held. 
247 | */ 248 | void ovpn_peer_release_kref(struct kref *kref) 249 | { 250 | struct ovpn_peer *peer = container_of(kref, struct ovpn_peer, refcount); 251 | 252 | INIT_WORK(&peer->delete_work, ovpn_peer_delete_work); 253 | queue_work(peer->ovpn->events_wq, &peer->delete_work); 254 | } 255 | 256 | struct ovpn_peer *ovpn_peer_new(struct ovpn_struct *ovpn, const struct sockaddr_storage *sa, 257 | struct socket *sock, u32 id, uint8_t *local_ip) 258 | { 259 | struct ovpn_peer *peer; 260 | int ret; 261 | 262 | /* create new peer */ 263 | peer = ovpn_peer_create(ovpn, id); 264 | if (IS_ERR(peer)) 265 | return peer; 266 | 267 | if (sock->sk->sk_protocol == IPPROTO_UDP) { 268 | /* a UDP peer must have a remote endpoint */ 269 | if (!sa) { 270 | ovpn_peer_release(peer); 271 | return ERR_PTR(-EINVAL); 272 | } 273 | 274 | /* set peer sockaddr */ 275 | ret = ovpn_peer_reset_sockaddr(peer, sa, local_ip); 276 | if (ret < 0) { 277 | ovpn_peer_release(peer); 278 | return ERR_PTR(ret); 279 | } 280 | } 281 | 282 | peer->sock = ovpn_socket_new(sock, peer); 283 | if (IS_ERR(peer->sock)) { 284 | peer->sock = NULL; 285 | ovpn_peer_release(peer); 286 | return ERR_PTR(-ENOTSOCK); 287 | } 288 | 289 | return peer; 290 | } 291 | 292 | /* Configure keepalive parameters */ 293 | void ovpn_peer_keepalive_set(struct ovpn_peer *peer, u32 interval, u32 timeout) 294 | { 295 | u32 delta; 296 | 297 | netdev_dbg(peer->ovpn->dev, 298 | "%s: scheduling keepalive for peer %u: interval=%u timeout=%u\n", __func__, 299 | peer->id, interval, timeout); 300 | 301 | peer->keepalive_interval = interval; 302 | if (interval > 0) { 303 | delta = msecs_to_jiffies(interval * MSEC_PER_SEC); 304 | mod_timer(&peer->keepalive_xmit, jiffies + delta); 305 | } else { 306 | timer_delete(&peer->keepalive_xmit); 307 | } 308 | 309 | peer->keepalive_timeout = timeout; 310 | if (timeout) { 311 | delta = msecs_to_jiffies(timeout * MSEC_PER_SEC); 312 | mod_timer(&peer->keepalive_recv, jiffies + delta); 313 | } else { 314 | 
timer_delete(&peer->keepalive_recv); 315 | } 316 | } 317 | 318 | #define ovpn_peer_index(_tbl, _key, _key_len) \ 319 | (jhash(_key, _key_len, 0) % HASH_SIZE(_tbl)) \ 320 | 321 | static struct ovpn_peer *ovpn_peer_lookup_vpn_addr4(struct hlist_head *head, __be32 *addr) 322 | { 323 | struct ovpn_peer *tmp, *peer = NULL; 324 | 325 | rcu_read_lock(); 326 | hlist_for_each_entry_rcu(tmp, head, hash_entry_addr4) { 327 | if (*addr != tmp->vpn_addrs.ipv4.s_addr) 328 | continue; 329 | 330 | if (!ovpn_peer_hold(tmp)) 331 | continue; 332 | 333 | peer = tmp; 334 | break; 335 | } 336 | rcu_read_unlock(); 337 | 338 | return peer; 339 | } 340 | 341 | static struct ovpn_peer *ovpn_peer_lookup_vpn_addr6(struct hlist_head *head, struct in6_addr *addr) 342 | { 343 | struct ovpn_peer *tmp, *peer = NULL; 344 | int i; 345 | 346 | rcu_read_lock(); 347 | hlist_for_each_entry_rcu(tmp, head, hash_entry_addr6) { 348 | for (i = 0; i < 4; i++) { 349 | if (addr->s6_addr32[i] != tmp->vpn_addrs.ipv6.s6_addr32[i]) 350 | continue; 351 | } 352 | 353 | if (!ovpn_peer_hold(tmp)) 354 | continue; 355 | 356 | peer = tmp; 357 | break; 358 | } 359 | rcu_read_unlock(); 360 | 361 | return peer; 362 | } 363 | 364 | /** 365 | * ovpn_nexthop4() - looks up the IP of the nexthop for the given destination 366 | * 367 | * Looks up in the IPv4 system routing table the IO of the nexthop to be used 368 | * to reach the destination passed as argument. IF no nexthop can be found, the 369 | * destination itself is returned as it probably has to be used as nexthop. 
370 | * 371 | * @ovpn: the private data representing the current VPN session 372 | * @dst: the destination to be looked up 373 | * 374 | * Return the IP of the next hop if found or the dst itself otherwise 375 | */ 376 | static __be32 ovpn_nexthop4(struct ovpn_struct *ovpn, __be32 dst) 377 | { 378 | struct rtable *rt; 379 | struct flowi4 fl = { 380 | .daddr = dst 381 | }; 382 | 383 | rt = ip_route_output_flow(dev_net(ovpn->dev), &fl, NULL); 384 | if (IS_ERR(rt)) { 385 | net_dbg_ratelimited("%s: no route to host %pI4\n", __func__, &dst); 386 | /* if we end up here this packet is probably going to be thrown away later */ 387 | return dst; 388 | } 389 | 390 | if (!rt->rt_uses_gateway) 391 | goto out; 392 | 393 | dst = rt->rt_gw4; 394 | out: 395 | ip_rt_put(rt); 396 | return dst; 397 | } 398 | 399 | /** 400 | * ovpn_nexthop6() - looks up the IPv6 of the nexthop for the given destination 401 | * 402 | * Looks up in the IPv6 system routing table the IO of the nexthop to be used 403 | * to reach the destination passed as argument. IF no nexthop can be found, the 404 | * destination itself is returned as it probably has to be used as nexthop. 
405 | * 406 | * @ovpn: the private data representing the current VPN session 407 | * @dst: the destination to be looked up 408 | * 409 | * Return the IP of the next hop if found or the dst itself otherwise 410 | */ 411 | static struct in6_addr ovpn_nexthop6(struct ovpn_struct *ovpn, struct in6_addr dst) 412 | { 413 | #if IS_ENABLED(CONFIG_IPV6) 414 | struct rt6_info *rt; 415 | struct flowi6 fl = { 416 | .daddr = dst, 417 | }; 418 | 419 | rt = (struct rt6_info *)ipv6_stub->ipv6_dst_lookup_flow(dev_net(ovpn->dev), NULL, &fl, 420 | NULL); 421 | if (IS_ERR(rt)) { 422 | net_dbg_ratelimited("%s: no route to host %pI6\n", __func__, &dst); 423 | /* if we end up here this packet is probably going to be thrown away later */ 424 | return dst; 425 | } 426 | 427 | if (!(rt->rt6i_flags & RTF_GATEWAY)) 428 | goto out; 429 | 430 | dst = rt->rt6i_gateway; 431 | out: 432 | dst_release((struct dst_entry *)rt); 433 | #endif 434 | return dst; 435 | } 436 | 437 | /** 438 | * ovpn_peer_lookup_vpn_addr() - Lookup peer to send skb to 439 | * 440 | * This function takes a tunnel packet and looks up the peer to send it to 441 | * after encapsulation. The skb is expected to be the in-tunnel packet, without 442 | * any OpenVPN related header. 443 | * 444 | * Assume that the IP header is accessible in the skb data. 445 | * 446 | * @ovpn: the private data representing the current VPN session 447 | * @skb: the skb to extract the destination address from 448 | * 449 | * Return the peer if found or NULL otherwise. 
 */
struct ovpn_peer *ovpn_peer_lookup_vpn_addr(struct ovpn_struct *ovpn, struct sk_buff *skb,
					    bool use_src)
{
	struct ovpn_peer *tmp, *peer = NULL;
	struct hlist_head *head;
	sa_family_t sa_fam;
	struct in6_addr addr6;
	__be32 addr4;
	u32 index;

	/* in P2P mode, no matter the destination, packets are always sent to the single peer
	 * listening on the other side
	 */
	if (ovpn->mode == OVPN_MODE_P2P) {
		rcu_read_lock();
		tmp = rcu_dereference(ovpn->peer);
		/* hold may fail if the peer is concurrently being released */
		if (likely(tmp && ovpn_peer_hold(tmp)))
			peer = tmp;
		rcu_read_unlock();
		return peer;
	}

	sa_fam = skb_protocol_to_family(skb);

	/* non IPv4/IPv6 traffic falls through and returns NULL */
	switch (sa_fam) {
	case AF_INET:
		if (use_src)
			addr4 = ip_hdr(skb)->saddr;
		else
			addr4 = ip_hdr(skb)->daddr;
		/* route towards the address may go via a gateway: hash on the next hop */
		addr4 = ovpn_nexthop4(ovpn, addr4);

		index = ovpn_peer_index(ovpn->peers.by_vpn_addr, &addr4, sizeof(addr4));
		head = &ovpn->peers.by_vpn_addr[index];

		peer = ovpn_peer_lookup_vpn_addr4(head, &addr4);
		break;
	case AF_INET6:
		if (use_src)
			addr6 = ipv6_hdr(skb)->saddr;
		else
			addr6 = ipv6_hdr(skb)->daddr;
		addr6 = ovpn_nexthop6(ovpn, addr6);

		index = ovpn_peer_index(ovpn->peers.by_vpn_addr, &addr6, sizeof(addr6));
		head = &ovpn->peers.by_vpn_addr[index];

		peer = ovpn_peer_lookup_vpn_addr6(head, &addr6);
		break;
	}

	return peer;
}

/* check whether the remote (transport) address of @peer matches @ss.
 * Must be invoked within an RCU read side critical section (peer->bind is
 * RCU protected)
 */
static bool ovpn_peer_transp_match(struct ovpn_peer *peer, struct sockaddr_storage *ss)
{
	struct ovpn_bind *bind = rcu_dereference(peer->bind);
	struct sockaddr_in6 *sa6;
	struct sockaddr_in *sa4;

	if (unlikely(!bind))
		return false;

	/* sin_family sits at the same offset in both union members */
	if (ss->ss_family != bind->sa.in4.sin_family)
		return false;

	switch (ss->ss_family) {
	case AF_INET:
		sa4 = (struct sockaddr_in *)ss;
		if
		   (sa4->sin_addr.s_addr != bind->sa.in4.sin_addr.s_addr)
			return false;
		if (sa4->sin_port != bind->sa.in4.sin_port)
			return false;
		break;
	case AF_INET6:
		sa6 = (struct sockaddr_in6 *)ss;
		if (memcmp(&sa6->sin6_addr, &bind->sa.in6.sin6_addr, sizeof(struct in6_addr)))
			return false;
		if (sa6->sin6_port != bind->sa.in6.sin6_port)
			return false;
		break;
	default:
		return false;
	}

	return true;
}

/* extract the remote (source) transport address of the sender of @skb.
 * NOTE(review): the port is read via udp_hdr() — this assumes the transport
 * header points at a UDP header; confirm callers only pass UDP-encapsulated
 * packets here
 */
static bool ovpn_peer_skb_to_sockaddr(struct sk_buff *skb, struct sockaddr_storage *ss)
{
	struct sockaddr_in6 *sa6;
	struct sockaddr_in *sa4;

	ss->ss_family = skb_protocol_to_family(skb);
	switch (ss->ss_family) {
	case AF_INET:
		sa4 = (struct sockaddr_in *)ss;
		sa4->sin_family = AF_INET;
		sa4->sin_addr.s_addr = ip_hdr(skb)->saddr;
		sa4->sin_port = udp_hdr(skb)->source;
		break;
	case AF_INET6:
		sa6 = (struct sockaddr_in6 *)ss;
		sa6->sin6_family = AF_INET6;
		sa6->sin6_addr = ipv6_hdr(skb)->saddr;
		sa6->sin6_port = udp_hdr(skb)->source;
		break;
	default:
		return false;
	}

	return true;
}

/* P2P variant of the transport address lookup: match against the single peer */
static struct ovpn_peer *ovpn_peer_lookup_transp_addr_p2p(struct ovpn_struct *ovpn,
							  struct sockaddr_storage *ss)
{
	struct ovpn_peer *tmp, *peer = NULL;

	rcu_read_lock();
	tmp = rcu_dereference(ovpn->peer);
	if (likely(tmp && ovpn_peer_transp_match(tmp, ss) && ovpn_peer_hold(tmp)))
		peer = tmp;
	rcu_read_unlock();

	return peer;
}

/* lookup a peer by the transport address the packet was received from.
 * Returns the peer with an elevated refcount, or NULL if not found
 */
struct ovpn_peer *ovpn_peer_lookup_transp_addr(struct ovpn_struct *ovpn, struct sk_buff *skb)
{
	struct ovpn_peer *peer = NULL, *tmp;
	struct sockaddr_storage ss = { 0 };
	struct hlist_head *head;
	size_t sa_len;
	bool found;
	u32 index;

	if (unlikely(!ovpn_peer_skb_to_sockaddr(skb, &ss)))
		return NULL;

	/* in P2P mode the hashtables are not used: check the single peer directly */
	if (ovpn->mode == OVPN_MODE_P2P)
		return ovpn_peer_lookup_transp_addr_p2p(ovpn, &ss);

	/* hash only the meaningful part of the sockaddr_storage */
	switch (ss.ss_family) {
	case AF_INET:
		sa_len = sizeof(struct sockaddr_in);
		break;
	case AF_INET6:
		sa_len = sizeof(struct sockaddr_in6);
		break;
	default:
		return NULL;
	}

	index = ovpn_peer_index(ovpn->peers.by_transp_addr, &ss, sa_len);
	head = &ovpn->peers.by_transp_addr[index];

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp, head, hash_entry_transp_addr) {
		found = ovpn_peer_transp_match(tmp, &ss);
		if (!found)
			continue;

		/* hold may fail if the peer is concurrently being released */
		if (!ovpn_peer_hold(tmp))
			continue;

		peer = tmp;
		break;
	}
	rcu_read_unlock();

	return peer;
}

/* P2P variant of the peer-ID lookup: match the single peer's ID */
static struct ovpn_peer *ovpn_peer_lookup_id_p2p(struct ovpn_struct *ovpn, u32 peer_id)
{
	struct ovpn_peer *tmp, *peer = NULL;

	rcu_read_lock();
	tmp = rcu_dereference(ovpn->peer);
	if (likely(tmp && tmp->id == peer_id && ovpn_peer_hold(tmp)))
		peer = tmp;
	rcu_read_unlock();

	return peer;
}

/* lookup a peer by its unique ID.
 * Returns the peer with an elevated refcount, or NULL if not found
 */
struct ovpn_peer *ovpn_peer_lookup_id(struct ovpn_struct *ovpn, u32 peer_id)
{
	struct ovpn_peer *tmp, *peer = NULL;
	struct hlist_head *head;
	u32 index;

	if (ovpn->mode == OVPN_MODE_P2P)
		return ovpn_peer_lookup_id_p2p(ovpn, peer_id);

	index = ovpn_peer_index(ovpn->peers.by_id, &peer_id, sizeof(peer_id));
	head = &ovpn->peers.by_id[index];

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp, head, hash_entry_id) {
		if (tmp->id != peer_id)
			continue;

		if (!ovpn_peer_hold(tmp))
			continue;

		peer = tmp;
		break;
	}
	rcu_read_unlock();

	return peer;
}

/* learn the local address the peer used to reach us, so that replies are
 * sourced from the same address on multihomed hosts
 */
void ovpn_peer_update_local_endpoint(struct ovpn_peer *peer, struct sk_buff *skb)
{
	struct ovpn_bind *bind;

| rcu_read_lock(); 671 | bind = rcu_dereference(peer->bind); 672 | if (unlikely(!bind)) 673 | goto unlock; 674 | 675 | switch (skb_protocol_to_family(skb)) { 676 | case AF_INET: 677 | if (unlikely(bind->local.ipv4.s_addr != ip_hdr(skb)->daddr)) { 678 | netdev_dbg(peer->ovpn->dev, 679 | "%s: learning local IPv4 for peer %d (%pI4 -> %pI4)\n", __func__, 680 | peer->id, &bind->local.ipv4.s_addr, &ip_hdr(skb)->daddr); 681 | bind->local.ipv4.s_addr = ip_hdr(skb)->daddr; 682 | } 683 | break; 684 | case AF_INET6: 685 | if (unlikely(memcmp(&bind->local.ipv6, &ipv6_hdr(skb)->daddr, 686 | sizeof(bind->local.ipv6)))) { 687 | netdev_dbg(peer->ovpn->dev, 688 | "%s: learning local IPv6 for peer %d (%pI6c -> %pI6c\n", 689 | __func__, peer->id, &bind->local.ipv6, &ipv6_hdr(skb)->daddr); 690 | bind->local.ipv6 = ipv6_hdr(skb)->daddr; 691 | } 692 | break; 693 | default: 694 | break; 695 | } 696 | unlock: 697 | rcu_read_unlock(); 698 | } 699 | 700 | static int ovpn_peer_add_mp(struct ovpn_struct *ovpn, struct ovpn_peer *peer) 701 | { 702 | struct sockaddr_storage sa = { 0 }; 703 | struct sockaddr_in6 *sa6; 704 | struct sockaddr_in *sa4; 705 | struct ovpn_bind *bind; 706 | struct ovpn_peer *tmp; 707 | size_t salen; 708 | int ret = 0; 709 | u32 index; 710 | 711 | spin_lock_bh(&ovpn->peers.lock); 712 | /* do not add duplicates */ 713 | tmp = ovpn_peer_lookup_id(ovpn, peer->id); 714 | if (tmp) { 715 | ovpn_peer_put(tmp); 716 | ret = -EEXIST; 717 | goto unlock; 718 | } 719 | 720 | hlist_del_init_rcu(&peer->hash_entry_transp_addr); 721 | bind = rcu_dereference_protected(peer->bind, true); 722 | /* peers connected via UDP have bind == NULL */ 723 | if (bind) { 724 | switch (bind->sa.in4.sin_family) { 725 | case AF_INET: 726 | sa4 = (struct sockaddr_in *)&sa; 727 | 728 | sa4->sin_family = AF_INET; 729 | sa4->sin_addr.s_addr = bind->sa.in4.sin_addr.s_addr; 730 | sa4->sin_port = bind->sa.in4.sin_port; 731 | salen = sizeof(*sa4); 732 | break; 733 | case AF_INET6: 734 | sa6 = (struct sockaddr_in6 
			       *)&sa;

			sa6->sin6_family = AF_INET6;
			sa6->sin6_addr = bind->sa.in6.sin6_addr;
			sa6->sin6_port = bind->sa.in6.sin6_port;
			salen = sizeof(*sa6);
			break;
		default:
			ret = -EPROTONOSUPPORT;
			goto unlock;
		}

		index = ovpn_peer_index(ovpn->peers.by_transp_addr, &sa, salen);
		hlist_add_head_rcu(&peer->hash_entry_transp_addr,
				   &ovpn->peers.by_transp_addr[index]);
	}

	index = ovpn_peer_index(ovpn->peers.by_id, &peer->id, sizeof(peer->id));
	hlist_add_head_rcu(&peer->hash_entry_id, &ovpn->peers.by_id[index]);

	/* index the peer by its VPN addresses only when they were configured */
	if (peer->vpn_addrs.ipv4.s_addr != htonl(INADDR_ANY)) {
		index = ovpn_peer_index(ovpn->peers.by_vpn_addr, &peer->vpn_addrs.ipv4,
					sizeof(peer->vpn_addrs.ipv4));
		hlist_add_head_rcu(&peer->hash_entry_addr4, &ovpn->peers.by_vpn_addr[index]);
	}

	hlist_del_init_rcu(&peer->hash_entry_addr6);
	if (memcmp(&peer->vpn_addrs.ipv6, &in6addr_any, sizeof(peer->vpn_addrs.ipv6))) {
		index = ovpn_peer_index(ovpn->peers.by_vpn_addr, &peer->vpn_addrs.ipv6,
					sizeof(peer->vpn_addrs.ipv6));
		hlist_add_head_rcu(&peer->hash_entry_addr6, &ovpn->peers.by_vpn_addr[index]);
	}

unlock:
	spin_unlock_bh(&ovpn->peers.lock);

	return ret;
}

/* install @peer as the single peer of a P2P instance, replacing any old one */
static int ovpn_peer_add_p2p(struct ovpn_struct *ovpn, struct ovpn_peer *peer)
{
	struct ovpn_peer *tmp;

	spin_lock_bh(&ovpn->lock);
	/* in p2p mode it is possible to have a single peer only, therefore the
	 * old one is released and substituted by the new one
	 */
	/* NOTE(review): ovpn->peer is read under ovpn->lock, not an RCU read
	 * section — rcu_dereference_protected(ovpn->peer,
	 * lockdep_is_held(&ovpn->lock)) would express this more precisely;
	 * confirm against the locking scheme in ovpnstruct.h
	 */
	tmp = rcu_dereference(ovpn->peer);
	if (tmp) {
		tmp->delete_reason = OVPN_DEL_PEER_REASON_TEARDOWN;
		ovpn_peer_put(tmp);
	}

	rcu_assign_pointer(ovpn->peer, peer);
	spin_unlock_bh(&ovpn->lock);

	return 0;
}

/* assume refcounter was increased by caller */
int ovpn_peer_add(struct ovpn_struct
*ovpn, struct ovpn_peer *peer) 795 | { 796 | switch (ovpn->mode) { 797 | case OVPN_MODE_MP: 798 | return ovpn_peer_add_mp(ovpn, peer); 799 | case OVPN_MODE_P2P: 800 | return ovpn_peer_add_p2p(ovpn, peer); 801 | default: 802 | return -EOPNOTSUPP; 803 | } 804 | } 805 | 806 | static void ovpn_peer_unhash(struct ovpn_peer *peer, enum ovpn_del_peer_reason reason) 807 | { 808 | hlist_del_init_rcu(&peer->hash_entry_id); 809 | hlist_del_init_rcu(&peer->hash_entry_addr4); 810 | hlist_del_init_rcu(&peer->hash_entry_addr6); 811 | hlist_del_init_rcu(&peer->hash_entry_transp_addr); 812 | 813 | ovpn_peer_put(peer); 814 | peer->delete_reason = reason; 815 | } 816 | 817 | static int ovpn_peer_del_mp(struct ovpn_peer *peer, enum ovpn_del_peer_reason reason) 818 | { 819 | struct ovpn_peer *tmp; 820 | int ret = 0; 821 | 822 | spin_lock_bh(&peer->ovpn->peers.lock); 823 | tmp = ovpn_peer_lookup_id(peer->ovpn, peer->id); 824 | if (tmp != peer) { 825 | ret = -ENOENT; 826 | goto unlock; 827 | } 828 | ovpn_peer_unhash(peer, reason); 829 | 830 | unlock: 831 | spin_unlock_bh(&peer->ovpn->peers.lock); 832 | 833 | if (tmp) 834 | ovpn_peer_put(tmp); 835 | 836 | return ret; 837 | } 838 | 839 | static int ovpn_peer_del_p2p(struct ovpn_peer *peer, enum ovpn_del_peer_reason reason) 840 | { 841 | struct ovpn_peer *tmp; 842 | int ret = -ENOENT; 843 | 844 | spin_lock_bh(&peer->ovpn->lock); 845 | tmp = rcu_dereference(peer->ovpn->peer); 846 | if (tmp != peer) 847 | goto unlock; 848 | 849 | ovpn_peer_put(tmp); 850 | tmp->delete_reason = reason; 851 | RCU_INIT_POINTER(peer->ovpn->peer, NULL); 852 | ret = 0; 853 | 854 | unlock: 855 | spin_unlock_bh(&peer->ovpn->lock); 856 | 857 | return ret; 858 | } 859 | 860 | void ovpn_peer_release_p2p(struct ovpn_struct *ovpn) 861 | { 862 | struct ovpn_peer *tmp; 863 | 864 | rcu_read_lock(); 865 | tmp = rcu_dereference(ovpn->peer); 866 | if (!tmp) 867 | goto unlock; 868 | 869 | ovpn_peer_del_p2p(tmp, OVPN_DEL_PEER_REASON_TEARDOWN); 870 | unlock: 871 | 
rcu_read_unlock(); 872 | } 873 | 874 | int ovpn_peer_del(struct ovpn_peer *peer, enum ovpn_del_peer_reason reason) 875 | { 876 | switch (peer->ovpn->mode) { 877 | case OVPN_MODE_MP: 878 | return ovpn_peer_del_mp(peer, reason); 879 | case OVPN_MODE_P2P: 880 | return ovpn_peer_del_p2p(peer, reason); 881 | default: 882 | return -EOPNOTSUPP; 883 | } 884 | } 885 | 886 | void ovpn_peers_free(struct ovpn_struct *ovpn) 887 | { 888 | struct hlist_node *tmp; 889 | struct ovpn_peer *peer; 890 | int bkt; 891 | 892 | spin_lock_bh(&ovpn->peers.lock); 893 | hash_for_each_safe(ovpn->peers.by_id, bkt, tmp, peer, hash_entry_id) 894 | ovpn_peer_unhash(peer, OVPN_DEL_PEER_REASON_TEARDOWN); 895 | spin_unlock_bh(&ovpn->peers.lock); 896 | } 897 | -------------------------------------------------------------------------------- /drivers/net/ovpn-dco/netlink.c: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-2.0 2 | /* OpenVPN data channel accelerator 3 | * 4 | * Copyright (C) 2020- OpenVPN, Inc. 5 | * 6 | * Author: Antonio Quartulli 7 | */ 8 | 9 | #include "main.h" 10 | #include "ovpn.h" 11 | #include "peer.h" 12 | #include "proto.h" 13 | #include "netlink.h" 14 | #include "ovpnstruct.h" 15 | #include "udp.h" 16 | 17 | #include 18 | 19 | #include 20 | #include 21 | #include 22 | #include 23 | #include 24 | #include 25 | #include 26 | #include 27 | #include 28 | 29 | /** The ovpn-dco netlink family */ 30 | static struct genl_family ovpn_netlink_family; 31 | 32 | enum ovpn_netlink_multicast_groups { 33 | OVPN_MCGRP_PEERS, 34 | }; 35 | 36 | static const struct genl_multicast_group ovpn_netlink_mcgrps[] = { 37 | [OVPN_MCGRP_PEERS] = { .name = OVPN_NL_MULTICAST_GROUP_PEERS }, 38 | }; 39 | 40 | /** Key direction policy. 
 Can be used for configuring an encryption and a decryption key */
static const struct nla_policy ovpn_netlink_policy_key_dir[OVPN_KEY_DIR_ATTR_MAX + 1] = {
	[OVPN_KEY_DIR_ATTR_CIPHER_KEY] = NLA_POLICY_MAX_LEN(U8_MAX),
	[OVPN_KEY_DIR_ATTR_NONCE_TAIL] = NLA_POLICY_EXACT_LEN(NONCE_TAIL_SIZE),
};

/** CMD_NEW_KEY policy */
static const struct nla_policy ovpn_netlink_policy_new_key[OVPN_NEW_KEY_ATTR_MAX + 1] = {
	[OVPN_NEW_KEY_ATTR_PEER_ID] = { .type = NLA_U32 },
	[OVPN_NEW_KEY_ATTR_KEY_SLOT] = NLA_POLICY_RANGE(NLA_U8, __OVPN_KEY_SLOT_FIRST,
							__OVPN_KEY_SLOT_AFTER_LAST - 1),
	[OVPN_NEW_KEY_ATTR_KEY_ID] = { .type = NLA_U8 },
	[OVPN_NEW_KEY_ATTR_CIPHER_ALG] = { .type = NLA_U16 },
	[OVPN_NEW_KEY_ATTR_ENCRYPT_KEY] = NLA_POLICY_NESTED(ovpn_netlink_policy_key_dir),
	[OVPN_NEW_KEY_ATTR_DECRYPT_KEY] = NLA_POLICY_NESTED(ovpn_netlink_policy_key_dir),
};

/** CMD_DEL_KEY policy */
static const struct nla_policy ovpn_netlink_policy_del_key[OVPN_DEL_KEY_ATTR_MAX + 1] = {
	[OVPN_DEL_KEY_ATTR_PEER_ID] = { .type = NLA_U32 },
	[OVPN_DEL_KEY_ATTR_KEY_SLOT] = NLA_POLICY_RANGE(NLA_U8, __OVPN_KEY_SLOT_FIRST,
							__OVPN_KEY_SLOT_AFTER_LAST - 1),
};

/** CMD_SWAP_KEYS policy */
static const struct nla_policy ovpn_netlink_policy_swap_keys[OVPN_SWAP_KEYS_ATTR_MAX + 1] = {
	[OVPN_SWAP_KEYS_ATTR_PEER_ID] = { .type = NLA_U32 },
};

/** CMD_NEW_PEER policy */
static const struct nla_policy ovpn_netlink_policy_new_peer[OVPN_NEW_PEER_ATTR_MAX + 1] = {
	[OVPN_NEW_PEER_ATTR_PEER_ID] = { .type = NLA_U32 },
	/* MIN_LEN only: the exact size (v4 vs v6) is validated in the handler */
	[OVPN_NEW_PEER_ATTR_SOCKADDR_REMOTE] = NLA_POLICY_MIN_LEN(sizeof(struct sockaddr)),
	[OVPN_NEW_PEER_ATTR_SOCKET] = { .type = NLA_U32 },
	[OVPN_NEW_PEER_ATTR_IPV4] = { .type = NLA_U32 },
	[OVPN_NEW_PEER_ATTR_IPV6] = NLA_POLICY_EXACT_LEN(sizeof(struct in6_addr)),
	/* may carry an in_addr or an in6_addr: length is checked in the handler */
	[OVPN_NEW_PEER_ATTR_LOCAL_IP] = NLA_POLICY_MAX_LEN(sizeof(struct in6_addr)),
};

/** CMD_SET_PEER policy */
static const struct nla_policy ovpn_netlink_policy_set_peer[OVPN_SET_PEER_ATTR_MAX + 1] = {
	[OVPN_SET_PEER_ATTR_PEER_ID] = { .type = NLA_U32 },
	[OVPN_SET_PEER_ATTR_KEEPALIVE_INTERVAL] = { .type = NLA_U32 },
	[OVPN_SET_PEER_ATTR_KEEPALIVE_TIMEOUT] = { .type = NLA_U32 },
};

/** CMD_DEL_PEER policy */
static const struct nla_policy ovpn_netlink_policy_del_peer[OVPN_DEL_PEER_ATTR_MAX + 1] = {
	[OVPN_DEL_PEER_ATTR_REASON] = NLA_POLICY_RANGE(NLA_U8, __OVPN_DEL_PEER_REASON_FIRST,
						       __OVPN_DEL_PEER_REASON_AFTER_LAST - 1),
	[OVPN_DEL_PEER_ATTR_PEER_ID] = { .type = NLA_U32 },
};

/** CMD_GET_PEER policy */
static const struct nla_policy ovpn_netlink_policy_get_peer[OVPN_GET_PEER_ATTR_MAX + 1] = {
	[OVPN_GET_PEER_ATTR_PEER_ID] = { .type = NLA_U32 },
};

/** Generic message container policy */
static const struct nla_policy ovpn_netlink_policy[OVPN_ATTR_MAX + 1] = {
	[OVPN_ATTR_IFINDEX] = { .type = NLA_U32 },
	[OVPN_ATTR_NEW_PEER] = NLA_POLICY_NESTED(ovpn_netlink_policy_new_peer),
	[OVPN_ATTR_SET_PEER] = NLA_POLICY_NESTED(ovpn_netlink_policy_set_peer),
	[OVPN_ATTR_DEL_PEER] = NLA_POLICY_NESTED(ovpn_netlink_policy_del_peer),
	[OVPN_ATTR_GET_PEER] = NLA_POLICY_NESTED(ovpn_netlink_policy_get_peer),
	[OVPN_ATTR_NEW_KEY] = NLA_POLICY_NESTED(ovpn_netlink_policy_new_key),
	[OVPN_ATTR_SWAP_KEYS] = NLA_POLICY_NESTED(ovpn_netlink_policy_swap_keys),
	[OVPN_ATTR_DEL_KEY] = NLA_POLICY_NESTED(ovpn_netlink_policy_del_key),
};

/* resolve OVPN_ATTR_IFINDEX to a net_device.
 * On success the device is returned with an elevated refcount (dev_get_by_index)
 * which the caller must drop with dev_put()
 */
static struct net_device *
ovpn_get_dev_from_attrs(struct net *net, struct nlattr **attrs)
{
	struct net_device *dev;
	int ifindex;

	if (!attrs[OVPN_ATTR_IFINDEX])
		return ERR_PTR(-EINVAL);

	ifindex = nla_get_u32(attrs[OVPN_ATTR_IFINDEX]);

	dev = dev_get_by_index(net, ifindex);
	if (!dev)
		return ERR_PTR(-ENODEV);

	/* reject devices that are not ovpn interfaces */
	if (!ovpn_dev_is_valid(dev))
		goto err_put_dev;

	return dev;

err_put_dev:
	dev_put(dev);

	return ERR_PTR(-EINVAL);
}

/**
 * ovpn_pre_doit() - Prepare ovpn genl doit request
 * @ops: requested netlink operation
 * @skb: Netlink message with request data
 * @info: receiver information
 *
 * Return: 0 on success or negative error number in case of failure
 */
static int ovpn_pre_doit(const struct genl_split_ops *ops, struct sk_buff *skb,
			 struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	struct net_device *dev;

	dev = ovpn_get_dev_from_attrs(net, info->attrs);
	if (IS_ERR(dev))
		return PTR_ERR(dev);

	/* the device reference taken above is released in ovpn_post_doit() */
	info->user_ptr[0] = netdev_priv(dev);

	return 0;
}

/**
 * ovpn_post_doit() - complete ovpn genl doit request
 * @ops: requested netlink operation
 * @skb: Netlink message with request data
 * @info: receiver information
 */
static void ovpn_post_doit(const struct genl_split_ops *ops, struct sk_buff *skb,
			   struct genl_info *info)
{
	struct ovpn_struct *ovpn;

	ovpn = info->user_ptr[0];
	dev_put(ovpn->dev);
}

/* parse one nested ENCRYPT_KEY/DECRYPT_KEY attribute into @dir.
 * The returned pointers reference the netlink message payload and are only
 * valid for the duration of the request
 */
static int ovpn_netlink_get_key_dir(struct genl_info *info, struct nlattr *key,
				    enum ovpn_cipher_alg cipher,
				    struct ovpn_key_direction *dir)
{
	struct nlattr *attr, *attrs[OVPN_KEY_DIR_ATTR_MAX + 1];
	int ret;

	ret = nla_parse_nested(attrs, OVPN_KEY_DIR_ATTR_MAX, key, NULL, info->extack);
	if (ret)
		return ret;

	switch (cipher) {
	case OVPN_CIPHER_ALG_AES_GCM:
	case OVPN_CIPHER_ALG_CHACHA20_POLY1305:
		attr = attrs[OVPN_KEY_DIR_ATTR_CIPHER_KEY];
		if (!attr)
			return -EINVAL;

		dir->cipher_key = nla_data(attr);
		dir->cipher_key_size = nla_len(attr);

		attr =
attrs[OVPN_KEY_DIR_ATTR_NONCE_TAIL]; 196 | /* These algorithms require a 96bit nonce, 197 | * Construct it by combining 4-bytes packet id and 198 | * 8-bytes nonce-tail from userspace 199 | */ 200 | if (!attr) 201 | return -EINVAL; 202 | 203 | dir->nonce_tail = nla_data(attr); 204 | dir->nonce_tail_size = nla_len(attr); 205 | break; 206 | default: 207 | return -EINVAL; 208 | } 209 | 210 | return 0; 211 | } 212 | 213 | static int ovpn_netlink_new_key(struct sk_buff *skb, struct genl_info *info) 214 | { 215 | struct nlattr *attrs[OVPN_NEW_KEY_ATTR_MAX + 1]; 216 | struct ovpn_struct *ovpn = info->user_ptr[0]; 217 | struct ovpn_peer_key_reset pkr; 218 | struct ovpn_peer *peer; 219 | u32 peer_id; 220 | int ret; 221 | 222 | if (!info->attrs[OVPN_ATTR_NEW_KEY]) 223 | return -EINVAL; 224 | 225 | ret = nla_parse_nested(attrs, OVPN_NEW_KEY_ATTR_MAX, info->attrs[OVPN_ATTR_NEW_KEY], 226 | NULL, info->extack); 227 | if (ret) 228 | return ret; 229 | 230 | if (!attrs[OVPN_NEW_KEY_ATTR_PEER_ID] || 231 | !attrs[OVPN_NEW_KEY_ATTR_KEY_SLOT] || 232 | !attrs[OVPN_NEW_KEY_ATTR_KEY_ID] || 233 | !attrs[OVPN_NEW_KEY_ATTR_CIPHER_ALG] || 234 | !attrs[OVPN_NEW_KEY_ATTR_ENCRYPT_KEY] || 235 | !attrs[OVPN_NEW_KEY_ATTR_DECRYPT_KEY]) 236 | return -EINVAL; 237 | 238 | peer_id = nla_get_u32(attrs[OVPN_NEW_KEY_ATTR_PEER_ID]); 239 | pkr.slot = nla_get_u8(attrs[OVPN_NEW_KEY_ATTR_KEY_SLOT]); 240 | pkr.key.key_id = nla_get_u16(attrs[OVPN_NEW_KEY_ATTR_KEY_ID]); 241 | 242 | pkr.key.cipher_alg = nla_get_u16(attrs[OVPN_NEW_KEY_ATTR_CIPHER_ALG]); 243 | 244 | ret = ovpn_netlink_get_key_dir(info, attrs[OVPN_NEW_KEY_ATTR_ENCRYPT_KEY], 245 | pkr.key.cipher_alg, &pkr.key.encrypt); 246 | if (ret < 0) 247 | return ret; 248 | 249 | ret = ovpn_netlink_get_key_dir(info, attrs[OVPN_NEW_KEY_ATTR_DECRYPT_KEY], 250 | pkr.key.cipher_alg, &pkr.key.decrypt); 251 | if (ret < 0) 252 | return ret; 253 | 254 | peer = ovpn_peer_lookup_id(ovpn, peer_id); 255 | if (!peer) { 256 | netdev_dbg(ovpn->dev, "%s: no peer with id %u to set 
key for\n", __func__, peer_id); 257 | return -ENOENT; 258 | } 259 | 260 | mutex_lock(&peer->crypto.mutex); 261 | ret = ovpn_crypto_state_reset(&peer->crypto, &pkr); 262 | if (ret < 0) { 263 | netdev_dbg(ovpn->dev, "%s: cannot install new key for peer %u\n", __func__, 264 | peer_id); 265 | goto unlock; 266 | } 267 | 268 | netdev_dbg(ovpn->dev, "%s: new key installed (id=%u) for peer %u\n", __func__, 269 | pkr.key.key_id, peer_id); 270 | unlock: 271 | mutex_unlock(&peer->crypto.mutex); 272 | ovpn_peer_put(peer); 273 | return ret; 274 | } 275 | 276 | static int ovpn_netlink_del_key(struct sk_buff *skb, struct genl_info *info) 277 | { 278 | struct nlattr *attrs[OVPN_DEL_KEY_ATTR_MAX + 1]; 279 | struct ovpn_struct *ovpn = info->user_ptr[0]; 280 | enum ovpn_key_slot slot; 281 | struct ovpn_peer *peer; 282 | u32 peer_id; 283 | int ret; 284 | 285 | if (!info->attrs[OVPN_ATTR_DEL_KEY]) 286 | return -EINVAL; 287 | 288 | ret = nla_parse_nested(attrs, OVPN_DEL_KEY_ATTR_MAX, info->attrs[OVPN_ATTR_DEL_KEY], NULL, 289 | info->extack); 290 | if (ret) 291 | return ret; 292 | 293 | if (!attrs[OVPN_DEL_KEY_ATTR_PEER_ID] || !attrs[OVPN_DEL_KEY_ATTR_KEY_SLOT]) 294 | return -EINVAL; 295 | 296 | peer_id = nla_get_u32(attrs[OVPN_DEL_KEY_ATTR_PEER_ID]); 297 | slot = nla_get_u8(attrs[OVPN_DEL_KEY_ATTR_KEY_SLOT]); 298 | 299 | peer = ovpn_peer_lookup_id(ovpn, peer_id); 300 | if (!peer) 301 | return -ENOENT; 302 | 303 | ovpn_crypto_key_slot_delete(&peer->crypto, slot); 304 | ovpn_peer_put(peer); 305 | 306 | return 0; 307 | } 308 | 309 | static int ovpn_netlink_swap_keys(struct sk_buff *skb, struct genl_info *info) 310 | { 311 | struct nlattr *attrs[OVPN_SWAP_KEYS_ATTR_MAX + 1]; 312 | struct ovpn_struct *ovpn = info->user_ptr[0]; 313 | struct ovpn_peer *peer; 314 | u32 peer_id; 315 | int ret; 316 | 317 | if (!info->attrs[OVPN_ATTR_SWAP_KEYS]) 318 | return -EINVAL; 319 | 320 | ret = nla_parse_nested(attrs, OVPN_SWAP_KEYS_ATTR_MAX, info->attrs[OVPN_ATTR_SWAP_KEYS], 321 | NULL, info->extack); 322 
| if (ret) 323 | return ret; 324 | 325 | if (!attrs[OVPN_SWAP_KEYS_ATTR_PEER_ID]) 326 | return -EINVAL; 327 | 328 | peer_id = nla_get_u32(attrs[OVPN_SWAP_KEYS_ATTR_PEER_ID]); 329 | 330 | peer = ovpn_peer_lookup_id(ovpn, peer_id); 331 | if (!peer) 332 | return -ENOENT; 333 | 334 | ovpn_crypto_key_slots_swap(&peer->crypto); 335 | ovpn_peer_put(peer); 336 | 337 | return 0; 338 | } 339 | 340 | static int ovpn_netlink_new_peer(struct sk_buff *skb, struct genl_info *info) 341 | { 342 | struct nlattr *attrs[OVPN_NEW_PEER_ATTR_MAX + 1]; 343 | struct ovpn_struct *ovpn = info->user_ptr[0]; 344 | struct sockaddr_storage *ss = NULL; 345 | struct sockaddr_in mapped; 346 | struct sockaddr_in6 *in6; 347 | struct ovpn_peer *peer; 348 | size_t sa_len, ip_len; 349 | struct socket *sock; 350 | u8 *local_ip = NULL; 351 | u32 sockfd, id; 352 | int ret; 353 | 354 | if (!info->attrs[OVPN_ATTR_NEW_PEER]) 355 | return -EINVAL; 356 | 357 | ret = nla_parse_nested(attrs, OVPN_NEW_PEER_ATTR_MAX, info->attrs[OVPN_ATTR_NEW_PEER], NULL, 358 | info->extack); 359 | if (ret) 360 | return ret; 361 | 362 | if (!attrs[OVPN_NEW_PEER_ATTR_PEER_ID] || !attrs[OVPN_NEW_PEER_ATTR_SOCKET]) { 363 | netdev_err(ovpn->dev, "%s: basic attributes missing\n", __func__); 364 | return -EINVAL; 365 | } 366 | 367 | 368 | if (ovpn->mode == OVPN_MODE_MP && !attrs[OVPN_NEW_PEER_ATTR_IPV4] && 369 | !attrs[OVPN_NEW_PEER_ATTR_IPV6]) { 370 | netdev_err(ovpn->dev, "%s: a VPN IP is required when adding a peer in MP mode\n", 371 | __func__); 372 | return -EINVAL; 373 | } 374 | 375 | /* lookup the fd in the kernel table and extract the socket object */ 376 | sockfd = nla_get_u32(attrs[OVPN_NEW_PEER_ATTR_SOCKET]); 377 | /* sockfd_lookup() increases sock's refcounter */ 378 | sock = sockfd_lookup(sockfd, &ret); 379 | if (!sock) { 380 | netdev_dbg(ovpn->dev, "%s: cannot lookup peer socket (fd=%u): %d\n", __func__, 381 | sockfd, ret); 382 | return -ENOTSOCK; 383 | } 384 | 385 | /* Only when using UDP as transport protocol the remote 
endpoint must be configured 386 | * so that ovpn-dco knows where to send packets to. 387 | * 388 | * In case of TCP, the socket is connected to the peer and ovpn-dco will just send bytes 389 | * over it, without the need to specify a destination. 390 | */ 391 | if (sock->sk->sk_protocol == IPPROTO_UDP) { 392 | ret = -EINVAL; 393 | 394 | if (!attrs[OVPN_NEW_PEER_ATTR_SOCKADDR_REMOTE]) { 395 | netdev_err(ovpn->dev, "%s: cannot add UDP peer with no remote endpoint\n", 396 | __func__); 397 | goto sockfd_release; 398 | } 399 | 400 | ss = nla_data(attrs[OVPN_NEW_PEER_ATTR_SOCKADDR_REMOTE]); 401 | sa_len = nla_len(attrs[OVPN_NEW_PEER_ATTR_SOCKADDR_REMOTE]); 402 | switch (sa_len) { 403 | case sizeof(struct sockaddr_in): 404 | if (ss->ss_family == AF_INET) 405 | /* valid sockaddr */ 406 | break; 407 | 408 | netdev_err(ovpn->dev, "%s: remote sockaddr_in has invalid family\n", 409 | __func__); 410 | goto sockfd_release; 411 | case sizeof(struct sockaddr_in6): 412 | if (ss->ss_family == AF_INET6) 413 | /* valid sockaddr */ 414 | break; 415 | 416 | netdev_err(ovpn->dev, "%s: remote sockaddr_in6 has invalid family\n", 417 | __func__); 418 | goto sockfd_release; 419 | default: 420 | netdev_err(ovpn->dev, "%s: invalid size for sockaddr\n", __func__); 421 | goto sockfd_release; 422 | } 423 | 424 | if (ss->ss_family == AF_INET6) { 425 | in6 = (struct sockaddr_in6 *)ss; 426 | 427 | if (ipv6_addr_type(&in6->sin6_addr) & IPV6_ADDR_MAPPED) { 428 | mapped.sin_family = AF_INET; 429 | mapped.sin_addr.s_addr = in6->sin6_addr.s6_addr32[3]; 430 | mapped.sin_port = in6->sin6_port; 431 | ss = (struct sockaddr_storage *)&mapped; 432 | } 433 | } 434 | 435 | /* When using UDP we may be talking over socket bound to 0.0.0.0/::. 436 | * In this case, if the host has multiple IPs, we need to make sure 437 | * that outgoing traffic has as source IP the same address that the 438 | * peer is using to reach us. 
439 | * 440 | * Since early control packets were all forwarded to userspace, we 441 | * need the latter to tell us what IP has to be used. 442 | */ 443 | if (attrs[OVPN_NEW_PEER_ATTR_LOCAL_IP]) { 444 | ip_len = nla_len(attrs[OVPN_NEW_PEER_ATTR_LOCAL_IP]); 445 | local_ip = nla_data(attrs[OVPN_NEW_PEER_ATTR_LOCAL_IP]); 446 | 447 | if (ip_len == sizeof(struct in_addr)) { 448 | if (ss->ss_family != AF_INET) { 449 | netdev_dbg(ovpn->dev, 450 | "%s: the specified local IP is IPv4, but the peer endpoint is not\n", 451 | __func__); 452 | goto sockfd_release; 453 | } 454 | } else if (ip_len == sizeof(struct in6_addr)) { 455 | bool is_mapped = ipv6_addr_type((struct in6_addr *)local_ip) & 456 | IPV6_ADDR_MAPPED; 457 | 458 | if (ss->ss_family != AF_INET6 && !is_mapped) { 459 | netdev_dbg(ovpn->dev, 460 | "%s: the specified local IP is IPv6, but the peer endpoint is not\n", 461 | __func__); 462 | goto sockfd_release; 463 | } 464 | 465 | if (is_mapped) 466 | /* this is an IPv6-mapped IPv4 address, therefore extract 467 | * the actual v4 address from the last 4 bytes 468 | */ 469 | local_ip += 12; 470 | } else { 471 | netdev_dbg(ovpn->dev, 472 | "%s: invalid length %zu for local IP\n", __func__, 473 | ip_len); 474 | goto sockfd_release; 475 | } 476 | } 477 | 478 | /* sanity checks passed */ 479 | ret = 0; 480 | } 481 | 482 | id = nla_get_u32(attrs[OVPN_NEW_PEER_ATTR_PEER_ID]); 483 | peer = ovpn_peer_new(ovpn, ss, sock, id, local_ip); 484 | if (IS_ERR(peer)) { 485 | netdev_err(ovpn->dev, "%s: cannot create new peer object for peer %u (sockaddr=%pIScp): %ld\n", 486 | __func__, id, ss, PTR_ERR(peer)); 487 | ret = PTR_ERR(peer); 488 | goto sockfd_release; 489 | } 490 | 491 | if (attrs[OVPN_NEW_PEER_ATTR_IPV4]) { 492 | if (nla_len(attrs[OVPN_NEW_PEER_ATTR_IPV4]) != sizeof(struct in_addr)) { 493 | ret = -EINVAL; 494 | goto peer_release; 495 | } 496 | 497 | peer->vpn_addrs.ipv4.s_addr = nla_get_be32(attrs[OVPN_NEW_PEER_ATTR_IPV4]); 498 | } 499 | 500 | if 
(attrs[OVPN_NEW_PEER_ATTR_IPV6]) { 501 | if (nla_len(attrs[OVPN_NEW_PEER_ATTR_IPV6]) != sizeof(struct in6_addr)) { 502 | ret = -EINVAL; 503 | goto peer_release; 504 | } 505 | 506 | memcpy(&peer->vpn_addrs.ipv6, nla_data(attrs[OVPN_NEW_PEER_ATTR_IPV6]), 507 | sizeof(struct in6_addr)); 508 | } 509 | 510 | netdev_dbg(ovpn->dev, 511 | "%s: adding peer with endpoint=%pIScp/%s id=%u VPN-IPv4=%pI4 VPN-IPv6=%pI6c\n", 512 | __func__, ss, sock->sk->sk_prot_creator->name, peer->id, 513 | &peer->vpn_addrs.ipv4.s_addr, &peer->vpn_addrs.ipv6); 514 | 515 | ret = ovpn_peer_add(ovpn, peer); 516 | if (ret < 0) { 517 | netdev_err(ovpn->dev, "%s: cannot add new peer (id=%u) to hashtable: %d\n", 518 | __func__, peer->id, ret); 519 | goto peer_release; 520 | } 521 | 522 | return 0; 523 | 524 | peer_release: 525 | /* release right away because peer is not really used in any context */ 526 | ovpn_peer_release(peer); 527 | return ret; 528 | 529 | sockfd_release: 530 | sockfd_put(sock); 531 | return ret; 532 | } 533 | 534 | static int ovpn_netlink_set_peer(struct sk_buff *skb, struct genl_info *info) 535 | { 536 | struct nlattr *attrs[OVPN_SET_PEER_ATTR_MAX + 1]; 537 | struct ovpn_struct *ovpn = info->user_ptr[0]; 538 | u32 peer_id, interv, timeout; 539 | bool keepalive_set = false; 540 | struct ovpn_peer *peer; 541 | int ret; 542 | 543 | if (!info->attrs[OVPN_ATTR_SET_PEER]) 544 | return -EINVAL; 545 | 546 | ret = nla_parse_nested(attrs, OVPN_SET_PEER_ATTR_MAX, info->attrs[OVPN_ATTR_SET_PEER], NULL, 547 | info->extack); 548 | if (ret) 549 | return ret; 550 | 551 | if (!attrs[OVPN_SET_PEER_ATTR_PEER_ID]) 552 | return -EINVAL; 553 | 554 | peer_id = nla_get_u32(attrs[OVPN_SET_PEER_ATTR_PEER_ID]); 555 | 556 | peer = ovpn_peer_lookup_id(ovpn, peer_id); 557 | if (!peer) 558 | return -ENOENT; 559 | 560 | /* when setting the keepalive, both parameters have to be configured */ 561 | if (attrs[OVPN_SET_PEER_ATTR_KEEPALIVE_INTERVAL] && 562 | attrs[OVPN_SET_PEER_ATTR_KEEPALIVE_TIMEOUT]) { 563 | 
keepalive_set = true; 564 | interv = nla_get_u32(attrs[OVPN_SET_PEER_ATTR_KEEPALIVE_INTERVAL]); 565 | timeout = nla_get_u32(attrs[OVPN_SET_PEER_ATTR_KEEPALIVE_TIMEOUT]); 566 | } 567 | 568 | if (keepalive_set) 569 | ovpn_peer_keepalive_set(peer, interv, timeout); 570 | 571 | ovpn_peer_put(peer); 572 | return 0; 573 | } 574 | 575 | static int ovpn_netlink_send_peer(struct sk_buff *skb, const struct ovpn_peer *peer, u32 portid, 576 | u32 seq, int flags) 577 | { 578 | const struct ovpn_bind *bind; 579 | struct nlattr *attr; 580 | void *hdr; 581 | 582 | hdr = genlmsg_put(skb, portid, seq, &ovpn_netlink_family, flags, OVPN_CMD_GET_PEER); 583 | if (!hdr) { 584 | netdev_dbg(peer->ovpn->dev, "%s: cannot create message header\n", __func__); 585 | return -EMSGSIZE; 586 | } 587 | 588 | attr = nla_nest_start(skb, OVPN_ATTR_GET_PEER); 589 | if (!attr) { 590 | netdev_dbg(peer->ovpn->dev, "%s: cannot create submessage\n", __func__); 591 | goto err; 592 | } 593 | 594 | if (nla_put_u32(skb, OVPN_GET_PEER_RESP_ATTR_PEER_ID, peer->id)) 595 | goto err; 596 | 597 | if (peer->vpn_addrs.ipv4.s_addr != htonl(INADDR_ANY)) 598 | if (nla_put(skb, OVPN_GET_PEER_RESP_ATTR_IPV4, sizeof(peer->vpn_addrs.ipv4), 599 | &peer->vpn_addrs.ipv4)) 600 | goto err; 601 | 602 | if (memcmp(&peer->vpn_addrs.ipv6, &in6addr_any, sizeof(peer->vpn_addrs.ipv6))) 603 | if (nla_put(skb, OVPN_GET_PEER_RESP_ATTR_IPV6, sizeof(peer->vpn_addrs.ipv6), 604 | &peer->vpn_addrs.ipv6)) 605 | goto err; 606 | 607 | if (nla_put_u32(skb, OVPN_GET_PEER_RESP_ATTR_KEEPALIVE_INTERVAL, 608 | peer->keepalive_interval) || 609 | nla_put_u32(skb, OVPN_GET_PEER_RESP_ATTR_KEEPALIVE_TIMEOUT, 610 | peer->keepalive_timeout)) 611 | goto err; 612 | 613 | rcu_read_lock(); 614 | bind = rcu_dereference(peer->bind); 615 | if (bind) { 616 | if (bind->sa.in4.sin_family == AF_INET) { 617 | if (nla_put(skb, OVPN_GET_PEER_RESP_ATTR_SOCKADDR_REMOTE, 618 | sizeof(bind->sa.in4), &bind->sa.in4) || 619 | nla_put(skb, OVPN_GET_PEER_RESP_ATTR_LOCAL_IP, 620 | 
sizeof(bind->local.ipv4), &bind->local.ipv4)) 621 | goto err_unlock; 622 | } else if (bind->sa.in4.sin_family == AF_INET6) { 623 | if (nla_put(skb, OVPN_GET_PEER_RESP_ATTR_SOCKADDR_REMOTE, 624 | sizeof(bind->sa.in6), &bind->sa.in6) || 625 | nla_put(skb, OVPN_GET_PEER_RESP_ATTR_LOCAL_IP, 626 | sizeof(bind->local.ipv6), &bind->local.ipv6)) 627 | goto err_unlock; 628 | } 629 | } 630 | rcu_read_unlock(); 631 | 632 | if (nla_put_net16(skb, OVPN_GET_PEER_RESP_ATTR_LOCAL_PORT, 633 | inet_sk(peer->sock->sock->sk)->inet_sport) || 634 | /* VPN RX stats */ 635 | nla_put_u64_64bit(skb, OVPN_GET_PEER_RESP_ATTR_VPN_RX_BYTES, 636 | atomic64_read(&peer->vpn_stats.rx.bytes), 637 | OVPN_GET_PEER_RESP_ATTR_UNSPEC) || 638 | nla_put_u32(skb, OVPN_GET_PEER_RESP_ATTR_VPN_RX_PACKETS, 639 | atomic_read(&peer->vpn_stats.rx.packets)) || 640 | /* VPN TX stats */ 641 | nla_put_u64_64bit(skb, OVPN_GET_PEER_RESP_ATTR_VPN_TX_BYTES, 642 | atomic64_read(&peer->vpn_stats.tx.bytes), 643 | OVPN_GET_PEER_RESP_ATTR_UNSPEC) || 644 | nla_put_u32(skb, OVPN_GET_PEER_RESP_ATTR_VPN_TX_PACKETS, 645 | atomic_read(&peer->vpn_stats.tx.packets)) || 646 | /* link RX stats */ 647 | nla_put_u64_64bit(skb, OVPN_GET_PEER_RESP_ATTR_LINK_RX_BYTES, 648 | atomic64_read(&peer->link_stats.rx.bytes), 649 | OVPN_GET_PEER_RESP_ATTR_UNSPEC) || 650 | nla_put_u32(skb, OVPN_GET_PEER_RESP_ATTR_LINK_RX_PACKETS, 651 | atomic_read(&peer->link_stats.rx.packets)) || 652 | /* link TX stats */ 653 | nla_put_u64_64bit(skb, OVPN_GET_PEER_RESP_ATTR_LINK_TX_BYTES, 654 | atomic64_read(&peer->link_stats.tx.bytes), 655 | OVPN_GET_PEER_RESP_ATTR_UNSPEC) || 656 | nla_put_u32(skb, OVPN_GET_PEER_RESP_ATTR_LINK_TX_PACKETS, 657 | atomic_read(&peer->link_stats.tx.packets))) 658 | goto err; 659 | 660 | nla_nest_end(skb, attr); 661 | genlmsg_end(skb, hdr); 662 | 663 | return 0; 664 | err_unlock: 665 | rcu_read_unlock(); 666 | err: 667 | genlmsg_cancel(skb, hdr); 668 | return -EMSGSIZE; 669 | } 670 | 671 | static int ovpn_netlink_get_peer(struct sk_buff 
*skb, struct genl_info *info) 672 | { 673 | struct nlattr *attrs[OVPN_SET_PEER_ATTR_MAX + 1]; 674 | struct ovpn_struct *ovpn = info->user_ptr[0]; 675 | struct ovpn_peer *peer; 676 | struct sk_buff *msg; 677 | u32 peer_id; 678 | int ret; 679 | 680 | if (!info->attrs[OVPN_ATTR_GET_PEER]) 681 | return -EINVAL; 682 | 683 | ret = nla_parse_nested(attrs, OVPN_GET_PEER_ATTR_MAX, info->attrs[OVPN_ATTR_GET_PEER], NULL, 684 | info->extack); 685 | if (ret) 686 | return ret; 687 | 688 | if (!attrs[OVPN_GET_PEER_ATTR_PEER_ID]) 689 | return -EINVAL; 690 | 691 | peer_id = nla_get_u32(attrs[OVPN_GET_PEER_ATTR_PEER_ID]); 692 | peer = ovpn_peer_lookup_id(ovpn, peer_id); 693 | if (!peer) 694 | return -ENOENT; 695 | 696 | msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 697 | if (!msg) 698 | return -ENOMEM; 699 | 700 | ret = ovpn_netlink_send_peer(msg, peer, info->snd_portid, info->snd_seq, 0); 701 | if (ret < 0) { 702 | nlmsg_free(msg); 703 | goto err; 704 | } 705 | 706 | ret = genlmsg_reply(msg, info); 707 | err: 708 | ovpn_peer_put(peer); 709 | return ret; 710 | } 711 | 712 | static int ovpn_netlink_dump_peers(struct sk_buff *skb, struct netlink_callback *cb) 713 | { 714 | struct net *netns = sock_net(cb->skb->sk); 715 | struct nlattr **attrbuf; 716 | struct ovpn_struct *ovpn; 717 | struct net_device *dev; 718 | int ret, bkt, last_idx = cb->args[1], dumped = 0; 719 | struct ovpn_peer *peer; 720 | 721 | attrbuf = kcalloc(OVPN_ATTR_MAX + 1, sizeof(*attrbuf), GFP_KERNEL); 722 | if (!attrbuf) 723 | return -ENOMEM; 724 | 725 | ret = nlmsg_parse_deprecated(cb->nlh, GENL_HDRLEN, attrbuf, OVPN_ATTR_MAX, 726 | ovpn_netlink_policy, NULL); 727 | if (ret < 0) { 728 | pr_err("ovpn: cannot parse incoming request in %s: %d\n", __func__, ret); 729 | goto err; 730 | } 731 | 732 | dev = ovpn_get_dev_from_attrs(netns, attrbuf); 733 | if (IS_ERR(dev)) { 734 | ret = PTR_ERR(dev); 735 | pr_err("ovpn: cannot retrieve device in %s: %d\n", __func__, ret); 736 | goto err; 737 | } 738 | 739 | ovpn = 
netdev_priv(dev); 740 | 741 | rcu_read_lock(); 742 | hash_for_each_rcu(ovpn->peers.by_id, bkt, peer, hash_entry_id) { 743 | /* skip already dumped peers that were dumped by previous invocations */ 744 | if (last_idx > 0) { 745 | last_idx--; 746 | continue; 747 | } 748 | 749 | if (ovpn_netlink_send_peer(skb, peer, NETLINK_CB(cb->skb).portid, 750 | cb->nlh->nlmsg_seq, NLM_F_MULTI) < 0) 751 | break; 752 | 753 | /* count peers being dumped during this invocation */ 754 | dumped++; 755 | } 756 | rcu_read_unlock(); 757 | 758 | dev_put(dev); 759 | 760 | /* sum up peers dumped in this message, so that at the next invocation 761 | * we can continue from where we left 762 | */ 763 | cb->args[1] += dumped; 764 | ret = skb->len; 765 | err: 766 | kfree(attrbuf); 767 | return ret; 768 | } 769 | 770 | static int ovpn_netlink_del_peer(struct sk_buff *skb, struct genl_info *info) 771 | { 772 | struct nlattr *attrs[OVPN_SET_PEER_ATTR_MAX + 1]; 773 | struct ovpn_struct *ovpn = info->user_ptr[0]; 774 | struct ovpn_peer *peer; 775 | u32 peer_id; 776 | int ret; 777 | 778 | if (!info->attrs[OVPN_ATTR_DEL_PEER]) 779 | return -EINVAL; 780 | 781 | ret = nla_parse_nested(attrs, OVPN_DEL_PEER_ATTR_MAX, info->attrs[OVPN_ATTR_DEL_PEER], NULL, 782 | info->extack); 783 | if (ret) 784 | return ret; 785 | 786 | if (!attrs[OVPN_DEL_PEER_ATTR_PEER_ID]) 787 | return -EINVAL; 788 | 789 | peer_id = nla_get_u32(attrs[OVPN_DEL_PEER_ATTR_PEER_ID]); 790 | 791 | peer = ovpn_peer_lookup_id(ovpn, peer_id); 792 | if (!peer) 793 | return -ENOENT; 794 | 795 | netdev_dbg(ovpn->dev, "%s: peer id=%u\n", __func__, peer->id); 796 | ret = ovpn_peer_del(peer, OVPN_DEL_PEER_REASON_USERSPACE); 797 | ovpn_peer_put(peer); 798 | 799 | return ret; 800 | } 801 | 802 | static const struct genl_small_ops ovpn_netlink_ops[] = { 803 | { 804 | .cmd = OVPN_CMD_NEW_PEER, 805 | .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO, 806 | .doit = ovpn_netlink_new_peer, 807 | }, 808 | { 809 | .cmd = OVPN_CMD_SET_PEER, 810 | .flags = 
GENL_ADMIN_PERM | GENL_CMD_CAP_DO, 811 | .doit = ovpn_netlink_set_peer, 812 | }, 813 | { 814 | .cmd = OVPN_CMD_DEL_PEER, 815 | .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO, 816 | .doit = ovpn_netlink_del_peer, 817 | }, 818 | { 819 | .cmd = OVPN_CMD_GET_PEER, 820 | .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO | GENL_CMD_CAP_DUMP, 821 | .doit = ovpn_netlink_get_peer, 822 | .dumpit = ovpn_netlink_dump_peers, 823 | }, 824 | { 825 | .cmd = OVPN_CMD_NEW_KEY, 826 | .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO, 827 | .doit = ovpn_netlink_new_key, 828 | }, 829 | { 830 | .cmd = OVPN_CMD_DEL_KEY, 831 | .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO, 832 | .doit = ovpn_netlink_del_key, 833 | }, 834 | { 835 | .cmd = OVPN_CMD_SWAP_KEYS, 836 | .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO, 837 | .doit = ovpn_netlink_swap_keys, 838 | }, 839 | }; 840 | 841 | static struct genl_family ovpn_netlink_family __ro_after_init = { 842 | .hdrsize = 0, 843 | .name = OVPN_NL_NAME, 844 | .version = 1, 845 | .maxattr = OVPN_ATTR_MAX, 846 | .policy = ovpn_netlink_policy, 847 | .netnsok = true, 848 | .pre_doit = ovpn_pre_doit, 849 | .post_doit = ovpn_post_doit, 850 | .module = THIS_MODULE, 851 | .small_ops = ovpn_netlink_ops, 852 | .n_small_ops = ARRAY_SIZE(ovpn_netlink_ops), 853 | .mcgrps = ovpn_netlink_mcgrps, 854 | .n_mcgrps = ARRAY_SIZE(ovpn_netlink_mcgrps), 855 | }; 856 | 857 | int ovpn_netlink_notify_del_peer(struct ovpn_peer *peer) 858 | { 859 | struct sk_buff *msg; 860 | struct nlattr *attr; 861 | void *hdr; 862 | int ret; 863 | 864 | netdev_info(peer->ovpn->dev, "deleting peer with id %u, reason %d\n", 865 | peer->id, peer->delete_reason); 866 | 867 | msg = nlmsg_new(100, GFP_KERNEL); 868 | if (!msg) 869 | return -ENOMEM; 870 | 871 | hdr = genlmsg_put(msg, 0, 0, &ovpn_netlink_family, 0, 872 | OVPN_CMD_DEL_PEER); 873 | if (!hdr) { 874 | ret = -ENOBUFS; 875 | goto err_free_msg; 876 | } 877 | 878 | if (nla_put_u32(msg, OVPN_ATTR_IFINDEX, peer->ovpn->dev->ifindex)) { 879 | ret = -EMSGSIZE; 880 | goto 
err_free_msg; 881 | } 882 | 883 | attr = nla_nest_start(msg, OVPN_ATTR_DEL_PEER); 884 | if (!attr) { 885 | ret = -EMSGSIZE; 886 | goto err_free_msg; 887 | } 888 | 889 | if (nla_put_u8(msg, OVPN_DEL_PEER_ATTR_REASON, peer->delete_reason)) { 890 | ret = -EMSGSIZE; 891 | goto err_free_msg; 892 | } 893 | 894 | if (nla_put_u32(msg, OVPN_DEL_PEER_ATTR_PEER_ID, peer->id)) { 895 | ret = -EMSGSIZE; 896 | goto err_free_msg; 897 | } 898 | 899 | nla_nest_end(msg, attr); 900 | 901 | genlmsg_end(msg, hdr); 902 | 903 | genlmsg_multicast_netns(&ovpn_netlink_family, dev_net(peer->ovpn->dev), 904 | msg, 0, OVPN_MCGRP_PEERS, GFP_KERNEL); 905 | 906 | return 0; 907 | 908 | err_free_msg: 909 | nlmsg_free(msg); 910 | return ret; 911 | } 912 | 913 | static int ovpn_netlink_notify(struct notifier_block *nb, unsigned long state, 914 | void *_notify) 915 | { 916 | struct netlink_notify *notify = _notify; 917 | struct ovpn_struct *ovpn; 918 | struct net_device *dev; 919 | struct net *netns; 920 | bool found = false; 921 | 922 | if (state != NETLINK_URELEASE || notify->protocol != NETLINK_GENERIC) 923 | return NOTIFY_DONE; 924 | 925 | rcu_read_lock(); 926 | for_each_net_rcu(netns) { 927 | for_each_netdev_rcu(netns, dev) { 928 | if (!ovpn_dev_is_valid(dev)) 929 | continue; 930 | 931 | ovpn = netdev_priv(dev); 932 | if (notify->portid != ovpn->registered_nl_portid) 933 | continue; 934 | 935 | found = true; 936 | netdev_dbg(ovpn->dev, "%s: deregistering userspace listener\n", __func__); 937 | ovpn->registered_nl_portid_set = false; 938 | break; 939 | } 940 | } 941 | rcu_read_unlock(); 942 | 943 | /* if no interface matched our purposes, pass the notification along */ 944 | if (!found) 945 | return NOTIFY_DONE; 946 | 947 | return NOTIFY_OK; 948 | } 949 | 950 | static struct notifier_block ovpn_netlink_notifier = { 951 | .notifier_call = ovpn_netlink_notify, 952 | }; 953 | 954 | int ovpn_netlink_init(struct ovpn_struct *ovpn) 955 | { 956 | ovpn->registered_nl_portid_set = false; 957 | 958 | return 
0; 959 | } 960 | 961 | /** 962 | * ovpn_netlink_register() - register the ovpn genl netlink family 963 | */ 964 | int __init ovpn_netlink_register(void) 965 | { 966 | int ret; 967 | 968 | ret = genl_register_family(&ovpn_netlink_family); 969 | if (ret) { 970 | pr_err("ovpn: genl_register_family() failed: %d\n", ret); 971 | return ret; 972 | } 973 | 974 | ret = netlink_register_notifier(&ovpn_netlink_notifier); 975 | if (ret) { 976 | pr_err("ovpn: netlink_register_notifier() failed: %d\n", ret); 977 | goto err; 978 | } 979 | 980 | return 0; 981 | err: 982 | genl_unregister_family(&ovpn_netlink_family); 983 | return ret; 984 | } 985 | 986 | /** 987 | * ovpn_netlink_unregister() - unregister the ovpn genl netlink family 988 | */ 989 | void __exit ovpn_netlink_unregister(void) 990 | { 991 | netlink_unregister_notifier(&ovpn_netlink_notifier); 992 | genl_unregister_family(&ovpn_netlink_family); 993 | } 994 | --------------------------------------------------------------------------------