/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 */

#ifndef _WG_QUEUEING_H
#define _WG_QUEUEING_H

#include "peer.h"
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ip_tunnels.h>

struct wg_device;
struct wg_peer;
struct multicore_worker;
struct crypt_queue;
struct prev_queue;
struct sk_buff;

/* queueing.c APIs: */
int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function,
			 unsigned int len);
void wg_packet_queue_free(struct crypt_queue *queue, bool purge);
struct multicore_worker __percpu *
wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr);

/* receive.c APIs: */
void wg_packet_receive(struct wg_device *wg, struct sk_buff *skb);
void wg_packet_handshake_receive_worker(struct work_struct *work);
/* NAPI poll function: */
int wg_packet_rx_poll(struct napi_struct *napi, int budget);
/* Workqueue worker: */
void wg_packet_decrypt_worker(struct work_struct *work);

/* send.c APIs: */
void wg_packet_send_queued_handshake_initiation(struct wg_peer *peer,
						bool is_retry);
void wg_packet_send_handshake_response(struct wg_peer *peer);
void wg_packet_send_handshake_cookie(struct wg_device *wg,
				     struct sk_buff *initiating_skb,
				     __le32 sender_index);
void wg_packet_send_keepalive(struct wg_peer *peer);
void wg_packet_purge_staged_packets(struct wg_peer *peer);
void wg_packet_send_staged_packets(struct wg_peer *peer);
/* Workqueue workers: */
void wg_packet_handshake_send_worker(struct work_struct *work);
void wg_packet_tx_worker(struct work_struct *work);
void wg_packet_encrypt_worker(struct work_struct *work);

enum packet_state {
	PACKET_STATE_UNCRYPTED,
	PACKET_STATE_CRYPTED,
	PACKET_STATE_DEAD
};

struct packet_cb {
	u64 nonce;
	struct noise_keypair *keypair;
	atomic_t state;
	u32 mtu;
	u8 ds;
};

#define PACKET_CB(skb) ((struct packet_cb *)((skb)->cb))
#define PACKET_PEER(skb) (PACKET_CB(skb)->keypair->entry.peer)

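/* PACKET_CB() overlays the generic skb->cb scratch area (48 bytes), so
 * struct packet_cb must stay within that size. A typical access looks like
 * atomic_read(&PACKET_CB(skb)->state), and is valid only for skbs owned by
 * this driver; PACKET_PEER() additionally requires that the keypair pointer
 * has already been set.
 */

/* Returns true only if skb->protocol agrees with the IP version actually
 * present in the packet's network header, as parsed by
 * ip_tunnel_parse_protocol().
 */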
static inline bool wg_check_packet_protocol(struct sk_buff *skb)
{
	__be16 real_protocol = ip_tunnel_parse_protocol(skb);
	return real_protocol && skb->protocol == real_protocol;
}

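/* Scrubs an skb of nearly all state and metadata so it can safely cross the
 * tunnel boundary. When encapsulating, the flow hash is preserved so that the
 * resulting outer UDP packet keeps the same steering as the inner flow.
 */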
static inline void wg_reset_packet(struct sk_buff *skb, bool encapsulating)
{
	u8 l4_hash = skb->l4_hash;
	u8 sw_hash = skb->sw_hash;
	u32 hash = skb->hash;
	skb_scrub_packet(skb, true);
	memset(&skb->headers_start, 0,
	       offsetof(struct sk_buff, headers_end) -
	       offsetof(struct sk_buff, headers_start));
	if (encapsulating) {
		skb->l4_hash = l4_hash;
		skb->sw_hash = sw_hash;
		skb->hash = hash;
	}
	skb->queue_mapping = 0;
	skb->nohdr = 0;
	skb->peeked = 0;
	skb->mac_len = 0;
	skb->dev = NULL;
#ifdef CONFIG_NET_SCHED
	skb->tc_index = 0;
#endif
	skb_reset_redirect(skb);
	skb->hdr_len = skb_headroom(skb);
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	skb_probe_transport_header(skb);
	skb_reset_inner_headers(skb);
}

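/* Returns a stable online CPU for the given id, caching the choice in
 * *stored_cpu. The cached value is recomputed only when no valid choice has
 * been made yet or the cached CPU has gone offline, so work keyed to the same
 * id keeps landing on the same CPU.
 */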
static inline int wg_cpumask_choose_online(int *stored_cpu, unsigned int id)
{
	unsigned int cpu = *stored_cpu, cpu_index, i;

	if (unlikely(cpu == nr_cpumask_bits ||
		     !cpumask_test_cpu(cpu, cpu_online_mask))) {
		cpu_index = id % cpumask_weight(cpu_online_mask);
		cpu = cpumask_first(cpu_online_mask);
		for (i = 0; i < cpu_index; ++i)
			cpu = cpumask_next(cpu, cpu_online_mask);
		*stored_cpu = cpu;
	}
	return cpu;
}

/* This function is racy, in the sense that next is unlocked, so it could return
 * the same CPU twice. A race-free version of this would be to instead store an
 * atomic sequence number, do an increment-and-return, and then iterate through
 * every possible CPU until we get to that index -- choose_cpu. However that's
 * a bit slower, and it doesn't seem like this potential race actually
 * introduces any performance loss, so we live with it.
 */
static inline int wg_cpumask_next_online(int *next)
{
	int cpu = *next;

	while (unlikely(!cpumask_test_cpu(cpu, cpu_online_mask)))
		cpu = cpumask_next(cpu, cpu_online_mask) % nr_cpumask_bits;
	*next = cpumask_next(cpu, cpu_online_mask) % nr_cpumask_bits;
	return cpu;
}

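/* The prev_queue functions below implement the per-peer queue: multiple
 * producers may enqueue concurrently, but only a single consumer may dequeue.
 * That single-consumer discipline is what preserves per-peer packet ordering
 * while the actual crypto work runs in parallel on the device queue.
 */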
void wg_prev_queue_init(struct prev_queue *queue);

/* Multi producer */
bool wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb);

/* Single consumer */
struct sk_buff *wg_prev_queue_dequeue(struct prev_queue *queue);

/* Single consumer */
static inline struct sk_buff *wg_prev_queue_peek(struct prev_queue *queue)
{
	if (queue->peeked)
		return queue->peeked;
	queue->peeked = wg_prev_queue_dequeue(queue);
	return queue->peeked;
}

/* Single consumer */
static inline void wg_prev_queue_drop_peeked(struct prev_queue *queue)
{
	queue->peeked = NULL;
}

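/* Enqueues an skb on both the ordered per-peer queue and the unordered
 * per-device crypto queue, then kicks a worker on the next online CPU in
 * round-robin fashion. An illustrative call from an encryption path -- the
 * names here are hypothetical, not the exact send.c call site -- might look
 * like:
 *
 *	if (wg_queue_enqueue_per_device_and_peer(&wg->encrypt_queue,
 *						 &peer->tx_queue, skb,
 *						 wg->packet_crypt_wq,
 *						 &next_cpu) < 0)
 *		kfree_skb(skb);
 */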
static inline int wg_queue_enqueue_per_device_and_peer(
	struct crypt_queue *device_queue, struct prev_queue *peer_queue,
	struct sk_buff *skb, struct workqueue_struct *wq, int *next_cpu)
{
	int cpu;

	atomic_set_release(&PACKET_CB(skb)->state, PACKET_STATE_UNCRYPTED);
	/* We first queue this up for the peer ingestion, but the consumer
	 * will wait for the state to change to CRYPTED or DEAD before
	 * ingesting it.
	 */
	if (unlikely(!wg_prev_queue_enqueue(peer_queue, skb)))
		return -ENOSPC;

	/* Then we queue it up in the device queue, which consumes the
	 * packet as soon as it can.
	 */
	cpu = wg_cpumask_next_online(next_cpu);
	if (unlikely(ptr_ring_produce_bh(&device_queue->ring, skb)))
		return -EPIPE;
	queue_work_on(cpu, wq, &per_cpu_ptr(device_queue->worker, cpu)->work);
	return 0;
}

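/* The two helpers below publish an skb's fate to the per-peer consumer: the
 * state is advanced to CRYPTED or DEAD, and then the peer's serialized
 * transmit worker (tx) or NAPI instance (rx) is kicked to consume the queue
 * in order.
 */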
static inline void wg_queue_enqueue_per_peer_tx(struct sk_buff *skb, enum packet_state state)
{
	/* We take a reference, because as soon as we call atomic_set, the
	 * peer can be freed from below us.
	 */
	struct wg_peer *peer = wg_peer_get(PACKET_PEER(skb));

	atomic_set_release(&PACKET_CB(skb)->state, state);
	queue_work_on(wg_cpumask_choose_online(&peer->serial_work_cpu, peer->internal_id),
		      peer->device->packet_crypt_wq, &peer->transmit_packet_work);
	wg_peer_put(peer);
}

static inline void wg_queue_enqueue_per_peer_rx(struct sk_buff *skb, enum packet_state state)
{
	/* We take a reference, because as soon as we call atomic_set, the
	 * peer can be freed from below us.
	 */
	struct wg_peer *peer = wg_peer_get(PACKET_PEER(skb));

	atomic_set_release(&PACKET_CB(skb)->state, state);
	napi_schedule(&peer->napi);
	wg_peer_put(peer);
}

#ifdef DEBUG
bool wg_packet_counter_selftest(void);
#endif

#endif /* _WG_QUEUEING_H */