// SPDX-License-Identifier: GPL-2.0-or-later
/* A network driver using virtio.
 *
 * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
 */
//#define DEBUG
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_net.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/scatterlist.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/average.h>
#include <linux/filter.h>
#include <linux/kernel.h>
#include <net/route.h>
#include <net/xdp.h>
#include <net/net_failover.h>

static int napi_weight = NAPI_POLL_WEIGHT;
module_param(napi_weight, int, 0444);

static bool csum = true, gso = true, napi_tx = true;
module_param(csum, bool, 0444);
module_param(gso, bool, 0444);
module_param(napi_tx, bool, 0644);

/* FIXME: MTU in config. */
#define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
#define GOOD_COPY_LEN	128

#define VIRTNET_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)

/* Amount of XDP headroom to prepend to packets for use by xdp_adjust_head */
#define VIRTIO_XDP_HEADROOM 256

/* Separating two types of XDP xmit */
#define VIRTIO_XDP_TX		BIT(0)
#define VIRTIO_XDP_REDIR	BIT(1)

#define VIRTIO_XDP_FLAG	BIT(0)

/* RX packet size EWMA. The average packet size is used to determine the packet
 * buffer size when refilling RX rings. As the entire RX ring may be refilled
 * at once, the weight is chosen so that the EWMA will be insensitive to short-
 * term, transient changes in packet size.
 */
DECLARE_EWMA(pkt_len, 0, 64)

#define VIRTNET_DRIVER_VERSION "1.0.0"

static const unsigned long guest_offloads[] = {
	VIRTIO_NET_F_GUEST_TSO4,
	VIRTIO_NET_F_GUEST_TSO6,
	VIRTIO_NET_F_GUEST_ECN,
	VIRTIO_NET_F_GUEST_UFO,
	VIRTIO_NET_F_GUEST_CSUM
};

#define GUEST_OFFLOAD_GRO_HW_MASK ((1ULL << VIRTIO_NET_F_GUEST_TSO4) | \
				   (1ULL << VIRTIO_NET_F_GUEST_TSO6) | \
				   (1ULL << VIRTIO_NET_F_GUEST_ECN)  | \
				   (1ULL << VIRTIO_NET_F_GUEST_UFO))

struct virtnet_stat_desc {
	char desc[ETH_GSTRING_LEN];
	size_t offset;
};

struct virtnet_sq_stats {
	struct u64_stats_sync syncp;
	u64 packets;
	u64 bytes;
	u64 xdp_tx;
	u64 xdp_tx_drops;
	u64 kicks;
};

struct virtnet_rq_stats {
	struct u64_stats_sync syncp;
	u64 packets;
	u64 bytes;
	u64 drops;
	u64 xdp_packets;
	u64 xdp_tx;
	u64 xdp_redirects;
	u64 xdp_drops;
	u64 kicks;
};

#define VIRTNET_SQ_STAT(m)	offsetof(struct virtnet_sq_stats, m)
#define VIRTNET_RQ_STAT(m)	offsetof(struct virtnet_rq_stats, m)

static const struct virtnet_stat_desc virtnet_sq_stats_desc[] = {
	{ "packets",		VIRTNET_SQ_STAT(packets) },
	{ "bytes",		VIRTNET_SQ_STAT(bytes) },
	{ "xdp_tx",		VIRTNET_SQ_STAT(xdp_tx) },
	{ "xdp_tx_drops",	VIRTNET_SQ_STAT(xdp_tx_drops) },
	{ "kicks",		VIRTNET_SQ_STAT(kicks) },
};

static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = {
	{ "packets",		VIRTNET_RQ_STAT(packets) },
	{ "bytes",		VIRTNET_RQ_STAT(bytes) },
	{ "drops",		VIRTNET_RQ_STAT(drops) },
	{ "xdp_packets",	VIRTNET_RQ_STAT(xdp_packets) },
	{ "xdp_tx",		VIRTNET_RQ_STAT(xdp_tx) },
	{ "xdp_redirects",	VIRTNET_RQ_STAT(xdp_redirects) },
	{ "xdp_drops",		VIRTNET_RQ_STAT(xdp_drops) },
	{ "kicks",		VIRTNET_RQ_STAT(kicks) },
};

#define VIRTNET_SQ_STATS_LEN	ARRAY_SIZE(virtnet_sq_stats_desc)
#define VIRTNET_RQ_STATS_LEN	ARRAY_SIZE(virtnet_rq_stats_desc)

/* Internal representation of a send virtqueue */
struct send_queue {
	/* Virtqueue associated with this send queue */
	struct virtqueue *vq;

	/* TX: fragments + linear part + virtio header */
	struct scatterlist sg[MAX_SKB_FRAGS + 2];

	/* Name of the send queue: output.$index */
	char name[40];

	struct virtnet_sq_stats stats;

	struct napi_struct napi;
};

/* Internal representation of a receive virtqueue */
struct receive_queue {
	/* Virtqueue associated with this receive_queue */
	struct virtqueue *vq;

	struct napi_struct napi;

	struct bpf_prog __rcu *xdp_prog;

	struct virtnet_rq_stats stats;

	/* Chain pages by the private ptr. */
	struct page *pages;

	/* Average packet length for mergeable receive buffers. */
	struct ewma_pkt_len mrg_avg_pkt_len;

	/* Page frag for packet buffer allocation. */
	struct page_frag alloc_frag;

	/* RX: fragments + linear part + virtio header */
	struct scatterlist sg[MAX_SKB_FRAGS + 2];

	/* Min single buffer size for mergeable buffers case. */
	unsigned int min_buf_len;

	/* Name of this receive queue: input.$index */
	char name[40];

	struct xdp_rxq_info xdp_rxq;
};

/* Control VQ buffers: protected by the rtnl lock */
struct control_buf {
	struct virtio_net_ctrl_hdr hdr;
	virtio_net_ctrl_ack status;
	struct virtio_net_ctrl_mq mq;
	u8 promisc;
	u8 allmulti;
	__virtio16 vid;
	__virtio64 offloads;
};

struct virtnet_info {
	struct virtio_device *vdev;
	struct virtqueue *cvq;
	struct net_device *dev;
	struct send_queue *sq;
	struct receive_queue *rq;
	unsigned int status;

	/* Max # of queue pairs supported by the device */
	u16 max_queue_pairs;

	/* # of queue pairs currently used by the driver */
	u16 curr_queue_pairs;

	/* # of XDP queue pairs currently used by the driver */
	u16 xdp_queue_pairs;

	/* xdp_queue_pairs may be 0 when XDP is already loaded, so track
	 * XDP enablement separately.
	 */
	bool xdp_enabled;

	/* I like... big packets and I cannot lie! */
	bool big_packets;

	/* Host will merge rx buffers for big packets (shake it! shake it!) */
	bool mergeable_rx_bufs;

	/* Has control virtqueue */
	bool has_cvq;

	/* Host can handle any s/g split between our header and packet data */
	bool any_header_sg;

	/* Packet virtio header size */
	u8 hdr_len;

	/* Work struct for refilling if we run low on memory. */
	struct delayed_work refill;

	/* Work struct for config space updates */
	struct work_struct config_work;

	/* Is affinity hint set for virtqueues? */
	bool affinity_hint_set;

	/* CPU hotplug instances for online & dead */
	struct hlist_node node;
	struct hlist_node node_dead;

	struct control_buf *ctrl;

	/* Ethtool settings */
	u8 duplex;
	u32 speed;

	unsigned long guest_offloads;
	unsigned long guest_offloads_capable;

	/* failover when STANDBY feature enabled */
	struct failover *failover;
};

struct padded_vnet_hdr {
	struct virtio_net_hdr_mrg_rxbuf hdr;
	/*
	 * hdr is in a separate sg buffer, and data sg buffer shares same page
	 * with this header sg. This padding makes next sg 16 byte aligned
	 * after the header.
	 */
	char padding[4];
};

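/* xdp_frame pointers queued on a send virtqueue are tagged with
 * VIRTIO_XDP_FLAG in their low bit so that, on completion, they can be
 * told apart from ordinary skb pointers.
 */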
static bool is_xdp_frame(void *ptr)
{
	return (unsigned long)ptr & VIRTIO_XDP_FLAG;
}

static void *xdp_to_ptr(struct xdp_frame *ptr)
{
	return (void *)((unsigned long)ptr | VIRTIO_XDP_FLAG);
}

static struct xdp_frame *ptr_to_xdp(void *ptr)
{
	return (struct xdp_frame *)((unsigned long)ptr & ~VIRTIO_XDP_FLAG);
}

/* Converting between virtqueue no. and kernel tx/rx queue no.
 * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
 */
static int vq2txq(struct virtqueue *vq)
{
	return (vq->index - 1) / 2;
}

static int txq2vq(int txq)
{
	return txq * 2 + 1;
}

static int vq2rxq(struct virtqueue *vq)
{
	return vq->index / 2;
}

static int rxq2vq(int rxq)
{
	return rxq * 2;
}

static inline struct virtio_net_hdr_mrg_rxbuf *skb_vnet_hdr(struct sk_buff *skb)
{
	return (struct virtio_net_hdr_mrg_rxbuf *)skb->cb;
}

/*
 * page->private is used to chain pages for big packets; put the whole
 * most recently used list at the beginning for reuse.
 */
static void give_pages(struct receive_queue *rq, struct page *page)
{
	struct page *end;

	/* Find end of list, sew whole thing into vi->rq.pages. */
	for (end = page; end->private; end = (struct page *)end->private);
	end->private = (unsigned long)rq->pages;
	rq->pages = page;
}

static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
{
	struct page *p = rq->pages;

	if (p) {
		rq->pages = (struct page *)p->private;
		/* clear private here, it is used to chain pages */
		p->private = 0;
	} else
		p = alloc_page(gfp_mask);
	return p;
}

static void virtqueue_napi_schedule(struct napi_struct *napi,
				    struct virtqueue *vq)
{
	if (napi_schedule_prep(napi)) {
		virtqueue_disable_cb(vq);
		__napi_schedule(napi);
	}
}

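/* Re-enable virtqueue callbacks once NAPI is done. If more buffers were
 * added while callbacks were off, reschedule NAPI rather than lose them.
 */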
static void virtqueue_napi_complete(struct napi_struct *napi,
				    struct virtqueue *vq, int processed)
{
	int opaque;

	opaque = virtqueue_enable_cb_prepare(vq);
	if (napi_complete_done(napi, processed)) {
		if (unlikely(virtqueue_poll(vq, opaque)))
			virtqueue_napi_schedule(napi, vq);
	} else {
		virtqueue_disable_cb(vq);
	}
}

static void skb_xmit_done(struct virtqueue *vq)
{
	struct virtnet_info *vi = vq->vdev->priv;
	struct napi_struct *napi = &vi->sq[vq2txq(vq)].napi;

	/* Suppress further interrupts. */
	virtqueue_disable_cb(vq);

	if (napi->weight)
		virtqueue_napi_schedule(napi, vq);
	else
		/* We were probably waiting for more output buffers. */
		netif_wake_subqueue(vi->dev, vq2txq(vq));
}

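/* For mergeable buffers, the per-buffer context packs the headroom into
 * the bits above MRG_CTX_HEADER_SHIFT and the buffer truesize into the
 * low bits, so both survive the round trip through the virtqueue.
 */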
#define MRG_CTX_HEADER_SHIFT 22
static void *mergeable_len_to_ctx(unsigned int truesize,
				  unsigned int headroom)
{
	return (void *)(unsigned long)((headroom << MRG_CTX_HEADER_SHIFT) | truesize);
}

static unsigned int mergeable_ctx_to_headroom(void *mrg_ctx)
{
	return (unsigned long)mrg_ctx >> MRG_CTX_HEADER_SHIFT;
}

static unsigned int mergeable_ctx_to_truesize(void *mrg_ctx)
{
	return (unsigned long)mrg_ctx & ((1 << MRG_CTX_HEADER_SHIFT) - 1);
}

/* Called from bottom half context */
static struct sk_buff *page_to_skb(struct virtnet_info *vi,
				   struct receive_queue *rq,
				   struct page *page, unsigned int offset,
				   unsigned int len, unsigned int truesize,
				   bool hdr_valid, unsigned int metasize)
{
	struct sk_buff *skb;
	struct virtio_net_hdr_mrg_rxbuf *hdr;
	unsigned int copy, hdr_len, hdr_padded_len;
	char *p;

	p = page_address(page) + offset;

	/* copy small packet so we can reuse these pages for small data */
	skb = napi_alloc_skb(&rq->napi, GOOD_COPY_LEN);
	if (unlikely(!skb))
		return NULL;

	hdr = skb_vnet_hdr(skb);

	hdr_len = vi->hdr_len;
	if (vi->mergeable_rx_bufs)
		hdr_padded_len = sizeof(*hdr);
	else
		hdr_padded_len = sizeof(struct padded_vnet_hdr);

	/* hdr_valid means no XDP, so we can copy the vnet header */
	if (hdr_valid)
		memcpy(hdr, p, hdr_len);

	len -= hdr_len;
	offset += hdr_padded_len;
	p += hdr_padded_len;

	/* Copy the whole frame if it fits skb->head; otherwise
	 * we let virtio_net_hdr_to_skb() and GRO pull headers as needed.
	 */
	if (len <= skb_tailroom(skb))
		copy = len;
	else
		copy = ETH_HLEN + metasize;
	skb_put_data(skb, p, copy);

	if (metasize) {
		__skb_pull(skb, metasize);
		skb_metadata_set(skb, metasize);
	}

	len -= copy;
	offset += copy;

	if (vi->mergeable_rx_bufs) {
		if (len)
			skb_add_rx_frag(skb, 0, page, offset, len, truesize);
		else
			put_page(page);
		return skb;
	}

	/*
	 * Verify that we can indeed put this data into a skb.
	 * This is here to handle cases when the device erroneously
	 * tries to receive more than is possible. This is usually
	 * the case of a broken device.
	 */
	if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) {
		net_dbg_ratelimited("%s: too much data\n", skb->dev->name);
		dev_kfree_skb(skb);
		return NULL;
	}
	BUG_ON(offset >= PAGE_SIZE);
	while (len) {
		unsigned int frag_size = min((unsigned)PAGE_SIZE - offset, len);
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset,
				frag_size, truesize);
		len -= frag_size;
		page = (struct page *)page->private;
		offset = 0;
	}

	if (page)
		give_pages(rq, page);

	return skb;
}

static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
				  struct send_queue *sq,
				  struct xdp_frame *xdpf)
{
	struct virtio_net_hdr_mrg_rxbuf *hdr;
	int err;

	if (unlikely(xdpf->headroom < vi->hdr_len))
		return -EOVERFLOW;

	/* Make room for virtqueue hdr (also change xdpf->headroom?) */
	xdpf->data -= vi->hdr_len;
	/* Zero header and leave csum up to XDP layers */
	hdr = xdpf->data;
	memset(hdr, 0, vi->hdr_len);
	xdpf->len += vi->hdr_len;

	sg_init_one(sq->sg, xdpf->data, xdpf->len);

	err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdp_to_ptr(xdpf),
				   GFP_ATOMIC);
	if (unlikely(err))
		return -ENOSPC; /* Caller handle free/refcnt */

	return 0;
}

/* When vi->curr_queue_pairs > nr_cpu_ids, the txq/sq is only used for XDP tx on
 * the current cpu, so it does not need to be locked.
 *
 * Here we use a macro instead of inline functions because we have to deal with
 * three issues at the same time: 1. the choice of sq, 2. deciding whether to
 * lock/unlock the txq, and 3. keeping sparse happy. It is difficult for two
 * inline functions to solve all three problems at the same time.
 */
#define virtnet_xdp_get_sq(vi) ({					\
	struct netdev_queue *txq;					\
	typeof(vi) v = (vi);						\
	unsigned int qp;						\
									\
	if (v->curr_queue_pairs > nr_cpu_ids) {				\
		qp = v->curr_queue_pairs - v->xdp_queue_pairs;		\
		qp += smp_processor_id();				\
		txq = netdev_get_tx_queue(v->dev, qp);			\
		__netif_tx_acquire(txq);				\
	} else {							\
		qp = smp_processor_id() % v->curr_queue_pairs;		\
		txq = netdev_get_tx_queue(v->dev, qp);			\
		__netif_tx_lock(txq, raw_smp_processor_id());		\
	}								\
	v->sq + qp;							\
})

#define virtnet_xdp_put_sq(vi, q) {					\
	struct netdev_queue *txq;					\
	typeof(vi) v = (vi);						\
									\
	txq = netdev_get_tx_queue(v->dev, (q) - v->sq);			\
	if (v->curr_queue_pairs > nr_cpu_ids)				\
		__netif_tx_release(txq);				\
	else								\
		__netif_tx_unlock(txq);					\
}

static int virtnet_xdp_xmit(struct net_device *dev,
			    int n, struct xdp_frame **frames, u32 flags)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct receive_queue *rq = vi->rq;
	struct bpf_prog *xdp_prog;
	struct send_queue *sq;
	unsigned int len;
	int packets = 0;
	int bytes = 0;
	int drops = 0;
	int kicks = 0;
	int ret, err;
	void *ptr;
	int i;

	/* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this
	 * indicates XDP resources have been successfully allocated.
	 */
	xdp_prog = rcu_dereference(rq->xdp_prog);
	if (!xdp_prog)
		return -ENXIO;

	sq = virtnet_xdp_get_sq(vi);

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) {
		ret = -EINVAL;
		drops = n;
		goto out;
	}

	/* Free up any pending old buffers before queueing new ones. */
	while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
		if (likely(is_xdp_frame(ptr))) {
			struct xdp_frame *frame = ptr_to_xdp(ptr);

			bytes += frame->len;
			xdp_return_frame(frame);
		} else {
			struct sk_buff *skb = ptr;

			bytes += skb->len;
			napi_consume_skb(skb, false);
		}
		packets++;
	}

	for (i = 0; i < n; i++) {
		struct xdp_frame *xdpf = frames[i];

		err = __virtnet_xdp_xmit_one(vi, sq, xdpf);
		if (err) {
			xdp_return_frame_rx_napi(xdpf);
			drops++;
		}
	}
	ret = n - drops;

	if (flags & XDP_XMIT_FLUSH) {
		if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq))
			kicks = 1;
	}
out:
	u64_stats_update_begin(&sq->stats.syncp);
	sq->stats.bytes += bytes;
	sq->stats.packets += packets;
	sq->stats.xdp_tx += n;
	sq->stats.xdp_tx_drops += drops;
	sq->stats.kicks += kicks;
	u64_stats_update_end(&sq->stats.syncp);

	virtnet_xdp_put_sq(vi, sq);
	return ret;
}

static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
{
	return vi->xdp_enabled ? VIRTIO_XDP_HEADROOM : 0;
}

/* We copy the packet for XDP in the following cases:
 *
 * 1) Packet is scattered across multiple rx buffers.
 * 2) Headroom space is insufficient.
 *
 * This is inefficient but it's a temporary condition that we hit right
 * after XDP is enabled and until the queue is refilled with large
 * buffers with sufficient headroom - so it should affect at most one
 * queue's worth of packets.
 * Afterwards, the conditions to enable XDP should preclude the underlying
 * device from sending packets across multiple buffers (num_buf > 1), and
 * we make sure buffers have enough headroom.
 */
static struct page *xdp_linearize_page(struct receive_queue *rq,
				       u16 *num_buf,
				       struct page *p,
				       int offset,
				       int page_off,
				       unsigned int *len)
{
	struct page *page = alloc_page(GFP_ATOMIC);

	if (!page)
		return NULL;

	memcpy(page_address(page) + page_off, page_address(p) + offset, *len);
	page_off += *len;

	while (--*num_buf) {
		int tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
		unsigned int buflen;
		void *buf;
		int off;

		buf = virtqueue_get_buf(rq->vq, &buflen);
		if (unlikely(!buf))
			goto err_buf;

		p = virt_to_head_page(buf);
		off = buf - page_address(p);

		/* guard against a misconfigured or uncooperative backend that
		 * is sending packets larger than the MTU.
		 */
		if ((page_off + buflen + tailroom) > PAGE_SIZE) {
			put_page(p);
			goto err_buf;
		}

		memcpy(page_address(page) + page_off,
		       page_address(p) + off, buflen);
		page_off += buflen;
		put_page(p);
	}

	/* Headroom does not contribute to packet length */
	*len = page_off - VIRTIO_XDP_HEADROOM;
	return page;
err_buf:
	__free_pages(page, 0);
	return NULL;
}

static struct sk_buff *receive_small(struct net_device *dev,
				     struct virtnet_info *vi,
				     struct receive_queue *rq,
				     void *buf, void *ctx,
				     unsigned int len,
				     unsigned int *xdp_xmit,
				     struct virtnet_rq_stats *stats)
{
	struct sk_buff *skb;
	struct bpf_prog *xdp_prog;
	unsigned int xdp_headroom = (unsigned long)ctx;
	unsigned int header_offset = VIRTNET_RX_PAD + xdp_headroom;
	unsigned int headroom = vi->hdr_len + header_offset;
	unsigned int buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
			      SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	struct page *page = virt_to_head_page(buf);
	unsigned int delta = 0;
	struct page *xdp_page;
	int err;
	unsigned int metasize = 0;

	len -= vi->hdr_len;
	stats->bytes += len;

	rcu_read_lock();
	xdp_prog = rcu_dereference(rq->xdp_prog);
	if (xdp_prog) {
		struct virtio_net_hdr_mrg_rxbuf *hdr = buf + header_offset;
		struct xdp_frame *xdpf;
		struct xdp_buff xdp;
		void *orig_data;
		u32 act;

		if (unlikely(hdr->hdr.gso_type))
			goto err_xdp;

		if (unlikely(xdp_headroom < virtnet_get_headroom(vi))) {
			int offset = buf - page_address(page) + header_offset;
			unsigned int tlen = len + vi->hdr_len;
			u16 num_buf = 1;

			xdp_headroom = virtnet_get_headroom(vi);
			header_offset = VIRTNET_RX_PAD + xdp_headroom;
			headroom = vi->hdr_len + header_offset;
			buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
				 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
			xdp_page = xdp_linearize_page(rq, &num_buf, page,
						      offset, header_offset,
						      &tlen);
			if (!xdp_page)
				goto err_xdp;

			buf = page_address(xdp_page);
			put_page(page);
			page = xdp_page;
		}

		xdp.data_hard_start = buf + VIRTNET_RX_PAD + vi->hdr_len;
		xdp.data = xdp.data_hard_start + xdp_headroom;
		xdp.data_end = xdp.data + len;
		xdp.data_meta = xdp.data;
		xdp.rxq = &rq->xdp_rxq;
		orig_data = xdp.data;
		act = bpf_prog_run_xdp(xdp_prog, &xdp);
		stats->xdp_packets++;

		switch (act) {
		case XDP_PASS:
			/* Recalculate length in case bpf program changed it */
			delta = orig_data - xdp.data;
			len = xdp.data_end - xdp.data;
			metasize = xdp.data - xdp.data_meta;
			break;
		case XDP_TX:
			stats->xdp_tx++;
			xdpf = convert_to_xdp_frame(&xdp);
			if (unlikely(!xdpf))
				goto err_xdp;
			err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
			if (unlikely(err < 0)) {
				trace_xdp_exception(vi->dev, xdp_prog, act);
				goto err_xdp;
			}
			*xdp_xmit |= VIRTIO_XDP_TX;
			rcu_read_unlock();
			goto xdp_xmit;
		case XDP_REDIRECT:
			stats->xdp_redirects++;
			err = xdp_do_redirect(dev, &xdp, xdp_prog);
			if (err)
				goto err_xdp;
			*xdp_xmit |= VIRTIO_XDP_REDIR;
			rcu_read_unlock();
			goto xdp_xmit;
		default:
			bpf_warn_invalid_xdp_action(act);
			/* fall through */
		case XDP_ABORTED:
			trace_xdp_exception(vi->dev, xdp_prog, act);
		case XDP_DROP:
			goto err_xdp;
		}
	}
	rcu_read_unlock();

	skb = build_skb(buf, buflen);
	if (!skb) {
		put_page(page);
		goto err;
	}
	skb_reserve(skb, headroom - delta);
	skb_put(skb, len);
	if (!delta) {
		buf += header_offset;
		memcpy(skb_vnet_hdr(skb), buf, vi->hdr_len);
	} /* keep zeroed vnet hdr since packet was changed by bpf */

	if (metasize)
		skb_metadata_set(skb, metasize);

err:
	return skb;

err_xdp:
	rcu_read_unlock();
	stats->xdp_drops++;
	stats->drops++;
	put_page(page);
xdp_xmit:
	return NULL;
}

static struct sk_buff *receive_big(struct net_device *dev,
				   struct virtnet_info *vi,
				   struct receive_queue *rq,
				   void *buf,
				   unsigned int len,
				   struct virtnet_rq_stats *stats)
{
	struct page *page = buf;
	struct sk_buff *skb =
		page_to_skb(vi, rq, page, 0, len, PAGE_SIZE, true, 0);

	stats->bytes += len - vi->hdr_len;
	if (unlikely(!skb))
		goto err;

	return skb;

err:
	stats->drops++;
	give_pages(rq, page);
	return NULL;
}

static struct sk_buff *receive_mergeable(struct net_device *dev,
					 struct virtnet_info *vi,
					 struct receive_queue *rq,
					 void *buf,
					 void *ctx,
					 unsigned int len,
					 unsigned int *xdp_xmit,
					 struct virtnet_rq_stats *stats)
{
	struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
	u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
	struct page *page = virt_to_head_page(buf);
	int offset = buf - page_address(page);
	struct sk_buff *head_skb, *curr_skb;
	struct bpf_prog *xdp_prog;
	unsigned int truesize;
	unsigned int headroom = mergeable_ctx_to_headroom(ctx);
	int err;
	unsigned int metasize = 0;

	head_skb = NULL;
	stats->bytes += len - vi->hdr_len;

	rcu_read_lock();
	xdp_prog = rcu_dereference(rq->xdp_prog);
	if (xdp_prog) {
		struct xdp_frame *xdpf;
		struct page *xdp_page;
		struct xdp_buff xdp;
		void *data;
		u32 act;

		/* Transient failure which in theory could occur if
		 * in-flight packets from before XDP was enabled reach
		 * the receive path after XDP is loaded.
		 */
		if (unlikely(hdr->hdr.gso_type))
			goto err_xdp;

		/* This happens when the rx buffer size is underestimated
		 * or headroom is not enough because the buffer was
		 * refilled before XDP was set. This should only happen
		 * for the first several packets, so we don't care much
		 * about its performance.
		 */
		if (unlikely(num_buf > 1 ||
			     headroom < virtnet_get_headroom(vi))) {
			/* linearize data for XDP */
			xdp_page = xdp_linearize_page(rq, &num_buf,
						      page, offset,
						      VIRTIO_XDP_HEADROOM,
						      &len);
			if (!xdp_page)
				goto err_xdp;
			offset = VIRTIO_XDP_HEADROOM;
		} else {
			xdp_page = page;
		}

		/* Allow consuming headroom but reserve enough space to push
		 * the descriptor on if we get an XDP_TX return code.
		 */
		data = page_address(xdp_page) + offset;
		xdp.data_hard_start = data - VIRTIO_XDP_HEADROOM + vi->hdr_len;
		xdp.data = data + vi->hdr_len;
		xdp.data_end = xdp.data + (len - vi->hdr_len);
		xdp.data_meta = xdp.data;
		xdp.rxq = &rq->xdp_rxq;

		act = bpf_prog_run_xdp(xdp_prog, &xdp);
		stats->xdp_packets++;

		switch (act) {
		case XDP_PASS:
			metasize = xdp.data - xdp.data_meta;

			/* recalculate offset to account for any header
			 * adjustments, and subtract metasize so that the
			 * metadata is copied in page_to_skb(). Note the other
			 * cases do not build an skb and avoid using offset.
			 */
			offset = xdp.data - page_address(xdp_page) -
				 vi->hdr_len - metasize;

			/* recalculate len if xdp.data, xdp.data_end or
			 * xdp.data_meta were adjusted
			 */
			len = xdp.data_end - xdp.data + vi->hdr_len + metasize;
			/* We can only create skb based on xdp_page. */
			if (unlikely(xdp_page != page)) {
				rcu_read_unlock();
				put_page(page);
				head_skb = page_to_skb(vi, rq, xdp_page, offset,
						       len, PAGE_SIZE, false,
						       metasize);
				return head_skb;
			}
			break;
		case XDP_TX:
			stats->xdp_tx++;
			xdpf = convert_to_xdp_frame(&xdp);
			if (unlikely(!xdpf))
				goto err_xdp;
			err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
			if (unlikely(err < 0)) {
				trace_xdp_exception(vi->dev, xdp_prog, act);
				if (unlikely(xdp_page != page))
					put_page(xdp_page);
				goto err_xdp;
			}
			*xdp_xmit |= VIRTIO_XDP_TX;
			if (unlikely(xdp_page != page))
				put_page(page);
			rcu_read_unlock();
			goto xdp_xmit;
		case XDP_REDIRECT:
			stats->xdp_redirects++;
			err = xdp_do_redirect(dev, &xdp, xdp_prog);
			if (err) {
				if (unlikely(xdp_page != page))
					put_page(xdp_page);
				goto err_xdp;
			}
			*xdp_xmit |= VIRTIO_XDP_REDIR;
			if (unlikely(xdp_page != page))
				put_page(page);
			rcu_read_unlock();
			goto xdp_xmit;
		default:
			bpf_warn_invalid_xdp_action(act);
			/* fall through */
		case XDP_ABORTED:
			trace_xdp_exception(vi->dev, xdp_prog, act);
			/* fall through */
		case XDP_DROP:
			if (unlikely(xdp_page != page))
				__free_pages(xdp_page, 0);
			goto err_xdp;
		}
	}
	rcu_read_unlock();

	truesize = mergeable_ctx_to_truesize(ctx);
	if (unlikely(len > truesize)) {
		pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
			 dev->name, len, (unsigned long)ctx);
		dev->stats.rx_length_errors++;
		goto err_skb;
	}

	head_skb = page_to_skb(vi, rq, page, offset, len, truesize, !xdp_prog,
			       metasize);
	curr_skb = head_skb;

	if (unlikely(!curr_skb))
		goto err_skb;
	while (--num_buf) {
		int num_skb_frags;

		buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx);
		if (unlikely(!buf)) {
			pr_debug("%s: rx error: %d buffers out of %d missing\n",
				 dev->name, num_buf,
				 virtio16_to_cpu(vi->vdev,
						 hdr->num_buffers));
			dev->stats.rx_length_errors++;
			goto err_buf;
		}

		stats->bytes += len;
		page = virt_to_head_page(buf);

		truesize = mergeable_ctx_to_truesize(ctx);
		if (unlikely(len > truesize)) {
			pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
				 dev->name, len, (unsigned long)ctx);
			dev->stats.rx_length_errors++;
			goto err_skb;
		}

		num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
		if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) {
			struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC);

			if (unlikely(!nskb))
				goto err_skb;
			if (curr_skb == head_skb)
				skb_shinfo(curr_skb)->frag_list = nskb;
			else
				curr_skb->next = nskb;
			curr_skb = nskb;
			head_skb->truesize += nskb->truesize;
			num_skb_frags = 0;
		}
		if (curr_skb != head_skb) {
			head_skb->data_len += len;
			head_skb->len += len;
			head_skb->truesize += truesize;
		}
		offset = buf - page_address(page);
		if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) {
			put_page(page);
			skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1,
					     len, truesize);
		} else {
			skb_add_rx_frag(curr_skb, num_skb_frags, page,
					offset, len, truesize);
		}
	}

	ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len);
	return head_skb;

err_xdp:
	rcu_read_unlock();
	stats->xdp_drops++;
err_skb:
	put_page(page);
	while (num_buf-- > 1) {
		buf = virtqueue_get_buf(rq->vq, &len);
		if (unlikely(!buf)) {
			pr_debug("%s: rx error: %d buffers missing\n",
				 dev->name, num_buf);
			dev->stats.rx_length_errors++;
			break;
		}
		stats->bytes += len;
		page = virt_to_head_page(buf);
		put_page(page);
	}
err_buf:
	stats->drops++;
	dev_kfree_skb(head_skb);
xdp_xmit:
	return NULL;
}

static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
			void *buf, unsigned int len, void **ctx,
			unsigned int *xdp_xmit,
			struct virtnet_rq_stats *stats)
{
	struct net_device *dev = vi->dev;
	struct sk_buff *skb;
	struct virtio_net_hdr_mrg_rxbuf *hdr;

	if (unlikely(len < vi->hdr_len + ETH_HLEN)) {
		pr_debug("%s: short packet %i\n", dev->name, len);
		dev->stats.rx_length_errors++;
		if (vi->mergeable_rx_bufs) {
			put_page(virt_to_head_page(buf));
		} else if (vi->big_packets) {
			give_pages(rq, buf);
		} else {
			put_page(virt_to_head_page(buf));
		}
		return;
	}

	if (vi->mergeable_rx_bufs)
		skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit,
					stats);
	else if (vi->big_packets)
		skb = receive_big(dev, vi, rq, buf, len, stats);
	else
		skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit, stats);

	if (unlikely(!skb))
		return;

	hdr = skb_vnet_hdr(skb);

	if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID)
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (virtio_net_hdr_to_skb(skb, &hdr->hdr,
				  virtio_is_little_endian(vi->vdev))) {
		net_warn_ratelimited("%s: bad gso: type: %u, size: %u\n",
				     dev->name, hdr->hdr.gso_type,
				     hdr->hdr.gso_size);
		goto frame_err;
	}

	skb_record_rx_queue(skb, vq2rxq(rq->vq));
	skb->protocol = eth_type_trans(skb, dev);
	pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
		 ntohs(skb->protocol), skb->len, skb->pkt_type);

	napi_gro_receive(&rq->napi, skb);
	return;

frame_err:
	dev->stats.rx_frame_errors++;
	dev_kfree_skb(skb);
}

/* Unlike mergeable buffers, all buffers are allocated to the
 * same size, except for the headroom. For this reason we do
 * not need to use mergeable_len_to_ctx here - it is enough
 * to store the headroom as the context ignoring the truesize.
 */
static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
			     gfp_t gfp)
{
	struct page_frag *alloc_frag = &rq->alloc_frag;
	char *buf;
	unsigned int xdp_headroom = virtnet_get_headroom(vi);
	void *ctx = (void *)(unsigned long)xdp_headroom;
	int len = vi->hdr_len + VIRTNET_RX_PAD + GOOD_PACKET_LEN + xdp_headroom;
	int err;

	len = SKB_DATA_ALIGN(len) +
	      SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	if (unlikely(!skb_page_frag_refill(len, alloc_frag, gfp)))
		return -ENOMEM;

	buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
	get_page(alloc_frag->page);
	alloc_frag->offset += len;
	sg_init_one(rq->sg, buf + VIRTNET_RX_PAD + xdp_headroom,
		    vi->hdr_len + GOOD_PACKET_LEN);
	err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
	if (err < 0)
		put_page(virt_to_head_page(buf));
	return err;
}

static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq,
			   gfp_t gfp)
{
	struct page *first, *list = NULL;
	char *p;
	int i, err, offset;

	sg_init_table(rq->sg, MAX_SKB_FRAGS + 2);

	/* page in rq->sg[MAX_SKB_FRAGS + 1] is list tail */
	for (i = MAX_SKB_FRAGS + 1; i > 1; --i) {
		first = get_a_page(rq, gfp);
		if (!first) {
			if (list)
				give_pages(rq, list);
			return -ENOMEM;
		}
		sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE);

		/* chain new page in list head to match sg */
		first->private = (unsigned long)list;
		list = first;
	}

	first = get_a_page(rq, gfp);
	if (!first) {
		give_pages(rq, list);
		return -ENOMEM;
	}
	p = page_address(first);

	/* rq->sg[0], rq->sg[1] share the same page */
	/* a separate rq->sg[0] for header - required in case !any_header_sg */
	sg_set_buf(&rq->sg[0], p, vi->hdr_len);

	/* rq->sg[1] for data packet, from offset */
	offset = sizeof(struct padded_vnet_hdr);
	sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset);

	/* chain first in list head */
	first->private = (unsigned long)list;
	err = virtqueue_add_inbuf(rq->vq, rq->sg, MAX_SKB_FRAGS + 2,
				  first, gfp);
	if (err < 0)
		give_pages(rq, first);

	return err;
}

static unsigned int get_mergeable_buf_len(struct receive_queue *rq,
					  struct ewma_pkt_len *avg_pkt_len,
					  unsigned int room)
{
	const size_t hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
	unsigned int len;

	if (room)
		return PAGE_SIZE - room;

	len = hdr_len + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len),
				rq->min_buf_len, PAGE_SIZE - hdr_len);

	return ALIGN(len, L1_CACHE_BYTES);
}

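/* Post a single mergeable receive buffer, sized from the EWMA of recent
 * packet lengths, with the truesize and headroom encoded in the context.
 */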
static int add_recvbuf_mergeable(struct virtnet_info *vi,
				 struct receive_queue *rq, gfp_t gfp)
{
	struct page_frag *alloc_frag = &rq->alloc_frag;
	unsigned int headroom = virtnet_get_headroom(vi);
	unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
	unsigned int room = SKB_DATA_ALIGN(headroom + tailroom);
	char *buf;
	void *ctx;
	int err;
	unsigned int len, hole;

	/* Extra tailroom is needed to satisfy XDP's assumption. This
	 * means rx frag coalescing won't work, but since we've
	 * disabled GSO for XDP, it won't be a big issue.
	 */
	len = get_mergeable_buf_len(rq, &rq->mrg_avg_pkt_len, room);
	if (unlikely(!skb_page_frag_refill(len + room, alloc_frag, gfp)))
		return -ENOMEM;

	buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
	buf += headroom; /* advance address leaving hole at front of pkt */
	get_page(alloc_frag->page);
	alloc_frag->offset += len + room;
	hole = alloc_frag->size - alloc_frag->offset;
	if (hole < len + room) {
		/* To avoid internal fragmentation, if there is very likely not
		 * enough space for another buffer, add the remaining space to
		 * the current buffer.
		 */
		len += hole;
		alloc_frag->offset += hole;
	}

	sg_init_one(rq->sg, buf, len);
	ctx = mergeable_len_to_ctx(len, headroom);
	err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
	if (err < 0)
		put_page(virt_to_head_page(buf));

	return err;
}

/*
 * Returns false if we couldn't fill entirely (OOM).
 *
 * Normally run in the receive path, but can also be run from ndo_open
 * before we're receiving packets, or from refill_work which is
 * careful to disable receiving (using napi_disable).
 */
static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
			  gfp_t gfp)
{
	int err;
	bool oom;

	do {
		if (vi->mergeable_rx_bufs)
			err = add_recvbuf_mergeable(vi, rq, gfp);
		else if (vi->big_packets)
			err = add_recvbuf_big(vi, rq, gfp);
		else
			err = add_recvbuf_small(vi, rq, gfp);

		oom = err == -ENOMEM;
		if (err)
			break;
	} while (rq->vq->num_free);
	if (virtqueue_kick_prepare(rq->vq) && virtqueue_notify(rq->vq)) {
		unsigned long flags;

		flags = u64_stats_update_begin_irqsave(&rq->stats.syncp);
		rq->stats.kicks++;
		u64_stats_update_end_irqrestore(&rq->stats.syncp, flags);
	}

	return !oom;
}

static void skb_recv_done(struct virtqueue *rvq)
{
	struct virtnet_info *vi = rvq->vdev->priv;
	struct receive_queue *rq = &vi->rq[vq2rxq(rvq)];

	virtqueue_napi_schedule(&rq->napi, rvq);
}

static void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi)
{
	napi_enable(napi);

	/* If all buffers were filled by other side before we napi_enabled, we
	 * won't get another interrupt, so process any outstanding packets now.
	 * Call local_bh_enable after to trigger softIRQ processing.
	 */
	local_bh_disable();
	virtqueue_napi_schedule(napi, vq);
	local_bh_enable();
}

static void virtnet_napi_tx_enable(struct virtnet_info *vi,
				   struct virtqueue *vq,
				   struct napi_struct *napi)
{
	if (!napi->weight)
		return;

	/* Tx napi touches cachelines on the cpu handling tx interrupts. Only
	 * enable the feature if this is likely affine with the transmit path.
	 */
	if (!vi->affinity_hint_set) {
		napi->weight = 0;
		return;
	}

	return virtnet_napi_enable(vq, napi);
}

static void virtnet_napi_tx_disable(struct napi_struct *napi)
{
	if (napi->weight)
		napi_disable(napi);
}

static void refill_work(struct work_struct *work)
{
	struct virtnet_info *vi =
		container_of(work, struct virtnet_info, refill.work);
	bool still_empty;
	int i;

	for (i = 0; i < vi->curr_queue_pairs; i++) {
		struct receive_queue *rq = &vi->rq[i];

		napi_disable(&rq->napi);
		still_empty = !try_fill_recv(vi, rq, GFP_KERNEL);
		virtnet_napi_enable(rq->vq, &rq->napi);

		/* In theory, this can happen: if we don't get any buffers in,
		 * we will *never* try to fill again.
		 */
		if (still_empty)
			schedule_delayed_work(&vi->refill, HZ/2);
	}
}

static int virtnet_receive(struct receive_queue *rq, int budget,
			   unsigned int *xdp_xmit)
{
	struct virtnet_info *vi = rq->vq->vdev->priv;
	struct virtnet_rq_stats stats = {};
	unsigned int len;
	void *buf;
	int i;

	if (!vi->big_packets || vi->mergeable_rx_bufs) {
		void *ctx;

		while (stats.packets < budget &&
		       (buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx))) {
			receive_buf(vi, rq, buf, len, ctx, xdp_xmit, &stats);
			stats.packets++;
		}
	} else {
		while (stats.packets < budget &&
		       (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
			receive_buf(vi, rq, buf, len, NULL, xdp_xmit, &stats);
			stats.packets++;
		}
	}

	if (rq->vq->num_free > min((unsigned int)budget, virtqueue_get_vring_size(rq->vq)) / 2) {
		if (!try_fill_recv(vi, rq, GFP_ATOMIC))
			schedule_delayed_work(&vi->refill, 0);
	}

	u64_stats_update_begin(&rq->stats.syncp);
	for (i = 0; i < VIRTNET_RQ_STATS_LEN; i++) {
		size_t offset = virtnet_rq_stats_desc[i].offset;
		u64 *item;

		item = (u64 *)((u8 *)&rq->stats + offset);
		*item += *(u64 *)((u8 *)&stats + offset);
	}
	u64_stats_update_end(&rq->stats.syncp);

	return stats.packets;
}

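/* Reclaim completed transmit buffers. Each token is either an skb or a
 * tagged xdp_frame (see is_xdp_frame()).
 */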
static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
{
	unsigned int len;
	unsigned int packets = 0;
	unsigned int bytes = 0;
	void *ptr;

	while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
		if (likely(!is_xdp_frame(ptr))) {
			struct sk_buff *skb = ptr;

			pr_debug("Sent skb %p\n", skb);

			bytes += skb->len;
			napi_consume_skb(skb, in_napi);
		} else {
			struct xdp_frame *frame = ptr_to_xdp(ptr);

			bytes += frame->len;
			xdp_return_frame(frame);
		}
		packets++;
	}

	/* Avoid overhead when no packets have been processed; this
	 * happens when called speculatively from start_xmit.
	 */
	if (!packets)
		return;

	u64_stats_update_begin(&sq->stats.syncp);
	sq->stats.bytes += bytes;
	sq->stats.packets += packets;
	u64_stats_update_end(&sq->stats.syncp);
}

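/* The last xdp_queue_pairs of the active queue pairs carry raw XDP
 * buffers rather than skbs, so the normal skb reclaim path must skip them.
 */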
static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
{
	if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
		return false;
	else if (q < vi->curr_queue_pairs)
		return true;
	else
		return false;
}

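/* Opportunistically reclaim completed TX buffers for the send queue that
 * shares this queue index while polling RX, so completions are not left
 * waiting for a TX interrupt.
 */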
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1454 | static void virtnet_poll_cleantx(struct receive_queue *rq) |
| 1455 | { |
| 1456 | struct virtnet_info *vi = rq->vq->vdev->priv; |
| 1457 | unsigned int index = vq2rxq(rq->vq); |
| 1458 | struct send_queue *sq = &vi->sq[index]; |
| 1459 | struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index); |
| 1460 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 1461 | if (!sq->napi.weight || is_xdp_raw_buffer_queue(vi, index)) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1462 | return; |
| 1463 | |
| 1464 | if (__netif_tx_trylock(txq)) { |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 1465 | free_old_xmit_skbs(sq, true); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1466 | __netif_tx_unlock(txq); |
| 1467 | } |
| 1468 | |
| 1469 | if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) |
| 1470 | netif_tx_wake_queue(txq); |
| 1471 | } |
| 1472 | |
| 1473 | static int virtnet_poll(struct napi_struct *napi, int budget) |
| 1474 | { |
| 1475 | struct receive_queue *rq = |
| 1476 | container_of(napi, struct receive_queue, napi); |
| 1477 | struct virtnet_info *vi = rq->vq->vdev->priv; |
| 1478 | struct send_queue *sq; |
| 1479 | unsigned int received; |
| 1480 | unsigned int xdp_xmit = 0; |
| 1481 | |
| 1482 | virtnet_poll_cleantx(rq); |
| 1483 | |
| 1484 | received = virtnet_receive(rq, budget, &xdp_xmit); |
| 1485 | |
| 1486 | /* Out of packets? */ |
| 1487 | if (received < budget) |
| 1488 | virtqueue_napi_complete(napi, rq->vq, received); |
| 1489 | |
| 1490 | if (xdp_xmit & VIRTIO_XDP_REDIR) |
| 1491 | xdp_do_flush_map(); |
| 1492 | |
| 1493 | if (xdp_xmit & VIRTIO_XDP_TX) { |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame^] | 1494 | sq = virtnet_xdp_get_sq(vi); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1495 | if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) { |
| 1496 | u64_stats_update_begin(&sq->stats.syncp); |
| 1497 | sq->stats.kicks++; |
| 1498 | u64_stats_update_end(&sq->stats.syncp); |
| 1499 | } |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame^] | 1500 | virtnet_xdp_put_sq(vi, sq); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1501 | } |
| 1502 | |
| 1503 | return received; |
| 1504 | } |
| 1505 | |
| 1506 | static int virtnet_open(struct net_device *dev) |
| 1507 | { |
| 1508 | struct virtnet_info *vi = netdev_priv(dev); |
| 1509 | int i, err; |
| 1510 | |
| 1511 | for (i = 0; i < vi->max_queue_pairs; i++) { |
| 1512 | if (i < vi->curr_queue_pairs) |
 | 1513 | /* Make sure we have some buffers: if OOM, retry via the refill workqueue. */ |
| 1514 | if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL)) |
| 1515 | schedule_delayed_work(&vi->refill, 0); |
| 1516 | |
| 1517 | err = xdp_rxq_info_reg(&vi->rq[i].xdp_rxq, dev, i); |
| 1518 | if (err < 0) |
| 1519 | return err; |
| 1520 | |
| 1521 | err = xdp_rxq_info_reg_mem_model(&vi->rq[i].xdp_rxq, |
| 1522 | MEM_TYPE_PAGE_SHARED, NULL); |
| 1523 | if (err < 0) { |
| 1524 | xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq); |
| 1525 | return err; |
| 1526 | } |
| 1527 | |
| 1528 | virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); |
| 1529 | virtnet_napi_tx_enable(vi, vi->sq[i].vq, &vi->sq[i].napi); |
| 1530 | } |
| 1531 | |
| 1532 | return 0; |
| 1533 | } |
| 1534 | |
| 1535 | static int virtnet_poll_tx(struct napi_struct *napi, int budget) |
| 1536 | { |
| 1537 | struct send_queue *sq = container_of(napi, struct send_queue, napi); |
| 1538 | struct virtnet_info *vi = sq->vq->vdev->priv; |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 1539 | unsigned int index = vq2txq(sq->vq); |
| 1540 | struct netdev_queue *txq; |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame^] | 1541 | int opaque; |
| 1542 | bool done; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1543 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 1544 | if (unlikely(is_xdp_raw_buffer_queue(vi, index))) { |
| 1545 | /* We don't need to enable cb for XDP */ |
| 1546 | napi_complete_done(napi, 0); |
| 1547 | return 0; |
| 1548 | } |
| 1549 | |
| 1550 | txq = netdev_get_tx_queue(vi->dev, index); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1551 | __netif_tx_lock(txq, raw_smp_processor_id()); |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame^] | 1552 | virtqueue_disable_cb(sq->vq); |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 1553 | free_old_xmit_skbs(sq, true); |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame^] | 1554 | |
| 1555 | opaque = virtqueue_enable_cb_prepare(sq->vq); |
| 1556 | |
| 1557 | done = napi_complete_done(napi, 0); |
| 1558 | |
| 1559 | if (!done) |
| 1560 | virtqueue_disable_cb(sq->vq); |
| 1561 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1562 | __netif_tx_unlock(txq); |
| 1563 | |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame^] | 1564 | if (done) { |
| 1565 | if (unlikely(virtqueue_poll(sq->vq, opaque))) { |
| 1566 | if (napi_schedule_prep(napi)) { |
| 1567 | __netif_tx_lock(txq, raw_smp_processor_id()); |
| 1568 | virtqueue_disable_cb(sq->vq); |
| 1569 | __netif_tx_unlock(txq); |
| 1570 | __napi_schedule(napi); |
| 1571 | } |
| 1572 | } |
| 1573 | } |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1574 | |
| 1575 | if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) |
| 1576 | netif_tx_wake_queue(txq); |
| 1577 | |
| 1578 | return 0; |
| 1579 | } |
| 1580 | |
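 | | /* Build the sg list for one skb: push the virtio-net header into the skb |
 | | * headroom when alignment and headroom allow (can_push), otherwise use a |
 | | * separate sg entry for the header, then add the buffer to the TX vq. |
 | | */ |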
| 1581 | static int xmit_skb(struct send_queue *sq, struct sk_buff *skb) |
| 1582 | { |
| 1583 | struct virtio_net_hdr_mrg_rxbuf *hdr; |
| 1584 | const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest; |
| 1585 | struct virtnet_info *vi = sq->vq->vdev->priv; |
| 1586 | int num_sg; |
| 1587 | unsigned hdr_len = vi->hdr_len; |
| 1588 | bool can_push; |
| 1589 | |
| 1590 | pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest); |
| 1591 | |
| 1592 | can_push = vi->any_header_sg && |
| 1593 | !((unsigned long)skb->data & (__alignof__(*hdr) - 1)) && |
| 1594 | !skb_header_cloned(skb) && skb_headroom(skb) >= hdr_len; |
| 1595 | /* Even if we can, don't push here yet as this would skew |
| 1596 | * csum_start offset below. */ |
| 1597 | if (can_push) |
| 1598 | hdr = (struct virtio_net_hdr_mrg_rxbuf *)(skb->data - hdr_len); |
| 1599 | else |
| 1600 | hdr = skb_vnet_hdr(skb); |
| 1601 | |
| 1602 | if (virtio_net_hdr_from_skb(skb, &hdr->hdr, |
| 1603 | virtio_is_little_endian(vi->vdev), false, |
| 1604 | 0)) |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame^] | 1605 | return -EPROTO; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1606 | |
| 1607 | if (vi->mergeable_rx_bufs) |
| 1608 | hdr->num_buffers = 0; |
| 1609 | |
| 1610 | sg_init_table(sq->sg, skb_shinfo(skb)->nr_frags + (can_push ? 1 : 2)); |
| 1611 | if (can_push) { |
| 1612 | __skb_push(skb, hdr_len); |
| 1613 | num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len); |
| 1614 | if (unlikely(num_sg < 0)) |
| 1615 | return num_sg; |
| 1616 | /* Pull header back to avoid skew in tx bytes calculations. */ |
| 1617 | __skb_pull(skb, hdr_len); |
| 1618 | } else { |
| 1619 | sg_set_buf(sq->sg, hdr, hdr_len); |
| 1620 | num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len); |
| 1621 | if (unlikely(num_sg < 0)) |
| 1622 | return num_sg; |
| 1623 | num_sg++; |
| 1624 | } |
| 1625 | return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC); |
| 1626 | } |
| 1627 | |
| 1628 | static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) |
| 1629 | { |
| 1630 | struct virtnet_info *vi = netdev_priv(dev); |
| 1631 | int qnum = skb_get_queue_mapping(skb); |
| 1632 | struct send_queue *sq = &vi->sq[qnum]; |
| 1633 | int err; |
| 1634 | struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum); |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 1635 | bool kick = !netdev_xmit_more(); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1636 | bool use_napi = sq->napi.weight; |
| 1637 | |
| 1638 | /* Free up any pending old buffers before queueing new ones. */ |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 1639 | free_old_xmit_skbs(sq, false); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1640 | |
| 1641 | if (use_napi && kick) |
| 1642 | virtqueue_enable_cb_delayed(sq->vq); |
| 1643 | |
| 1644 | /* timestamp packet in software */ |
| 1645 | skb_tx_timestamp(skb); |
| 1646 | |
| 1647 | /* Try to transmit */ |
| 1648 | err = xmit_skb(sq, skb); |
| 1649 | |
| 1650 | /* This should not happen! */ |
| 1651 | if (unlikely(err)) { |
| 1652 | dev->stats.tx_fifo_errors++; |
| 1653 | if (net_ratelimit()) |
| 1654 | dev_warn(&dev->dev, |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 1655 | "Unexpected TXQ (%d) queue failure: %d\n", |
| 1656 | qnum, err); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1657 | dev->stats.tx_dropped++; |
| 1658 | dev_kfree_skb_any(skb); |
| 1659 | return NETDEV_TX_OK; |
| 1660 | } |
| 1661 | |
 | 1662 | /* Don't wait for transmitted skbs to be freed. */ |
| 1663 | if (!use_napi) { |
| 1664 | skb_orphan(skb); |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 1665 | nf_reset_ct(skb); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1666 | } |
| 1667 | |
| 1668 | /* If running out of space, stop queue to avoid getting packets that we |
| 1669 | * are then unable to transmit. |
| 1670 | * An alternative would be to force queuing layer to requeue the skb by |
| 1671 | * returning NETDEV_TX_BUSY. However, NETDEV_TX_BUSY should not be |
| 1672 | * returned in a normal path of operation: it means that driver is not |
| 1673 | * maintaining the TX queue stop/start state properly, and causes |
| 1674 | * the stack to do a non-trivial amount of useless work. |
| 1675 | * Since most packets only take 1 or 2 ring slots, stopping the queue |
| 1676 | * early means 16 slots are typically wasted. |
| 1677 | */ |
| 1678 | if (sq->vq->num_free < 2+MAX_SKB_FRAGS) { |
| 1679 | netif_stop_subqueue(dev, qnum); |
| 1680 | if (!use_napi && |
| 1681 | unlikely(!virtqueue_enable_cb_delayed(sq->vq))) { |
| 1682 | /* More just got used, free them then recheck. */ |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 1683 | free_old_xmit_skbs(sq, false); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1684 | if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) { |
| 1685 | netif_start_subqueue(dev, qnum); |
| 1686 | virtqueue_disable_cb(sq->vq); |
| 1687 | } |
| 1688 | } |
| 1689 | } |
| 1690 | |
| 1691 | if (kick || netif_xmit_stopped(txq)) { |
| 1692 | if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) { |
| 1693 | u64_stats_update_begin(&sq->stats.syncp); |
| 1694 | sq->stats.kicks++; |
| 1695 | u64_stats_update_end(&sq->stats.syncp); |
| 1696 | } |
| 1697 | } |
| 1698 | |
| 1699 | return NETDEV_TX_OK; |
| 1700 | } |
| 1701 | |
| 1702 | /* |
| 1703 | * Send command via the control virtqueue and check status. Commands |
| 1704 | * supported by the hypervisor, as indicated by feature bits, should |
| 1705 | * never fail unless improperly formatted. |
| 1706 | */ |
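 | | /* The sg layout is always: header (out), optional command-specific data |
 | | * (out), status byte (in). A typical caller, as in _virtnet_set_queues() |
 | | * below: |
 | | * |
 | | * sg_init_one(&sg, &vi->ctrl->mq, sizeof(vi->ctrl->mq)); |
 | | * virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ, |
 | | * VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg); |
 | | */ |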
| 1707 | static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd, |
| 1708 | struct scatterlist *out) |
| 1709 | { |
| 1710 | struct scatterlist *sgs[4], hdr, stat; |
| 1711 | unsigned out_num = 0, tmp; |
| 1712 | |
| 1713 | /* Caller should know better */ |
| 1714 | BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)); |
| 1715 | |
| 1716 | vi->ctrl->status = ~0; |
| 1717 | vi->ctrl->hdr.class = class; |
| 1718 | vi->ctrl->hdr.cmd = cmd; |
| 1719 | /* Add header */ |
| 1720 | sg_init_one(&hdr, &vi->ctrl->hdr, sizeof(vi->ctrl->hdr)); |
| 1721 | sgs[out_num++] = &hdr; |
| 1722 | |
| 1723 | if (out) |
| 1724 | sgs[out_num++] = out; |
| 1725 | |
| 1726 | /* Add return status. */ |
| 1727 | sg_init_one(&stat, &vi->ctrl->status, sizeof(vi->ctrl->status)); |
| 1728 | sgs[out_num] = &stat; |
| 1729 | |
| 1730 | BUG_ON(out_num + 1 > ARRAY_SIZE(sgs)); |
| 1731 | virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC); |
| 1732 | |
| 1733 | if (unlikely(!virtqueue_kick(vi->cvq))) |
| 1734 | return vi->ctrl->status == VIRTIO_NET_OK; |
| 1735 | |
 | 1736 | /* Spin for a response; the kick causes an ioport write, trapping |
 | 1737 | * into the hypervisor, so the request should be handled immediately. |
 | 1738 | */ |
| 1739 | while (!virtqueue_get_buf(vi->cvq, &tmp) && |
| 1740 | !virtqueue_is_broken(vi->cvq)) |
| 1741 | cpu_relax(); |
| 1742 | |
| 1743 | return vi->ctrl->status == VIRTIO_NET_OK; |
| 1744 | } |
| 1745 | |
| 1746 | static int virtnet_set_mac_address(struct net_device *dev, void *p) |
| 1747 | { |
| 1748 | struct virtnet_info *vi = netdev_priv(dev); |
| 1749 | struct virtio_device *vdev = vi->vdev; |
| 1750 | int ret; |
| 1751 | struct sockaddr *addr; |
| 1752 | struct scatterlist sg; |
| 1753 | |
| 1754 | if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY)) |
| 1755 | return -EOPNOTSUPP; |
| 1756 | |
| 1757 | addr = kmemdup(p, sizeof(*addr), GFP_KERNEL); |
| 1758 | if (!addr) |
| 1759 | return -ENOMEM; |
| 1760 | |
| 1761 | ret = eth_prepare_mac_addr_change(dev, addr); |
| 1762 | if (ret) |
| 1763 | goto out; |
| 1764 | |
| 1765 | if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) { |
| 1766 | sg_init_one(&sg, addr->sa_data, dev->addr_len); |
| 1767 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC, |
| 1768 | VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) { |
| 1769 | dev_warn(&vdev->dev, |
| 1770 | "Failed to set mac address by vq command.\n"); |
| 1771 | ret = -EINVAL; |
| 1772 | goto out; |
| 1773 | } |
| 1774 | } else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC) && |
| 1775 | !virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) { |
| 1776 | unsigned int i; |
| 1777 | |
| 1778 | /* Naturally, this has an atomicity problem. */ |
| 1779 | for (i = 0; i < dev->addr_len; i++) |
| 1780 | virtio_cwrite8(vdev, |
| 1781 | offsetof(struct virtio_net_config, mac) + |
| 1782 | i, addr->sa_data[i]); |
| 1783 | } |
| 1784 | |
| 1785 | eth_commit_mac_addr_change(dev, p); |
| 1786 | ret = 0; |
| 1787 | |
| 1788 | out: |
| 1789 | kfree(addr); |
| 1790 | return ret; |
| 1791 | } |
| 1792 | |
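 | | /* Aggregate the per-queue counters into rtnl_link_stats64; the u64_stats |
 | | * begin/retry loops give consistent 64-bit reads on 32-bit hosts. |
 | | */ |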
| 1793 | static void virtnet_stats(struct net_device *dev, |
| 1794 | struct rtnl_link_stats64 *tot) |
| 1795 | { |
| 1796 | struct virtnet_info *vi = netdev_priv(dev); |
| 1797 | unsigned int start; |
| 1798 | int i; |
| 1799 | |
| 1800 | for (i = 0; i < vi->max_queue_pairs; i++) { |
| 1801 | u64 tpackets, tbytes, rpackets, rbytes, rdrops; |
| 1802 | struct receive_queue *rq = &vi->rq[i]; |
| 1803 | struct send_queue *sq = &vi->sq[i]; |
| 1804 | |
| 1805 | do { |
| 1806 | start = u64_stats_fetch_begin_irq(&sq->stats.syncp); |
| 1807 | tpackets = sq->stats.packets; |
| 1808 | tbytes = sq->stats.bytes; |
| 1809 | } while (u64_stats_fetch_retry_irq(&sq->stats.syncp, start)); |
| 1810 | |
| 1811 | do { |
| 1812 | start = u64_stats_fetch_begin_irq(&rq->stats.syncp); |
| 1813 | rpackets = rq->stats.packets; |
| 1814 | rbytes = rq->stats.bytes; |
| 1815 | rdrops = rq->stats.drops; |
| 1816 | } while (u64_stats_fetch_retry_irq(&rq->stats.syncp, start)); |
| 1817 | |
| 1818 | tot->rx_packets += rpackets; |
| 1819 | tot->tx_packets += tpackets; |
| 1820 | tot->rx_bytes += rbytes; |
| 1821 | tot->tx_bytes += tbytes; |
| 1822 | tot->rx_dropped += rdrops; |
| 1823 | } |
| 1824 | |
| 1825 | tot->tx_dropped = dev->stats.tx_dropped; |
| 1826 | tot->tx_fifo_errors = dev->stats.tx_fifo_errors; |
| 1827 | tot->rx_length_errors = dev->stats.rx_length_errors; |
| 1828 | tot->rx_frame_errors = dev->stats.rx_frame_errors; |
| 1829 | } |
| 1830 | |
| 1831 | static void virtnet_ack_link_announce(struct virtnet_info *vi) |
| 1832 | { |
| 1833 | rtnl_lock(); |
| 1834 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE, |
| 1835 | VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL)) |
| 1836 | dev_warn(&vi->dev->dev, "Failed to ack link announce.\n"); |
| 1837 | rtnl_unlock(); |
| 1838 | } |
| 1839 | |
| 1840 | static int _virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs) |
| 1841 | { |
| 1842 | struct scatterlist sg; |
| 1843 | struct net_device *dev = vi->dev; |
| 1844 | |
| 1845 | if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ)) |
| 1846 | return 0; |
| 1847 | |
| 1848 | vi->ctrl->mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs); |
| 1849 | sg_init_one(&sg, &vi->ctrl->mq, sizeof(vi->ctrl->mq)); |
| 1850 | |
| 1851 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ, |
| 1852 | VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) { |
 | 1853 | dev_warn(&dev->dev, "Failed to set number of queue pairs to %d\n", |
| 1854 | queue_pairs); |
| 1855 | return -EINVAL; |
| 1856 | } else { |
| 1857 | vi->curr_queue_pairs = queue_pairs; |
 | 1858 | /* virtnet_open() will refill when the device comes up. */ |
| 1859 | if (dev->flags & IFF_UP) |
| 1860 | schedule_delayed_work(&vi->refill, 0); |
| 1861 | } |
| 1862 | |
| 1863 | return 0; |
| 1864 | } |
| 1865 | |
| 1866 | static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs) |
| 1867 | { |
| 1868 | int err; |
| 1869 | |
| 1870 | rtnl_lock(); |
| 1871 | err = _virtnet_set_queues(vi, queue_pairs); |
| 1872 | rtnl_unlock(); |
| 1873 | return err; |
| 1874 | } |
| 1875 | |
| 1876 | static int virtnet_close(struct net_device *dev) |
| 1877 | { |
| 1878 | struct virtnet_info *vi = netdev_priv(dev); |
| 1879 | int i; |
| 1880 | |
| 1881 | /* Make sure refill_work doesn't re-enable napi! */ |
| 1882 | cancel_delayed_work_sync(&vi->refill); |
| 1883 | |
| 1884 | for (i = 0; i < vi->max_queue_pairs; i++) { |
| 1885 | xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq); |
| 1886 | napi_disable(&vi->rq[i].napi); |
| 1887 | virtnet_napi_tx_disable(&vi->sq[i].napi); |
| 1888 | } |
| 1889 | |
| 1890 | return 0; |
| 1891 | } |
| 1892 | |
| 1893 | static void virtnet_set_rx_mode(struct net_device *dev) |
| 1894 | { |
| 1895 | struct virtnet_info *vi = netdev_priv(dev); |
| 1896 | struct scatterlist sg[2]; |
| 1897 | struct virtio_net_ctrl_mac *mac_data; |
| 1898 | struct netdev_hw_addr *ha; |
| 1899 | int uc_count; |
| 1900 | int mc_count; |
| 1901 | void *buf; |
| 1902 | int i; |
| 1903 | |
| 1904 | /* We can't dynamically set ndo_set_rx_mode, so return gracefully */ |
| 1905 | if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX)) |
| 1906 | return; |
| 1907 | |
| 1908 | vi->ctrl->promisc = ((dev->flags & IFF_PROMISC) != 0); |
| 1909 | vi->ctrl->allmulti = ((dev->flags & IFF_ALLMULTI) != 0); |
| 1910 | |
| 1911 | sg_init_one(sg, &vi->ctrl->promisc, sizeof(vi->ctrl->promisc)); |
| 1912 | |
| 1913 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, |
| 1914 | VIRTIO_NET_CTRL_RX_PROMISC, sg)) |
| 1915 | dev_warn(&dev->dev, "Failed to %sable promisc mode.\n", |
| 1916 | vi->ctrl->promisc ? "en" : "dis"); |
| 1917 | |
| 1918 | sg_init_one(sg, &vi->ctrl->allmulti, sizeof(vi->ctrl->allmulti)); |
| 1919 | |
| 1920 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, |
| 1921 | VIRTIO_NET_CTRL_RX_ALLMULTI, sg)) |
| 1922 | dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n", |
| 1923 | vi->ctrl->allmulti ? "en" : "dis"); |
| 1924 | |
| 1925 | uc_count = netdev_uc_count(dev); |
| 1926 | mc_count = netdev_mc_count(dev); |
| 1927 | /* MAC filter - use one buffer for both lists */ |
| 1928 | buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) + |
| 1929 | (2 * sizeof(mac_data->entries)), GFP_ATOMIC); |
| 1930 | mac_data = buf; |
| 1931 | if (!buf) |
| 1932 | return; |
| 1933 | |
| 1934 | sg_init_table(sg, 2); |
| 1935 | |
| 1936 | /* Store the unicast list and count in the front of the buffer */ |
| 1937 | mac_data->entries = cpu_to_virtio32(vi->vdev, uc_count); |
| 1938 | i = 0; |
| 1939 | netdev_for_each_uc_addr(ha, dev) |
| 1940 | memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); |
| 1941 | |
| 1942 | sg_set_buf(&sg[0], mac_data, |
| 1943 | sizeof(mac_data->entries) + (uc_count * ETH_ALEN)); |
| 1944 | |
| 1945 | /* multicast list and count fill the end */ |
| 1946 | mac_data = (void *)&mac_data->macs[uc_count][0]; |
| 1947 | |
| 1948 | mac_data->entries = cpu_to_virtio32(vi->vdev, mc_count); |
| 1949 | i = 0; |
| 1950 | netdev_for_each_mc_addr(ha, dev) |
| 1951 | memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); |
| 1952 | |
| 1953 | sg_set_buf(&sg[1], mac_data, |
| 1954 | sizeof(mac_data->entries) + (mc_count * ETH_ALEN)); |
| 1955 | |
| 1956 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC, |
| 1957 | VIRTIO_NET_CTRL_MAC_TABLE_SET, sg)) |
| 1958 | dev_warn(&dev->dev, "Failed to set MAC filter table.\n"); |
| 1959 | |
| 1960 | kfree(buf); |
| 1961 | } |
| 1962 | |
| 1963 | static int virtnet_vlan_rx_add_vid(struct net_device *dev, |
| 1964 | __be16 proto, u16 vid) |
| 1965 | { |
| 1966 | struct virtnet_info *vi = netdev_priv(dev); |
| 1967 | struct scatterlist sg; |
| 1968 | |
| 1969 | vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid); |
| 1970 | sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid)); |
| 1971 | |
| 1972 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, |
| 1973 | VIRTIO_NET_CTRL_VLAN_ADD, &sg)) |
| 1974 | dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid); |
| 1975 | return 0; |
| 1976 | } |
| 1977 | |
| 1978 | static int virtnet_vlan_rx_kill_vid(struct net_device *dev, |
| 1979 | __be16 proto, u16 vid) |
| 1980 | { |
| 1981 | struct virtnet_info *vi = netdev_priv(dev); |
| 1982 | struct scatterlist sg; |
| 1983 | |
| 1984 | vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid); |
| 1985 | sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid)); |
| 1986 | |
| 1987 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, |
| 1988 | VIRTIO_NET_CTRL_VLAN_DEL, &sg)) |
| 1989 | dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid); |
| 1990 | return 0; |
| 1991 | } |
| 1992 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 1993 | static void virtnet_clean_affinity(struct virtnet_info *vi) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1994 | { |
| 1995 | int i; |
| 1996 | |
| 1997 | if (vi->affinity_hint_set) { |
| 1998 | for (i = 0; i < vi->max_queue_pairs; i++) { |
| 1999 | virtqueue_set_affinity(vi->rq[i].vq, NULL); |
| 2000 | virtqueue_set_affinity(vi->sq[i].vq, NULL); |
| 2001 | } |
| 2002 | |
| 2003 | vi->affinity_hint_set = false; |
| 2004 | } |
| 2005 | } |
| 2006 | |
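 | | /* Spread online CPUs across the active queue pairs: each pair gets a |
 | | * contiguous group of about num_cpu / curr_queue_pairs CPUs (the first |
 | | * 'stragglers' pairs take one extra), and the same mask is used for both |
 | | * the vq affinity hints and XPS. |
 | | */ |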
| 2007 | static void virtnet_set_affinity(struct virtnet_info *vi) |
| 2008 | { |
| 2009 | cpumask_var_t mask; |
| 2010 | int stragglers; |
| 2011 | int group_size; |
| 2012 | int i, j, cpu; |
| 2013 | int num_cpu; |
| 2014 | int stride; |
| 2015 | |
| 2016 | if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) { |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 2017 | virtnet_clean_affinity(vi); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 2018 | return; |
| 2019 | } |
| 2020 | |
| 2021 | num_cpu = num_online_cpus(); |
| 2022 | stride = max_t(int, num_cpu / vi->curr_queue_pairs, 1); |
| 2023 | stragglers = num_cpu >= vi->curr_queue_pairs ? |
| 2024 | num_cpu % vi->curr_queue_pairs : |
| 2025 | 0; |
| 2026 | cpu = cpumask_next(-1, cpu_online_mask); |
| 2027 | |
| 2028 | for (i = 0; i < vi->curr_queue_pairs; i++) { |
| 2029 | group_size = stride + (i < stragglers ? 1 : 0); |
| 2030 | |
| 2031 | for (j = 0; j < group_size; j++) { |
| 2032 | cpumask_set_cpu(cpu, mask); |
| 2033 | cpu = cpumask_next_wrap(cpu, cpu_online_mask, |
| 2034 | nr_cpu_ids, false); |
| 2035 | } |
| 2036 | virtqueue_set_affinity(vi->rq[i].vq, mask); |
| 2037 | virtqueue_set_affinity(vi->sq[i].vq, mask); |
| 2038 | __netif_set_xps_queue(vi->dev, cpumask_bits(mask), i, false); |
| 2039 | cpumask_clear(mask); |
| 2040 | } |
| 2041 | |
| 2042 | vi->affinity_hint_set = true; |
| 2043 | free_cpumask_var(mask); |
| 2044 | } |
| 2045 | |
| 2046 | static int virtnet_cpu_online(unsigned int cpu, struct hlist_node *node) |
| 2047 | { |
| 2048 | struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info, |
| 2049 | node); |
| 2050 | virtnet_set_affinity(vi); |
| 2051 | return 0; |
| 2052 | } |
| 2053 | |
| 2054 | static int virtnet_cpu_dead(unsigned int cpu, struct hlist_node *node) |
| 2055 | { |
| 2056 | struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info, |
| 2057 | node_dead); |
| 2058 | virtnet_set_affinity(vi); |
| 2059 | return 0; |
| 2060 | } |
| 2061 | |
| 2062 | static int virtnet_cpu_down_prep(unsigned int cpu, struct hlist_node *node) |
| 2063 | { |
| 2064 | struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info, |
| 2065 | node); |
| 2066 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 2067 | virtnet_clean_affinity(vi); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 2068 | return 0; |
| 2069 | } |
| 2070 | |
| 2071 | static enum cpuhp_state virtionet_online; |
| 2072 | |
| 2073 | static int virtnet_cpu_notif_add(struct virtnet_info *vi) |
| 2074 | { |
| 2075 | int ret; |
| 2076 | |
| 2077 | ret = cpuhp_state_add_instance_nocalls(virtionet_online, &vi->node); |
| 2078 | if (ret) |
| 2079 | return ret; |
| 2080 | ret = cpuhp_state_add_instance_nocalls(CPUHP_VIRT_NET_DEAD, |
| 2081 | &vi->node_dead); |
| 2082 | if (!ret) |
| 2083 | return ret; |
| 2084 | cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node); |
| 2085 | return ret; |
| 2086 | } |
| 2087 | |
| 2088 | static void virtnet_cpu_notif_remove(struct virtnet_info *vi) |
| 2089 | { |
| 2090 | cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node); |
| 2091 | cpuhp_state_remove_instance_nocalls(CPUHP_VIRT_NET_DEAD, |
| 2092 | &vi->node_dead); |
| 2093 | } |
| 2094 | |
| 2095 | static void virtnet_get_ringparam(struct net_device *dev, |
| 2096 | struct ethtool_ringparam *ring) |
| 2097 | { |
| 2098 | struct virtnet_info *vi = netdev_priv(dev); |
| 2099 | |
| 2100 | ring->rx_max_pending = virtqueue_get_vring_size(vi->rq[0].vq); |
| 2101 | ring->tx_max_pending = virtqueue_get_vring_size(vi->sq[0].vq); |
| 2102 | ring->rx_pending = ring->rx_max_pending; |
| 2103 | ring->tx_pending = ring->tx_max_pending; |
| 2104 | } |
| 2105 | |
| 2106 | |
| 2107 | static void virtnet_get_drvinfo(struct net_device *dev, |
| 2108 | struct ethtool_drvinfo *info) |
| 2109 | { |
| 2110 | struct virtnet_info *vi = netdev_priv(dev); |
| 2111 | struct virtio_device *vdev = vi->vdev; |
| 2112 | |
| 2113 | strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); |
| 2114 | strlcpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version)); |
| 2115 | strlcpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info)); |
| 2116 | |
| 2117 | } |
| 2118 | |
| 2119 | /* TODO: Eliminate OOO packets during switching */ |
| 2120 | static int virtnet_set_channels(struct net_device *dev, |
| 2121 | struct ethtool_channels *channels) |
| 2122 | { |
| 2123 | struct virtnet_info *vi = netdev_priv(dev); |
| 2124 | u16 queue_pairs = channels->combined_count; |
| 2125 | int err; |
| 2126 | |
| 2127 | /* We don't support separate rx/tx channels. |
| 2128 | * We don't allow setting 'other' channels. |
| 2129 | */ |
| 2130 | if (channels->rx_count || channels->tx_count || channels->other_count) |
| 2131 | return -EINVAL; |
| 2132 | |
| 2133 | if (queue_pairs > vi->max_queue_pairs || queue_pairs == 0) |
| 2134 | return -EINVAL; |
| 2135 | |
 | 2136 | /* For now we don't support modifying channels while XDP is loaded; |
 | 2137 | * also, when XDP is loaded, all RX queues have XDP programs, so we |
 | 2138 | * only need to check a single RX queue. |
 | 2139 | */ |
| 2140 | if (vi->rq[0].xdp_prog) |
| 2141 | return -EINVAL; |
| 2142 | |
| 2143 | get_online_cpus(); |
| 2144 | err = _virtnet_set_queues(vi, queue_pairs); |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame^] | 2145 | if (err) { |
| 2146 | put_online_cpus(); |
| 2147 | goto err; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 2148 | } |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame^] | 2149 | virtnet_set_affinity(vi); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 2150 | put_online_cpus(); |
| 2151 | |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame^] | 2152 | netif_set_real_num_tx_queues(dev, queue_pairs); |
| 2153 | netif_set_real_num_rx_queues(dev, queue_pairs); |
| 2154 | err: |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 2155 | return err; |
| 2156 | } |
| 2157 | |
| 2158 | static void virtnet_get_strings(struct net_device *dev, u32 stringset, u8 *data) |
| 2159 | { |
| 2160 | struct virtnet_info *vi = netdev_priv(dev); |
| 2161 | char *p = (char *)data; |
| 2162 | unsigned int i, j; |
| 2163 | |
| 2164 | switch (stringset) { |
| 2165 | case ETH_SS_STATS: |
| 2166 | for (i = 0; i < vi->curr_queue_pairs; i++) { |
| 2167 | for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++) { |
| 2168 | snprintf(p, ETH_GSTRING_LEN, "rx_queue_%u_%s", |
| 2169 | i, virtnet_rq_stats_desc[j].desc); |
| 2170 | p += ETH_GSTRING_LEN; |
| 2171 | } |
| 2172 | } |
| 2173 | |
| 2174 | for (i = 0; i < vi->curr_queue_pairs; i++) { |
| 2175 | for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++) { |
| 2176 | snprintf(p, ETH_GSTRING_LEN, "tx_queue_%u_%s", |
| 2177 | i, virtnet_sq_stats_desc[j].desc); |
| 2178 | p += ETH_GSTRING_LEN; |
| 2179 | } |
| 2180 | } |
| 2181 | break; |
| 2182 | } |
| 2183 | } |
| 2184 | |
| 2185 | static int virtnet_get_sset_count(struct net_device *dev, int sset) |
| 2186 | { |
| 2187 | struct virtnet_info *vi = netdev_priv(dev); |
| 2188 | |
| 2189 | switch (sset) { |
| 2190 | case ETH_SS_STATS: |
| 2191 | return vi->curr_queue_pairs * (VIRTNET_RQ_STATS_LEN + |
| 2192 | VIRTNET_SQ_STATS_LEN); |
| 2193 | default: |
| 2194 | return -EOPNOTSUPP; |
| 2195 | } |
| 2196 | } |
| 2197 | |
| 2198 | static void virtnet_get_ethtool_stats(struct net_device *dev, |
| 2199 | struct ethtool_stats *stats, u64 *data) |
| 2200 | { |
| 2201 | struct virtnet_info *vi = netdev_priv(dev); |
| 2202 | unsigned int idx = 0, start, i, j; |
| 2203 | const u8 *stats_base; |
| 2204 | size_t offset; |
| 2205 | |
| 2206 | for (i = 0; i < vi->curr_queue_pairs; i++) { |
| 2207 | struct receive_queue *rq = &vi->rq[i]; |
| 2208 | |
| 2209 | stats_base = (u8 *)&rq->stats; |
| 2210 | do { |
| 2211 | start = u64_stats_fetch_begin_irq(&rq->stats.syncp); |
| 2212 | for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++) { |
| 2213 | offset = virtnet_rq_stats_desc[j].offset; |
| 2214 | data[idx + j] = *(u64 *)(stats_base + offset); |
| 2215 | } |
| 2216 | } while (u64_stats_fetch_retry_irq(&rq->stats.syncp, start)); |
| 2217 | idx += VIRTNET_RQ_STATS_LEN; |
| 2218 | } |
| 2219 | |
| 2220 | for (i = 0; i < vi->curr_queue_pairs; i++) { |
| 2221 | struct send_queue *sq = &vi->sq[i]; |
| 2222 | |
| 2223 | stats_base = (u8 *)&sq->stats; |
| 2224 | do { |
| 2225 | start = u64_stats_fetch_begin_irq(&sq->stats.syncp); |
| 2226 | for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++) { |
| 2227 | offset = virtnet_sq_stats_desc[j].offset; |
| 2228 | data[idx + j] = *(u64 *)(stats_base + offset); |
| 2229 | } |
| 2230 | } while (u64_stats_fetch_retry_irq(&sq->stats.syncp, start)); |
| 2231 | idx += VIRTNET_SQ_STATS_LEN; |
| 2232 | } |
| 2233 | } |
| 2234 | |
| 2235 | static void virtnet_get_channels(struct net_device *dev, |
| 2236 | struct ethtool_channels *channels) |
| 2237 | { |
| 2238 | struct virtnet_info *vi = netdev_priv(dev); |
| 2239 | |
| 2240 | channels->combined_count = vi->curr_queue_pairs; |
| 2241 | channels->max_combined = vi->max_queue_pairs; |
| 2242 | channels->max_other = 0; |
| 2243 | channels->rx_count = 0; |
| 2244 | channels->tx_count = 0; |
| 2245 | channels->other_count = 0; |
| 2246 | } |
| 2247 | |
| 2248 | /* Check if the user is trying to change anything besides speed/duplex */ |
| 2249 | static bool |
| 2250 | virtnet_validate_ethtool_cmd(const struct ethtool_link_ksettings *cmd) |
| 2251 | { |
| 2252 | struct ethtool_link_ksettings diff1 = *cmd; |
| 2253 | struct ethtool_link_ksettings diff2 = {}; |
| 2254 | |
 | 2255 | /* cmd is always set, so we need to clear it and validate the port type; |
 | 2256 | * also, without autonegotiation we can ignore advertising. |
 | 2257 | */ |
| 2258 | diff1.base.speed = 0; |
| 2259 | diff2.base.port = PORT_OTHER; |
| 2260 | ethtool_link_ksettings_zero_link_mode(&diff1, advertising); |
| 2261 | diff1.base.duplex = 0; |
| 2262 | diff1.base.cmd = 0; |
| 2263 | diff1.base.link_mode_masks_nwords = 0; |
| 2264 | |
| 2265 | return !memcmp(&diff1.base, &diff2.base, sizeof(diff1.base)) && |
| 2266 | bitmap_empty(diff1.link_modes.supported, |
| 2267 | __ETHTOOL_LINK_MODE_MASK_NBITS) && |
| 2268 | bitmap_empty(diff1.link_modes.advertising, |
| 2269 | __ETHTOOL_LINK_MODE_MASK_NBITS) && |
| 2270 | bitmap_empty(diff1.link_modes.lp_advertising, |
| 2271 | __ETHTOOL_LINK_MODE_MASK_NBITS); |
| 2272 | } |
| 2273 | |
| 2274 | static int virtnet_set_link_ksettings(struct net_device *dev, |
| 2275 | const struct ethtool_link_ksettings *cmd) |
| 2276 | { |
| 2277 | struct virtnet_info *vi = netdev_priv(dev); |
| 2278 | u32 speed; |
| 2279 | |
| 2280 | speed = cmd->base.speed; |
| 2281 | /* don't allow custom speed and duplex */ |
| 2282 | if (!ethtool_validate_speed(speed) || |
| 2283 | !ethtool_validate_duplex(cmd->base.duplex) || |
| 2284 | !virtnet_validate_ethtool_cmd(cmd)) |
| 2285 | return -EINVAL; |
| 2286 | vi->speed = speed; |
| 2287 | vi->duplex = cmd->base.duplex; |
| 2288 | |
| 2289 | return 0; |
| 2290 | } |
| 2291 | |
| 2292 | static int virtnet_get_link_ksettings(struct net_device *dev, |
| 2293 | struct ethtool_link_ksettings *cmd) |
| 2294 | { |
| 2295 | struct virtnet_info *vi = netdev_priv(dev); |
| 2296 | |
| 2297 | cmd->base.speed = vi->speed; |
| 2298 | cmd->base.duplex = vi->duplex; |
| 2299 | cmd->base.port = PORT_OTHER; |
| 2300 | |
| 2301 | return 0; |
| 2302 | } |
| 2303 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 2304 | static int virtnet_set_coalesce(struct net_device *dev, |
| 2305 | struct ethtool_coalesce *ec) |
| 2306 | { |
| 2307 | struct ethtool_coalesce ec_default = { |
| 2308 | .cmd = ETHTOOL_SCOALESCE, |
| 2309 | .rx_max_coalesced_frames = 1, |
| 2310 | }; |
| 2311 | struct virtnet_info *vi = netdev_priv(dev); |
| 2312 | int i, napi_weight; |
| 2313 | |
| 2314 | if (ec->tx_max_coalesced_frames > 1) |
| 2315 | return -EINVAL; |
| 2316 | |
| 2317 | ec_default.tx_max_coalesced_frames = ec->tx_max_coalesced_frames; |
| 2318 | napi_weight = ec->tx_max_coalesced_frames ? NAPI_POLL_WEIGHT : 0; |
| 2319 | |
| 2320 | /* disallow changes to fields not explicitly tested above */ |
| 2321 | if (memcmp(ec, &ec_default, sizeof(ec_default))) |
| 2322 | return -EINVAL; |
| 2323 | |
| 2324 | if (napi_weight ^ vi->sq[0].napi.weight) { |
| 2325 | if (dev->flags & IFF_UP) |
| 2326 | return -EBUSY; |
| 2327 | for (i = 0; i < vi->max_queue_pairs; i++) |
| 2328 | vi->sq[i].napi.weight = napi_weight; |
| 2329 | } |
| 2330 | |
| 2331 | return 0; |
| 2332 | } |
| 2333 | |
| 2334 | static int virtnet_get_coalesce(struct net_device *dev, |
| 2335 | struct ethtool_coalesce *ec) |
| 2336 | { |
| 2337 | struct ethtool_coalesce ec_default = { |
| 2338 | .cmd = ETHTOOL_GCOALESCE, |
| 2339 | .rx_max_coalesced_frames = 1, |
| 2340 | }; |
| 2341 | struct virtnet_info *vi = netdev_priv(dev); |
| 2342 | |
| 2343 | memcpy(ec, &ec_default, sizeof(ec_default)); |
| 2344 | |
| 2345 | if (vi->sq[0].napi.weight) |
| 2346 | ec->tx_max_coalesced_frames = 1; |
| 2347 | |
| 2348 | return 0; |
| 2349 | } |
| 2350 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 2351 | static void virtnet_init_settings(struct net_device *dev) |
| 2352 | { |
| 2353 | struct virtnet_info *vi = netdev_priv(dev); |
| 2354 | |
| 2355 | vi->speed = SPEED_UNKNOWN; |
| 2356 | vi->duplex = DUPLEX_UNKNOWN; |
| 2357 | } |
| 2358 | |
| 2359 | static void virtnet_update_settings(struct virtnet_info *vi) |
| 2360 | { |
| 2361 | u32 speed; |
| 2362 | u8 duplex; |
| 2363 | |
| 2364 | if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_SPEED_DUPLEX)) |
| 2365 | return; |
| 2366 | |
| 2367 | speed = virtio_cread32(vi->vdev, offsetof(struct virtio_net_config, |
| 2368 | speed)); |
| 2369 | if (ethtool_validate_speed(speed)) |
| 2370 | vi->speed = speed; |
| 2371 | duplex = virtio_cread8(vi->vdev, offsetof(struct virtio_net_config, |
| 2372 | duplex)); |
| 2373 | if (ethtool_validate_duplex(duplex)) |
| 2374 | vi->duplex = duplex; |
| 2375 | } |
| 2376 | |
| 2377 | static const struct ethtool_ops virtnet_ethtool_ops = { |
| 2378 | .get_drvinfo = virtnet_get_drvinfo, |
| 2379 | .get_link = ethtool_op_get_link, |
| 2380 | .get_ringparam = virtnet_get_ringparam, |
| 2381 | .get_strings = virtnet_get_strings, |
| 2382 | .get_sset_count = virtnet_get_sset_count, |
| 2383 | .get_ethtool_stats = virtnet_get_ethtool_stats, |
| 2384 | .set_channels = virtnet_set_channels, |
| 2385 | .get_channels = virtnet_get_channels, |
| 2386 | .get_ts_info = ethtool_op_get_ts_info, |
| 2387 | .get_link_ksettings = virtnet_get_link_ksettings, |
| 2388 | .set_link_ksettings = virtnet_set_link_ksettings, |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 2389 | .set_coalesce = virtnet_set_coalesce, |
| 2390 | .get_coalesce = virtnet_get_coalesce, |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 2391 | }; |
| 2392 | |
| 2393 | static void virtnet_freeze_down(struct virtio_device *vdev) |
| 2394 | { |
| 2395 | struct virtnet_info *vi = vdev->priv; |
| 2396 | int i; |
| 2397 | |
| 2398 | /* Make sure no work handler is accessing the device */ |
| 2399 | flush_work(&vi->config_work); |
| 2400 | |
| 2401 | netif_tx_lock_bh(vi->dev); |
| 2402 | netif_device_detach(vi->dev); |
| 2403 | netif_tx_unlock_bh(vi->dev); |
| 2404 | cancel_delayed_work_sync(&vi->refill); |
| 2405 | |
| 2406 | if (netif_running(vi->dev)) { |
| 2407 | for (i = 0; i < vi->max_queue_pairs; i++) { |
| 2408 | napi_disable(&vi->rq[i].napi); |
| 2409 | virtnet_napi_tx_disable(&vi->sq[i].napi); |
| 2410 | } |
| 2411 | } |
| 2412 | } |
| 2413 | |
| 2414 | static int init_vqs(struct virtnet_info *vi); |
| 2415 | |
| 2416 | static int virtnet_restore_up(struct virtio_device *vdev) |
| 2417 | { |
| 2418 | struct virtnet_info *vi = vdev->priv; |
| 2419 | int err, i; |
| 2420 | |
| 2421 | err = init_vqs(vi); |
| 2422 | if (err) |
| 2423 | return err; |
| 2424 | |
| 2425 | virtio_device_ready(vdev); |
| 2426 | |
| 2427 | if (netif_running(vi->dev)) { |
| 2428 | for (i = 0; i < vi->curr_queue_pairs; i++) |
| 2429 | if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL)) |
| 2430 | schedule_delayed_work(&vi->refill, 0); |
| 2431 | |
| 2432 | for (i = 0; i < vi->max_queue_pairs; i++) { |
| 2433 | virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); |
| 2434 | virtnet_napi_tx_enable(vi, vi->sq[i].vq, |
| 2435 | &vi->sq[i].napi); |
| 2436 | } |
| 2437 | } |
| 2438 | |
| 2439 | netif_tx_lock_bh(vi->dev); |
| 2440 | netif_device_attach(vi->dev); |
| 2441 | netif_tx_unlock_bh(vi->dev); |
| 2442 | return err; |
| 2443 | } |
| 2444 | |
| 2445 | static int virtnet_set_guest_offloads(struct virtnet_info *vi, u64 offloads) |
| 2446 | { |
| 2447 | struct scatterlist sg; |
| 2448 | vi->ctrl->offloads = cpu_to_virtio64(vi->vdev, offloads); |
| 2449 | |
| 2450 | sg_init_one(&sg, &vi->ctrl->offloads, sizeof(vi->ctrl->offloads)); |
| 2451 | |
| 2452 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS, |
| 2453 | VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, &sg)) { |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 2454 | dev_warn(&vi->dev->dev, "Failed to set guest offloads.\n"); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 2455 | return -EINVAL; |
| 2456 | } |
| 2457 | |
| 2458 | return 0; |
| 2459 | } |
| 2460 | |
| 2461 | static int virtnet_clear_guest_offloads(struct virtnet_info *vi) |
| 2462 | { |
| 2463 | u64 offloads = 0; |
| 2464 | |
| 2465 | if (!vi->guest_offloads) |
| 2466 | return 0; |
| 2467 | |
| 2468 | return virtnet_set_guest_offloads(vi, offloads); |
| 2469 | } |
| 2470 | |
| 2471 | static int virtnet_restore_guest_offloads(struct virtnet_info *vi) |
| 2472 | { |
| 2473 | u64 offloads = vi->guest_offloads; |
| 2474 | |
| 2475 | if (!vi->guest_offloads) |
| 2476 | return 0; |
| 2477 | |
| 2478 | return virtnet_set_guest_offloads(vi, offloads); |
| 2479 | } |
| 2480 | |
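 | | /* Attach or detach an XDP program. Requires that guest offloads are |
 | | * switchable (VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) or already absent, that |
 | | * header and data share a page (any_header_sg with mergeable RX buffers), |
 | | * and that the MTU fits in a page. One extra TX queue per CPU is |
 | | * reserved for XDP_TX; if the device lacks enough queue pairs, XDP falls |
 | | * back to a slower, locked TX mode. |
 | | */ |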
| 2481 | static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog, |
| 2482 | struct netlink_ext_ack *extack) |
| 2483 | { |
| 2484 | unsigned long int max_sz = PAGE_SIZE - sizeof(struct padded_vnet_hdr); |
| 2485 | struct virtnet_info *vi = netdev_priv(dev); |
| 2486 | struct bpf_prog *old_prog; |
| 2487 | u16 xdp_qp = 0, curr_qp; |
| 2488 | int i, err; |
| 2489 | |
| 2490 | if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) |
| 2491 | && (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) || |
| 2492 | virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) || |
| 2493 | virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) || |
| 2494 | virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) || |
| 2495 | virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM))) { |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame^] | 2496 | NL_SET_ERR_MSG_MOD(extack, "Can't set XDP while host is implementing GRO_HW/CSUM, disable GRO_HW/CSUM first"); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 2497 | return -EOPNOTSUPP; |
| 2498 | } |
| 2499 | |
| 2500 | if (vi->mergeable_rx_bufs && !vi->any_header_sg) { |
| 2501 | NL_SET_ERR_MSG_MOD(extack, "XDP expects header/data in single page, any_header_sg required"); |
| 2502 | return -EINVAL; |
| 2503 | } |
| 2504 | |
| 2505 | if (dev->mtu > max_sz) { |
| 2506 | NL_SET_ERR_MSG_MOD(extack, "MTU too large to enable XDP"); |
| 2507 | netdev_warn(dev, "XDP requires MTU less than %lu\n", max_sz); |
| 2508 | return -EINVAL; |
| 2509 | } |
| 2510 | |
| 2511 | curr_qp = vi->curr_queue_pairs - vi->xdp_queue_pairs; |
| 2512 | if (prog) |
| 2513 | xdp_qp = nr_cpu_ids; |
| 2514 | |
| 2515 | /* XDP requires extra queues for XDP_TX */ |
| 2516 | if (curr_qp + xdp_qp > vi->max_queue_pairs) { |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame^] | 2517 | netdev_warn(dev, "XDP request %i queues but max is %i. XDP_TX and XDP_REDIRECT will operate in a slower locked tx mode.\n", |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 2518 | curr_qp + xdp_qp, vi->max_queue_pairs); |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame^] | 2519 | xdp_qp = 0; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 2520 | } |
| 2521 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 2522 | old_prog = rtnl_dereference(vi->rq[0].xdp_prog); |
| 2523 | if (!prog && !old_prog) |
| 2524 | return 0; |
| 2525 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 2526 | if (prog) { |
| 2527 | prog = bpf_prog_add(prog, vi->max_queue_pairs - 1); |
| 2528 | if (IS_ERR(prog)) |
| 2529 | return PTR_ERR(prog); |
| 2530 | } |
| 2531 | |
| 2532 | /* Make sure NAPI is not using any XDP TX queues for RX. */ |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 2533 | if (netif_running(dev)) { |
| 2534 | for (i = 0; i < vi->max_queue_pairs; i++) { |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 2535 | napi_disable(&vi->rq[i].napi); |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 2536 | virtnet_napi_tx_disable(&vi->sq[i].napi); |
| 2537 | } |
| 2538 | } |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 2539 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 2540 | if (!prog) { |
| 2541 | for (i = 0; i < vi->max_queue_pairs; i++) { |
| 2542 | rcu_assign_pointer(vi->rq[i].xdp_prog, prog); |
| 2543 | if (i == 0) |
| 2544 | virtnet_restore_guest_offloads(vi); |
| 2545 | } |
| 2546 | synchronize_net(); |
| 2547 | } |
| 2548 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 2549 | err = _virtnet_set_queues(vi, curr_qp + xdp_qp); |
| 2550 | if (err) |
| 2551 | goto err; |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 2552 | netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 2553 | vi->xdp_queue_pairs = xdp_qp; |
| 2554 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 2555 | if (prog) { |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame^] | 2556 | vi->xdp_enabled = true; |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 2557 | for (i = 0; i < vi->max_queue_pairs; i++) { |
| 2558 | rcu_assign_pointer(vi->rq[i].xdp_prog, prog); |
| 2559 | if (i == 0 && !old_prog) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 2560 | virtnet_clear_guest_offloads(vi); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 2561 | } |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame^] | 2562 | } else { |
| 2563 | vi->xdp_enabled = false; |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 2564 | } |
| 2565 | |
| 2566 | for (i = 0; i < vi->max_queue_pairs; i++) { |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 2567 | if (old_prog) |
| 2568 | bpf_prog_put(old_prog); |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 2569 | if (netif_running(dev)) { |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 2570 | virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 2571 | virtnet_napi_tx_enable(vi, vi->sq[i].vq, |
| 2572 | &vi->sq[i].napi); |
| 2573 | } |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 2574 | } |
| 2575 | |
| 2576 | return 0; |
| 2577 | |
| 2578 | err: |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 2579 | if (!prog) { |
| 2580 | virtnet_clear_guest_offloads(vi); |
| 2581 | for (i = 0; i < vi->max_queue_pairs; i++) |
| 2582 | rcu_assign_pointer(vi->rq[i].xdp_prog, old_prog); |
| 2583 | } |
| 2584 | |
| 2585 | if (netif_running(dev)) { |
| 2586 | for (i = 0; i < vi->max_queue_pairs; i++) { |
| 2587 | virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); |
| 2588 | virtnet_napi_tx_enable(vi, vi->sq[i].vq, |
| 2589 | &vi->sq[i].napi); |
| 2590 | } |
| 2591 | } |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 2592 | if (prog) |
| 2593 | bpf_prog_sub(prog, vi->max_queue_pairs - 1); |
| 2594 | return err; |
| 2595 | } |
| 2596 | |
| 2597 | static u32 virtnet_xdp_query(struct net_device *dev) |
| 2598 | { |
| 2599 | struct virtnet_info *vi = netdev_priv(dev); |
| 2600 | const struct bpf_prog *xdp_prog; |
| 2601 | int i; |
| 2602 | |
| 2603 | for (i = 0; i < vi->max_queue_pairs; i++) { |
| 2604 | xdp_prog = rtnl_dereference(vi->rq[i].xdp_prog); |
| 2605 | if (xdp_prog) |
| 2606 | return xdp_prog->aux->id; |
| 2607 | } |
| 2608 | return 0; |
| 2609 | } |
| 2610 | |
| 2611 | static int virtnet_xdp(struct net_device *dev, struct netdev_bpf *xdp) |
| 2612 | { |
| 2613 | switch (xdp->command) { |
| 2614 | case XDP_SETUP_PROG: |
| 2615 | return virtnet_xdp_set(dev, xdp->prog, xdp->extack); |
| 2616 | case XDP_QUERY_PROG: |
| 2617 | xdp->prog_id = virtnet_xdp_query(dev); |
| 2618 | return 0; |
| 2619 | default: |
| 2620 | return -EINVAL; |
| 2621 | } |
| 2622 | } |
| 2623 | |
| 2624 | static int virtnet_get_phys_port_name(struct net_device *dev, char *buf, |
| 2625 | size_t len) |
| 2626 | { |
| 2627 | struct virtnet_info *vi = netdev_priv(dev); |
| 2628 | int ret; |
| 2629 | |
| 2630 | if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY)) |
| 2631 | return -EOPNOTSUPP; |
| 2632 | |
| 2633 | ret = snprintf(buf, len, "sby"); |
| 2634 | if (ret >= len) |
| 2635 | return -EOPNOTSUPP; |
| 2636 | |
| 2637 | return 0; |
| 2638 | } |
| 2639 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 2640 | static int virtnet_set_features(struct net_device *dev, |
| 2641 | netdev_features_t features) |
| 2642 | { |
| 2643 | struct virtnet_info *vi = netdev_priv(dev); |
| 2644 | u64 offloads; |
| 2645 | int err; |
| 2646 | |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame^] | 2647 | if ((dev->features ^ features) & NETIF_F_GRO_HW) { |
| 2648 | if (vi->xdp_enabled) |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 2649 | return -EBUSY; |
| 2650 | |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame^] | 2651 | if (features & NETIF_F_GRO_HW) |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 2652 | offloads = vi->guest_offloads_capable; |
| 2653 | else |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame^] | 2654 | offloads = vi->guest_offloads_capable & |
| 2655 | ~GUEST_OFFLOAD_GRO_HW_MASK; |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 2656 | |
| 2657 | err = virtnet_set_guest_offloads(vi, offloads); |
| 2658 | if (err) |
| 2659 | return err; |
| 2660 | vi->guest_offloads = offloads; |
| 2661 | } |
| 2662 | |
| 2663 | return 0; |
| 2664 | } |
| 2665 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 2666 | static const struct net_device_ops virtnet_netdev = { |
| 2667 | .ndo_open = virtnet_open, |
| 2668 | .ndo_stop = virtnet_close, |
| 2669 | .ndo_start_xmit = start_xmit, |
| 2670 | .ndo_validate_addr = eth_validate_addr, |
| 2671 | .ndo_set_mac_address = virtnet_set_mac_address, |
| 2672 | .ndo_set_rx_mode = virtnet_set_rx_mode, |
| 2673 | .ndo_get_stats64 = virtnet_stats, |
| 2674 | .ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid, |
| 2675 | .ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid, |
| 2676 | .ndo_bpf = virtnet_xdp, |
| 2677 | .ndo_xdp_xmit = virtnet_xdp_xmit, |
| 2678 | .ndo_features_check = passthru_features_check, |
| 2679 | .ndo_get_phys_port_name = virtnet_get_phys_port_name, |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 2680 | .ndo_set_features = virtnet_set_features, |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 2681 | }; |
| 2682 | |
| 2683 | static void virtnet_config_changed_work(struct work_struct *work) |
| 2684 | { |
| 2685 | struct virtnet_info *vi = |
| 2686 | container_of(work, struct virtnet_info, config_work); |
| 2687 | u16 v; |
| 2688 | |
| 2689 | if (virtio_cread_feature(vi->vdev, VIRTIO_NET_F_STATUS, |
| 2690 | struct virtio_net_config, status, &v) < 0) |
| 2691 | return; |
| 2692 | |
| 2693 | if (v & VIRTIO_NET_S_ANNOUNCE) { |
| 2694 | netdev_notify_peers(vi->dev); |
| 2695 | virtnet_ack_link_announce(vi); |
| 2696 | } |
| 2697 | |
| 2698 | /* Ignore unknown (future) status bits */ |
| 2699 | v &= VIRTIO_NET_S_LINK_UP; |
| 2700 | |
| 2701 | if (vi->status == v) |
| 2702 | return; |
| 2703 | |
| 2704 | vi->status = v; |
| 2705 | |
| 2706 | if (vi->status & VIRTIO_NET_S_LINK_UP) { |
| 2707 | virtnet_update_settings(vi); |
| 2708 | netif_carrier_on(vi->dev); |
| 2709 | netif_tx_wake_all_queues(vi->dev); |
| 2710 | } else { |
| 2711 | netif_carrier_off(vi->dev); |
| 2712 | netif_tx_stop_all_queues(vi->dev); |
| 2713 | } |
| 2714 | } |
| 2715 | |
| 2716 | static void virtnet_config_changed(struct virtio_device *vdev) |
| 2717 | { |
| 2718 | struct virtnet_info *vi = vdev->priv; |
| 2719 | |
| 2720 | schedule_work(&vi->config_work); |
| 2721 | } |
| 2722 | |
| 2723 | static void virtnet_free_queues(struct virtnet_info *vi) |
| 2724 | { |
| 2725 | int i; |
| 2726 | |
| 2727 | for (i = 0; i < vi->max_queue_pairs; i++) { |
| 2728 | napi_hash_del(&vi->rq[i].napi); |
| 2729 | netif_napi_del(&vi->rq[i].napi); |
| 2730 | netif_napi_del(&vi->sq[i].napi); |
| 2731 | } |
| 2732 | |
 | 2733 | /* We called napi_hash_del() before netif_napi_del(), so |
 | 2734 | * we need to respect an RCU grace period before freeing vi->rq. |
 | 2735 | */ |
| 2736 | synchronize_net(); |
| 2737 | |
| 2738 | kfree(vi->rq); |
| 2739 | kfree(vi->sq); |
| 2740 | kfree(vi->ctrl); |
| 2741 | } |
| 2742 | |
| 2743 | static void _free_receive_bufs(struct virtnet_info *vi) |
| 2744 | { |
| 2745 | struct bpf_prog *old_prog; |
| 2746 | int i; |
| 2747 | |
| 2748 | for (i = 0; i < vi->max_queue_pairs; i++) { |
| 2749 | while (vi->rq[i].pages) |
| 2750 | __free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0); |
| 2751 | |
| 2752 | old_prog = rtnl_dereference(vi->rq[i].xdp_prog); |
| 2753 | RCU_INIT_POINTER(vi->rq[i].xdp_prog, NULL); |
| 2754 | if (old_prog) |
| 2755 | bpf_prog_put(old_prog); |
| 2756 | } |
| 2757 | } |
| 2758 | |
| 2759 | static void free_receive_bufs(struct virtnet_info *vi) |
| 2760 | { |
| 2761 | rtnl_lock(); |
| 2762 | _free_receive_bufs(vi); |
| 2763 | rtnl_unlock(); |
| 2764 | } |
| 2765 | |
| 2766 | static void free_receive_page_frags(struct virtnet_info *vi) |
| 2767 | { |
| 2768 | int i; |
| 2769 | for (i = 0; i < vi->max_queue_pairs; i++) |
| 2770 | if (vi->rq[i].alloc_frag.page) |
| 2771 | put_page(vi->rq[i].alloc_frag.page); |
| 2772 | } |
| 2773 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 2774 | static void free_unused_bufs(struct virtnet_info *vi) |
| 2775 | { |
| 2776 | void *buf; |
| 2777 | int i; |
| 2778 | |
| 2779 | for (i = 0; i < vi->max_queue_pairs; i++) { |
| 2780 | struct virtqueue *vq = vi->sq[i].vq; |
| 2781 | while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) { |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 2782 | if (!is_xdp_frame(buf)) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 2783 | dev_kfree_skb(buf); |
| 2784 | else |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 2785 | xdp_return_frame(ptr_to_xdp(buf)); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 2786 | } |
| 2787 | } |
| 2788 | |
| 2789 | for (i = 0; i < vi->max_queue_pairs; i++) { |
| 2790 | struct virtqueue *vq = vi->rq[i].vq; |
| 2791 | |
| 2792 | while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) { |
| 2793 | if (vi->mergeable_rx_bufs) { |
| 2794 | put_page(virt_to_head_page(buf)); |
| 2795 | } else if (vi->big_packets) { |
| 2796 | give_pages(&vi->rq[i], buf); |
| 2797 | } else { |
| 2798 | put_page(virt_to_head_page(buf)); |
| 2799 | } |
| 2800 | } |
| 2801 | } |
| 2802 | } |
| 2803 | |
| 2804 | static void virtnet_del_vqs(struct virtnet_info *vi) |
| 2805 | { |
| 2806 | struct virtio_device *vdev = vi->vdev; |
| 2807 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 2808 | virtnet_clean_affinity(vi); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 2809 | |
| 2810 | vdev->config->del_vqs(vdev); |
| 2811 | |
| 2812 | virtnet_free_queues(vi); |
| 2813 | } |
| 2814 | |
| 2815 | /* How large should a single buffer be so a queue full of these can fit at |
| 2816 | * least one full packet? |
| 2817 | * Logic below assumes the mergeable buffer header is used. |
| 2818 | */ |
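 | | /* Worked example (assuming a 256-entry ring, a 1500-byte max_mtu and the |
 | | * 12-byte mergeable header): buf_len = 12 + ETH_HLEN (14) + VLAN_HLEN (4) |
 | | * + 1500 = 1530, min_buf_len = DIV_ROUND_UP(1530, 256) = 6, and the |
 | | * result is clamped up to GOOD_PACKET_LEN (1518). |
 | | */ |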
| 2819 | static unsigned int mergeable_min_buf_len(struct virtnet_info *vi, struct virtqueue *vq) |
| 2820 | { |
| 2821 | const unsigned int hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf); |
| 2822 | unsigned int rq_size = virtqueue_get_vring_size(vq); |
| 2823 | unsigned int packet_len = vi->big_packets ? IP_MAX_MTU : vi->dev->max_mtu; |
| 2824 | unsigned int buf_len = hdr_len + ETH_HLEN + VLAN_HLEN + packet_len; |
| 2825 | unsigned int min_buf_len = DIV_ROUND_UP(buf_len, rq_size); |
| 2826 | |
| 2827 | return max(max(min_buf_len, hdr_len) - hdr_len, |
| 2828 | (unsigned int)GOOD_PACKET_LEN); |
| 2829 | } |
| 2830 | |
| 2831 | static int virtnet_find_vqs(struct virtnet_info *vi) |
| 2832 | { |
| 2833 | vq_callback_t **callbacks; |
| 2834 | struct virtqueue **vqs; |
| 2835 | int ret = -ENOMEM; |
| 2836 | int i, total_vqs; |
| 2837 | const char **names; |
| 2838 | bool *ctx; |
| 2839 | |
| 2840 | /* We expect 1 RX virtqueue followed by 1 TX virtqueue, followed by |
| 2841 | * possible N-1 RX/TX queue pairs used in multiqueue mode, followed by |
| 2842 | * possible control vq. |
| 2843 | */ |
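 | | /* For example, with 2 queue pairs and a control vq the order is: |
 | | * vq0 = rx0, vq1 = tx0, vq2 = rx1, vq3 = tx1, vq4 = ctrl. |
 | | */ |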
| 2844 | total_vqs = vi->max_queue_pairs * 2 + |
| 2845 | virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ); |
| 2846 | |
| 2847 | /* Allocate space for find_vqs parameters */ |
| 2848 | vqs = kcalloc(total_vqs, sizeof(*vqs), GFP_KERNEL); |
| 2849 | if (!vqs) |
| 2850 | goto err_vq; |
| 2851 | callbacks = kmalloc_array(total_vqs, sizeof(*callbacks), GFP_KERNEL); |
| 2852 | if (!callbacks) |
| 2853 | goto err_callback; |
| 2854 | names = kmalloc_array(total_vqs, sizeof(*names), GFP_KERNEL); |
| 2855 | if (!names) |
| 2856 | goto err_names; |
| 2857 | if (!vi->big_packets || vi->mergeable_rx_bufs) { |
| 2858 | ctx = kcalloc(total_vqs, sizeof(*ctx), GFP_KERNEL); |
| 2859 | if (!ctx) |
| 2860 | goto err_ctx; |
| 2861 | } else { |
| 2862 | ctx = NULL; |
| 2863 | } |
| 2864 | |
| 2865 | /* Parameters for control virtqueue, if any */ |
| 2866 | if (vi->has_cvq) { |
| 2867 | callbacks[total_vqs - 1] = NULL; |
| 2868 | names[total_vqs - 1] = "control"; |
| 2869 | } |
| 2870 | |
| 2871 | /* Allocate/initialize parameters for send/receive virtqueues */ |
| 2872 | for (i = 0; i < vi->max_queue_pairs; i++) { |
| 2873 | callbacks[rxq2vq(i)] = skb_recv_done; |
| 2874 | callbacks[txq2vq(i)] = skb_xmit_done; |
| 2875 | sprintf(vi->rq[i].name, "input.%d", i); |
| 2876 | sprintf(vi->sq[i].name, "output.%d", i); |
| 2877 | names[rxq2vq(i)] = vi->rq[i].name; |
| 2878 | names[txq2vq(i)] = vi->sq[i].name; |
| 2879 | if (ctx) |
| 2880 | ctx[rxq2vq(i)] = true; |
| 2881 | } |
| 2882 | |
| 2883 | ret = vi->vdev->config->find_vqs(vi->vdev, total_vqs, vqs, callbacks, |
| 2884 | names, ctx, NULL); |
| 2885 | if (ret) |
| 2886 | goto err_find; |
| 2887 | |
| 2888 | if (vi->has_cvq) { |
| 2889 | vi->cvq = vqs[total_vqs - 1]; |
| 2890 | if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN)) |
| 2891 | vi->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; |
| 2892 | } |
| 2893 | |
| 2894 | for (i = 0; i < vi->max_queue_pairs; i++) { |
| 2895 | vi->rq[i].vq = vqs[rxq2vq(i)]; |
| 2896 | vi->rq[i].min_buf_len = mergeable_min_buf_len(vi, vi->rq[i].vq); |
| 2897 | vi->sq[i].vq = vqs[txq2vq(i)]; |
| 2898 | } |
| 2899 | |
| 2900 | /* Success: ret == 0. Fall through and free the temporary find_vqs() arrays, which are no longer needed. */ |
| 2901 |
| 2903 | err_find: |
| 2904 | kfree(ctx); |
| 2905 | err_ctx: |
| 2906 | kfree(names); |
| 2907 | err_names: |
| 2908 | kfree(callbacks); |
| 2909 | err_callback: |
| 2910 | kfree(vqs); |
| 2911 | err_vq: |
| 2912 | return ret; |
| 2913 | } |
| 2914 | |
| 2915 | static int virtnet_alloc_queues(struct virtnet_info *vi) |
| 2916 | { |
| 2917 | int i; |
| 2918 | |
| 2919 | vi->ctrl = kzalloc(sizeof(*vi->ctrl), GFP_KERNEL); |
| 2920 | if (!vi->ctrl) |
| 2921 | goto err_ctrl; |
| 2922 | vi->sq = kcalloc(vi->max_queue_pairs, sizeof(*vi->sq), GFP_KERNEL); |
| 2923 | if (!vi->sq) |
| 2924 | goto err_sq; |
| 2925 | vi->rq = kcalloc(vi->max_queue_pairs, sizeof(*vi->rq), GFP_KERNEL); |
| 2926 | if (!vi->rq) |
| 2927 | goto err_rq; |
| 2928 | |
| 2929 | INIT_DELAYED_WORK(&vi->refill, refill_work); |
| 2930 | for (i = 0; i < vi->max_queue_pairs; i++) { |
| 2931 | vi->rq[i].pages = NULL; |
| 2932 | netif_napi_add(vi->dev, &vi->rq[i].napi, virtnet_poll, |
| 2933 | napi_weight); |
| 2934 | netif_tx_napi_add(vi->dev, &vi->sq[i].napi, virtnet_poll_tx, |
| 2935 | napi_tx ? napi_weight : 0); |
| 2936 | |
| 2937 | sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg)); |
| 2938 | ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len); |
| 2939 | sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg)); |
| 2940 | |
| 2941 | u64_stats_init(&vi->rq[i].stats.syncp); |
| 2942 | u64_stats_init(&vi->sq[i].stats.syncp); |
| 2943 | } |
| 2944 | |
| 2945 | return 0; |
| 2946 | |
| 2947 | err_rq: |
| 2948 | kfree(vi->sq); |
| 2949 | err_sq: |
| 2950 | kfree(vi->ctrl); |
| 2951 | err_ctrl: |
| 2952 | return -ENOMEM; |
| 2953 | } |
| 2954 | |
| 2955 | static int init_vqs(struct virtnet_info *vi) |
| 2956 | { |
| 2957 | int ret; |
| 2958 | |
| 2959 | /* Allocate send & receive queues */ |
| 2960 | ret = virtnet_alloc_queues(vi); |
| 2961 | if (ret) |
| 2962 | goto err; |
| 2963 | |
| 2964 | ret = virtnet_find_vqs(vi); |
| 2965 | if (ret) |
| 2966 | goto err_free; |
| 2967 | |
| 2968 | get_online_cpus(); |
| 2969 | virtnet_set_affinity(vi); |
| 2970 | put_online_cpus(); |
| 2971 | |
| 2972 | return 0; |
| 2973 | |
| 2974 | err_free: |
| 2975 | virtnet_free_queues(vi); |
| 2976 | err: |
| 2977 | return ret; |
| 2978 | } |
| 2979 | |
| 2980 | #ifdef CONFIG_SYSFS |
| 2981 | static ssize_t mergeable_rx_buffer_size_show(struct netdev_rx_queue *queue, |
| 2982 | char *buf) |
| 2983 | { |
| 2984 | struct virtnet_info *vi = netdev_priv(queue->dev); |
| 2985 | unsigned int queue_index = get_netdev_rx_queue_index(queue); |
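/* Headroom is non-zero only while an XDP program is attached; in that case
 * room for struct skb_shared_info is also reserved as tailroom.
 */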
| 2986 | unsigned int headroom = virtnet_get_headroom(vi); |
| 2987 | unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0; |
| 2988 | struct ewma_pkt_len *avg; |
| 2989 | |
| 2990 | BUG_ON(queue_index >= vi->max_queue_pairs); |
| 2991 | avg = &vi->rq[queue_index].mrg_avg_pkt_len; |
| 2992 | return sprintf(buf, "%u\n", |
| 2993 | get_mergeable_buf_len(&vi->rq[queue_index], avg, |
| 2994 | SKB_DATA_ALIGN(headroom + tailroom))); |
| 2995 | } |
| 2996 | |
| 2997 | static struct rx_queue_attribute mergeable_rx_buffer_size_attribute = |
| 2998 | __ATTR_RO(mergeable_rx_buffer_size); |
| 2999 | |
| 3000 | static struct attribute *virtio_net_mrg_rx_attrs[] = { |
| 3001 | &mergeable_rx_buffer_size_attribute.attr, |
| 3002 | NULL |
| 3003 | }; |
| 3004 | |
| 3005 | static const struct attribute_group virtio_net_mrg_rx_group = { |
| 3006 | .name = "virtio_net", |
| 3007 | .attrs = virtio_net_mrg_rx_attrs |
| 3008 | }; |
| 3009 | #endif |
| 3010 | |
| 3011 | static bool virtnet_fail_on_feature(struct virtio_device *vdev, |
| 3012 | unsigned int fbit, |
| 3013 | const char *fname, const char *dname) |
| 3014 | { |
| 3015 | if (!virtio_has_feature(vdev, fbit)) |
| 3016 | return false; |
| 3017 | |
| 3018 | dev_err(&vdev->dev, "device advertises feature %s but not %s\n", |
| 3019 | fname, dname); |
| 3020 | |
| 3021 | return true; |
| 3022 | } |
| 3023 | |
| 3024 | #define VIRTNET_FAIL_ON(vdev, fbit, dbit) \ |
| 3025 | virtnet_fail_on_feature(vdev, fbit, #fbit, dbit) |
| 3026 | |
| 3027 | static bool virtnet_validate_features(struct virtio_device *vdev) |
| 3028 | { |
| 3029 | if (!virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) && |
| 3030 | (VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_RX, |
| 3031 | "VIRTIO_NET_F_CTRL_VQ") || |
| 3032 | VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_VLAN, |
| 3033 | "VIRTIO_NET_F_CTRL_VQ") || |
| 3034 | VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE, |
| 3035 | "VIRTIO_NET_F_CTRL_VQ") || |
| 3036 | VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_MQ, "VIRTIO_NET_F_CTRL_VQ") || |
| 3037 | VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR, |
| 3038 | "VIRTIO_NET_F_CTRL_VQ"))) { |
| 3039 | return false; |
| 3040 | } |
| 3041 | |
| 3042 | return true; |
| 3043 | } |
| 3044 | |
| 3045 | #define MIN_MTU ETH_MIN_MTU |
| 3046 | #define MAX_MTU ETH_MAX_MTU |
| 3047 | |
| 3048 | static int virtnet_validate(struct virtio_device *vdev) |
| 3049 | { |
| 3050 | if (!vdev->config->get) { |
| 3051 | dev_err(&vdev->dev, "%s failure: config access disabled\n", |
| 3052 | __func__); |
| 3053 | return -EINVAL; |
| 3054 | } |
| 3055 | |
| 3056 | if (!virtnet_validate_features(vdev)) |
| 3057 | return -EINVAL; |
| 3058 | |
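/* An advertised MTU below the legal minimum (ETH_MIN_MTU, 68) is ignored by
 * clearing the feature bit rather than failing probe, so the driver falls
 * back to its default MTU.
 */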
| 3059 | if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) { |
| 3060 | int mtu = virtio_cread16(vdev, |
| 3061 | offsetof(struct virtio_net_config, |
| 3062 | mtu)); |
| 3063 | if (mtu < MIN_MTU) |
| 3064 | __virtio_clear_bit(vdev, VIRTIO_NET_F_MTU); |
| 3065 | } |
| 3066 | |
| 3067 | return 0; |
| 3068 | } |
| 3069 | |
| 3070 | static int virtnet_probe(struct virtio_device *vdev) |
| 3071 | { |
| 3072 | int i, err = -ENOMEM; |
| 3073 | struct net_device *dev; |
| 3074 | struct virtnet_info *vi; |
| 3075 | u16 max_queue_pairs; |
| 3076 | int mtu; |
| 3077 | |
| 3078 | /* Find if host supports multiqueue virtio_net device */ |
| 3079 | err = virtio_cread_feature(vdev, VIRTIO_NET_F_MQ, |
| 3080 | struct virtio_net_config, |
| 3081 | max_virtqueue_pairs, &max_queue_pairs); |
| 3082 | |
| 3083 | /* We need at least 2 queues */ |
| 3084 | if (err || max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN || |
| 3085 | max_queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX || |
| 3086 | !virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) |
| 3087 | max_queue_pairs = 1; |
| 3088 | |
| 3089 | /* Allocate ourselves a network device with room for our info */ |
| 3090 | dev = alloc_etherdev_mq(sizeof(struct virtnet_info), max_queue_pairs); |
| 3091 | if (!dev) |
| 3092 | return -ENOMEM; |
| 3093 | |
| 3094 | /* Set up network device as normal. */ |
| 3095 | dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE; |
| 3096 | dev->netdev_ops = &virtnet_netdev; |
| 3097 | dev->features = NETIF_F_HIGHDMA; |
| 3098 | |
| 3099 | dev->ethtool_ops = &virtnet_ethtool_ops; |
| 3100 | SET_NETDEV_DEV(dev, &vdev->dev); |
| 3101 | |
| 3102 | /* Do we support "hardware" checksums? */ |
| 3103 | if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) { |
| 3104 | /* This opens up the world of extra features. */ |
| 3105 | dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_SG; |
| 3106 | if (csum) |
| 3107 | dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG; |
| 3108 | |
| 3109 | if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) { |
| 3110 | dev->hw_features |= NETIF_F_TSO |
| 3111 | | NETIF_F_TSO_ECN | NETIF_F_TSO6; |
| 3112 | } |
| 3113 | /* Individual feature bits: what can host handle? */ |
| 3114 | if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4)) |
| 3115 | dev->hw_features |= NETIF_F_TSO; |
| 3116 | if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6)) |
| 3117 | dev->hw_features |= NETIF_F_TSO6; |
| 3118 | if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN)) |
| 3119 | dev->hw_features |= NETIF_F_TSO_ECN; |
| 3120 | |
| 3121 | dev->features |= NETIF_F_GSO_ROBUST; |
| 3122 | |
| 3123 | if (gso) |
| 3124 | dev->features |= dev->hw_features & NETIF_F_ALL_TSO; |
| 3125 | /* (!csum && gso) case will be fixed by register_netdev() */ |
| 3126 | } |
| 3127 | if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM)) |
| 3128 | dev->features |= NETIF_F_RXCSUM; |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 3129 | if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) || |
| 3130 | virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6)) |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame^] | 3131 | dev->features |= NETIF_F_GRO_HW; |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 3132 | if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame^] | 3133 | dev->hw_features |= NETIF_F_GRO_HW; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3134 | |
| 3135 | dev->vlan_features = dev->features; |
| 3136 | |
| 3137 | /* MTU range: 68 - 65535 */ |
| 3138 | dev->min_mtu = MIN_MTU; |
| 3139 | dev->max_mtu = MAX_MTU; |
| 3140 | |
| 3141 | /* Configuration may specify what MAC to use. Otherwise random. */ |
| 3142 | if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) |
| 3143 | virtio_cread_bytes(vdev, |
| 3144 | offsetof(struct virtio_net_config, mac), |
| 3145 | dev->dev_addr, dev->addr_len); |
| 3146 | else |
| 3147 | eth_hw_addr_random(dev); |
| 3148 | |
| 3149 | /* Set up our device-specific information */ |
| 3150 | vi = netdev_priv(dev); |
| 3151 | vi->dev = dev; |
| 3152 | vi->vdev = vdev; |
| 3153 | vdev->priv = vi; |
| 3154 | |
| 3155 | INIT_WORK(&vi->config_work, virtnet_config_changed_work); |
| 3156 | |
| 3157 | /* If we can receive ANY GSO packets, we must allocate large ones. */ |
| 3158 | if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) || |
| 3159 | virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) || |
| 3160 | virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN) || |
| 3161 | virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UFO)) |
| 3162 | vi->big_packets = true; |
| 3163 | |
| 3164 | if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) |
| 3165 | vi->mergeable_rx_bufs = true; |
| 3166 | |
| 3167 | if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF) || |
| 3168 | virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) |
| 3169 | vi->hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf); |
| 3170 | else |
| 3171 | vi->hdr_len = sizeof(struct virtio_net_hdr); |
| 3172 | |
| 3173 | if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT) || |
| 3174 | virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) |
| 3175 | vi->any_header_sg = true; |
| 3176 | |
| 3177 | if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) |
| 3178 | vi->has_cvq = true; |
| 3179 | |
| 3180 | if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) { |
| 3181 | mtu = virtio_cread16(vdev, |
| 3182 | offsetof(struct virtio_net_config, |
| 3183 | mtu)); |
| 3184 | if (mtu < dev->min_mtu) { |
| 3185 | /* Should never trigger: MTU was previously validated |
| 3186 | * in virtnet_validate. |
| 3187 | */ |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 3188 | dev_err(&vdev->dev, |
| 3189 | "device MTU appears to have changed it is now %d < %d", |
| 3190 | mtu, dev->min_mtu); |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame^] | 3191 | err = -EINVAL; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3192 | goto free; |
| 3193 | } |
| 3194 | |
| 3195 | dev->mtu = mtu; |
| 3196 | dev->max_mtu = mtu; |
| 3197 | |
| 3198 | /* TODO: size buffers correctly in this case. */ |
| 3199 | if (dev->mtu > ETH_DATA_LEN) |
| 3200 | vi->big_packets = true; |
| 3201 | } |
| 3202 | |
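/* Reserving hdr_len of skb headroom lets the virtio-net header be pushed in
 * front of the packet data on transmit, avoiding a separate sg entry when the
 * device accepts any descriptor layout.
 */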
| 3203 | if (vi->any_header_sg) |
| 3204 | dev->needed_headroom = vi->hdr_len; |
| 3205 | |
| 3206 | /* Enable multiqueue by default */ |
| 3207 | if (num_online_cpus() >= max_queue_pairs) |
| 3208 | vi->curr_queue_pairs = max_queue_pairs; |
| 3209 | else |
| 3210 | vi->curr_queue_pairs = num_online_cpus(); |
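/* e.g. 8 online CPUs and max_queue_pairs = 4 -> start with 4 pairs;
 * 2 online CPUs and max_queue_pairs = 4 -> start with 2.
 */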
| 3211 | vi->max_queue_pairs = max_queue_pairs; |
| 3212 | |
| 3213 | /* Allocate/initialize the rx/tx queues, and invoke find_vqs */ |
| 3214 | err = init_vqs(vi); |
| 3215 | if (err) |
| 3216 | goto free; |
| 3217 | |
| 3218 | #ifdef CONFIG_SYSFS |
| 3219 | if (vi->mergeable_rx_bufs) |
| 3220 | dev->sysfs_rx_queue_group = &virtio_net_mrg_rx_group; |
| 3221 | #endif |
| 3222 | netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs); |
| 3223 | netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs); |
| 3224 | |
| 3225 | virtnet_init_settings(dev); |
| 3226 | |
| 3227 | if (virtio_has_feature(vdev, VIRTIO_NET_F_STANDBY)) { |
| 3228 | vi->failover = net_failover_create(vi->dev); |
| 3229 | if (IS_ERR(vi->failover)) { |
| 3230 | err = PTR_ERR(vi->failover); |
| 3231 | goto free_vqs; |
| 3232 | } |
| 3233 | } |
| 3234 | |
| 3235 | err = register_netdev(dev); |
| 3236 | if (err) { |
| 3237 | pr_debug("virtio_net: registering device failed\n"); |
| 3238 | goto free_failover; |
| 3239 | } |
| 3240 | |
| 3241 | virtio_device_ready(vdev); |
| 3242 | |
| 3243 | err = virtnet_cpu_notif_add(vi); |
| 3244 | if (err) { |
| 3245 | pr_debug("virtio_net: registering cpu notifier failed\n"); |
| 3246 | goto free_unregister_netdev; |
| 3247 | } |
| 3248 | |
| 3249 | virtnet_set_queues(vi, vi->curr_queue_pairs); |
| 3250 | |
| 3251 | /* Assume link up if device can't report link status, |
| 3252 |  * otherwise get link status from config. */ |
| 3253 | netif_carrier_off(dev); |
| 3254 | if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) { |
| 3255 | schedule_work(&vi->config_work); |
| 3256 | } else { |
| 3257 | vi->status = VIRTIO_NET_S_LINK_UP; |
| 3258 | virtnet_update_settings(vi); |
| 3259 | netif_carrier_on(dev); |
| 3260 | } |
| 3261 | |
| 3262 | for (i = 0; i < ARRAY_SIZE(guest_offloads); i++) |
| 3263 | if (virtio_has_feature(vi->vdev, guest_offloads[i])) |
| 3264 | set_bit(guest_offloads[i], &vi->guest_offloads); |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 3265 | vi->guest_offloads_capable = vi->guest_offloads; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3266 | |
| 3267 | pr_debug("virtnet: registered device %s with %d RX and TX vqs\n", |
| 3268 | dev->name, max_queue_pairs); |
| 3269 | |
| 3270 | return 0; |
| 3271 | |
| 3272 | free_unregister_netdev: |
| 3273 | vi->vdev->config->reset(vdev); |
| 3274 | |
| 3275 | unregister_netdev(dev); |
| 3276 | free_failover: |
| 3277 | net_failover_destroy(vi->failover); |
| 3278 | free_vqs: |
| 3279 | cancel_delayed_work_sync(&vi->refill); |
| 3280 | free_receive_page_frags(vi); |
| 3281 | virtnet_del_vqs(vi); |
| 3282 | free: |
| 3283 | free_netdev(dev); |
| 3284 | return err; |
| 3285 | } |
| 3286 | |
| 3287 | static void remove_vq_common(struct virtnet_info *vi) |
| 3288 | { |
| 3289 | vi->vdev->config->reset(vi->vdev); |
| 3290 | |
| 3291 | /* Free unused buffers in both send and recv, if any. */ |
| 3292 | free_unused_bufs(vi); |
| 3293 | |
| 3294 | free_receive_bufs(vi); |
| 3295 | |
| 3296 | free_receive_page_frags(vi); |
| 3297 | |
| 3298 | virtnet_del_vqs(vi); |
| 3299 | } |
| 3300 | |
| 3301 | static void virtnet_remove(struct virtio_device *vdev) |
| 3302 | { |
| 3303 | struct virtnet_info *vi = vdev->priv; |
| 3304 | |
| 3305 | virtnet_cpu_notif_remove(vi); |
| 3306 | |
| 3307 | /* Make sure no work handler is accessing the device. */ |
| 3308 | flush_work(&vi->config_work); |
| 3309 | |
| 3310 | unregister_netdev(vi->dev); |
| 3311 | |
| 3312 | net_failover_destroy(vi->failover); |
| 3313 | |
| 3314 | remove_vq_common(vi); |
| 3315 | |
| 3316 | free_netdev(vi->dev); |
| 3317 | } |
| 3318 | |
| 3319 | static __maybe_unused int virtnet_freeze(struct virtio_device *vdev) |
| 3320 | { |
| 3321 | struct virtnet_info *vi = vdev->priv; |
| 3322 | |
| 3323 | virtnet_cpu_notif_remove(vi); |
| 3324 | virtnet_freeze_down(vdev); |
| 3325 | remove_vq_common(vi); |
| 3326 | |
| 3327 | return 0; |
| 3328 | } |
| 3329 | |
| 3330 | static __maybe_unused int virtnet_restore(struct virtio_device *vdev) |
| 3331 | { |
| 3332 | struct virtnet_info *vi = vdev->priv; |
| 3333 | int err; |
| 3334 | |
| 3335 | err = virtnet_restore_up(vdev); |
| 3336 | if (err) |
| 3337 | return err; |
| 3338 | virtnet_set_queues(vi, vi->curr_queue_pairs); |
| 3339 | |
| 3340 | err = virtnet_cpu_notif_add(vi); |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame^] | 3341 | if (err) { |
| 3342 | virtnet_freeze_down(vdev); |
| 3343 | remove_vq_common(vi); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3344 | return err; |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame^] | 3345 | } |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3346 | |
| 3347 | return 0; |
| 3348 | } |
| 3349 | |
| 3350 | static struct virtio_device_id id_table[] = { |
| 3351 | { VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID }, |
| 3352 | { 0 }, |
| 3353 | }; |
| 3354 | |
| 3355 | #define VIRTNET_FEATURES \ |
| 3356 | VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, \ |
| 3357 | VIRTIO_NET_F_MAC, \ |
| 3358 | VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, \ |
| 3359 | VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, \ |
| 3360 | VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, \ |
| 3361 | VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, \ |
| 3362 | VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, \ |
| 3363 | VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, \ |
| 3364 | VIRTIO_NET_F_CTRL_MAC_ADDR, \ |
| 3365 | VIRTIO_NET_F_MTU, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, \ |
| 3366 | VIRTIO_NET_F_SPEED_DUPLEX, VIRTIO_NET_F_STANDBY |
| 3367 | |
| 3368 | static unsigned int features[] = { |
| 3369 | VIRTNET_FEATURES, |
| 3370 | }; |
| 3371 | |
| 3372 | static unsigned int features_legacy[] = { |
| 3373 | VIRTNET_FEATURES, |
| 3374 | VIRTIO_NET_F_GSO, |
| 3375 | VIRTIO_F_ANY_LAYOUT, |
| 3376 | }; |
| 3377 | |
| 3378 | static struct virtio_driver virtio_net_driver = { |
| 3379 | .feature_table = features, |
| 3380 | .feature_table_size = ARRAY_SIZE(features), |
| 3381 | .feature_table_legacy = features_legacy, |
| 3382 | .feature_table_size_legacy = ARRAY_SIZE(features_legacy), |
| 3383 | .driver.name = KBUILD_MODNAME, |
| 3384 | .driver.owner = THIS_MODULE, |
| 3385 | .id_table = id_table, |
| 3386 | .validate = virtnet_validate, |
| 3387 | .probe = virtnet_probe, |
| 3388 | .remove = virtnet_remove, |
| 3389 | .config_changed = virtnet_config_changed, |
| 3390 | #ifdef CONFIG_PM_SLEEP |
| 3391 | .freeze = virtnet_freeze, |
| 3392 | .restore = virtnet_restore, |
| 3393 | #endif |
| 3394 | }; |
| 3395 | |
| 3396 | static __init int virtio_net_driver_init(void) |
| 3397 | { |
| 3398 | int ret; |
| 3399 | |
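/* CPUHP_AP_ONLINE_DYN allocates a dynamic hotplug state; the returned state
 * id is saved in virtionet_online so it can be removed again on module exit.
 */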
| 3400 | ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "virtio/net:online", |
| 3401 | virtnet_cpu_online, |
| 3402 | virtnet_cpu_down_prep); |
| 3403 | if (ret < 0) |
| 3404 | goto out; |
| 3405 | virtionet_online = ret; |
| 3406 | ret = cpuhp_setup_state_multi(CPUHP_VIRT_NET_DEAD, "virtio/net:dead", |
| 3407 | NULL, virtnet_cpu_dead); |
| 3408 | if (ret) |
| 3409 | goto err_dead; |
| 3410 | |
| 3411 | ret = register_virtio_driver(&virtio_net_driver); |
| 3412 | if (ret) |
| 3413 | goto err_virtio; |
| 3414 | return 0; |
| 3415 | err_virtio: |
| 3416 | cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD); |
| 3417 | err_dead: |
| 3418 | cpuhp_remove_multi_state(virtionet_online); |
| 3419 | out: |
| 3420 | return ret; |
| 3421 | } |
| 3422 | module_init(virtio_net_driver_init); |
| 3423 | |
| 3424 | static __exit void virtio_net_driver_exit(void) |
| 3425 | { |
| 3426 | unregister_virtio_driver(&virtio_net_driver); |
| 3427 | cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD); |
| 3428 | cpuhp_remove_multi_state(virtionet_online); |
| 3429 | } |
| 3430 | module_exit(virtio_net_driver_exit); |
| 3431 | |
| 3432 | MODULE_DEVICE_TABLE(virtio, id_table); |
| 3433 | MODULE_DESCRIPTION("Virtio network driver"); |
| 3434 | MODULE_LICENSE("GPL"); |