1/*
2 * Virtual network driver for conversing with remote driver backends.
3 *
4 * Copyright (c) 2002-2005, K A Fraser
5 * Copyright (c) 2005, XenSource Ltd
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License version 2
9 * as published by the Free Software Foundation; or, when distributed
10 * separately from the Linux kernel or incorporated into other
11 * software packages, subject to the following license:
12 *
13 * Permission is hereby granted, free of charge, to any person obtaining a copy
14 * of this source file (the "Software"), to deal in the Software without
15 * restriction, including without limitation the rights to use, copy, modify,
16 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
17 * and to permit persons to whom the Software is furnished to do so, subject to
18 * the following conditions:
19 *
20 * The above copyright notice and this permission notice shall be included in
21 * all copies or substantial portions of the Software.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
24 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
25 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
26 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
27 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
28 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
29 * IN THE SOFTWARE.
30 */
31
32#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
33
34#include <linux/module.h>
35#include <linux/kernel.h>
36#include <linux/netdevice.h>
37#include <linux/etherdevice.h>
38#include <linux/skbuff.h>
39#include <linux/ethtool.h>
40#include <linux/if_ether.h>
41#include <net/tcp.h>
42#include <linux/udp.h>
43#include <linux/moduleparam.h>
44#include <linux/mm.h>
45#include <linux/slab.h>
46#include <net/ip.h>
47#include <linux/bpf.h>
48#include <net/page_pool.h>
49#include <linux/bpf_trace.h>
50
51#include <xen/xen.h>
52#include <xen/xenbus.h>
53#include <xen/events.h>
54#include <xen/page.h>
55#include <xen/platform_pci.h>
56#include <xen/grant_table.h>
57
58#include <xen/interface/io/netif.h>
59#include <xen/interface/memory.h>
60#include <xen/interface/grant_table.h>
61
62/* Module parameters */
63#define MAX_QUEUES_DEFAULT 8
64static unsigned int xennet_max_queues;
65module_param_named(max_queues, xennet_max_queues, uint, 0644);
66MODULE_PARM_DESC(max_queues,
67 "Maximum number of queues per virtual interface");
68
69#define XENNET_TIMEOUT (5 * HZ)
70
71static const struct ethtool_ops xennet_ethtool_ops;
72
73struct netfront_cb {
74 int pull_to;
75};
76
77#define NETFRONT_SKB_CB(skb) ((struct netfront_cb *)((skb)->cb))
78
79#define RX_COPY_THRESHOLD 256
80
81#define GRANT_INVALID_REF 0
82
83#define NET_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, XEN_PAGE_SIZE)
84#define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, XEN_PAGE_SIZE)
85
86/* Minimum number of Rx slots (includes slot for GSO metadata). */
87#define NET_RX_SLOTS_MIN (XEN_NETIF_NR_SLOTS_MIN + 1)
88
89/* Queue name is interface name with "-qNNN" appended */
90#define QUEUE_NAME_SIZE (IFNAMSIZ + 6)
91
92/* IRQ name is queue name with "-tx" or "-rx" appended */
93#define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)
94
95static DECLARE_WAIT_QUEUE_HEAD(module_wq);
96
97struct netfront_stats {
98 u64 packets;
99 u64 bytes;
100 struct u64_stats_sync syncp;
101};
102
103struct netfront_info;
104
105struct netfront_queue {
106 unsigned int id; /* Queue ID, 0-based */
107 char name[QUEUE_NAME_SIZE]; /* DEVNAME-qN */
108 struct netfront_info *info;
109
110 struct bpf_prog __rcu *xdp_prog;
111
112 struct napi_struct napi;
113
114 /* Split event channels support, tx_* == rx_* when using
115 * single event channel.
116 */
117 unsigned int tx_evtchn, rx_evtchn;
118 unsigned int tx_irq, rx_irq;
119 /* Only used when split event channels support is enabled */
120 char tx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-tx */
121 char rx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-rx */
122
123 spinlock_t tx_lock;
124 struct xen_netif_tx_front_ring tx;
125 int tx_ring_ref;
126
127 /*
128 * {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries
129 * are linked from tx_skb_freelist through tx_link.
130 */
131 struct sk_buff *tx_skbs[NET_TX_RING_SIZE];
132 unsigned short tx_link[NET_TX_RING_SIZE];
133#define TX_LINK_NONE 0xffff
134#define TX_PENDING 0xfffe
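/*
 * Free tx_skbs[] slots are chained through tx_link[]; TX_LINK_NONE ends the
 * free list and TX_PENDING marks a request that has been pushed to the
 * backend and is still awaiting its response.
 */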
135 grant_ref_t gref_tx_head;
136 grant_ref_t grant_tx_ref[NET_TX_RING_SIZE];
137 struct page *grant_tx_page[NET_TX_RING_SIZE];
138 unsigned tx_skb_freelist;
139 unsigned int tx_pend_queue;
140
141 spinlock_t rx_lock ____cacheline_aligned_in_smp;
142 struct xen_netif_rx_front_ring rx;
143 int rx_ring_ref;
144
145 struct timer_list rx_refill_timer;
146
147 struct sk_buff *rx_skbs[NET_RX_RING_SIZE];
148 grant_ref_t gref_rx_head;
149 grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];
150
151 unsigned int rx_rsp_unconsumed;
152 spinlock_t rx_cons_lock;
153
154 struct page_pool *page_pool;
155 struct xdp_rxq_info xdp_rxq;
156};
157
158struct netfront_info {
159 struct list_head list;
160 struct net_device *netdev;
161
162 struct xenbus_device *xbdev;
163
164 /* Multi-queue support */
165 struct netfront_queue *queues;
166
167 /* Statistics */
168 struct netfront_stats __percpu *rx_stats;
169 struct netfront_stats __percpu *tx_stats;
170
171 /* XDP state */
172 bool netback_has_xdp_headroom;
173 bool netfront_xdp_enabled;
174
175 /* Is the device behaving sanely? */
176 bool broken;
177
178 atomic_t rx_gso_checksum_fixup;
179};
180
181struct netfront_rx_info {
182 struct xen_netif_rx_response rx;
183 struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
184};
185
186/*
187 * Access helpers for acquiring and freeing slots in tx_skbs[].
188 */
189
190static void add_id_to_list(unsigned *head, unsigned short *list,
191 unsigned short id)
192{
193 list[id] = *head;
194 *head = id;
195}
196
197static unsigned short get_id_from_list(unsigned *head, unsigned short *list)
198{
199 unsigned int id = *head;
200
201 if (id != TX_LINK_NONE) {
202 *head = list[id];
203 list[id] = TX_LINK_NONE;
204 }
205 return id;
206}
207
208static int xennet_rxidx(RING_IDX idx)
209{
210 return idx & (NET_RX_RING_SIZE - 1);
211}
212
213static struct sk_buff *xennet_get_rx_skb(struct netfront_queue *queue,
214 RING_IDX ri)
215{
216 int i = xennet_rxidx(ri);
217 struct sk_buff *skb = queue->rx_skbs[i];
218 queue->rx_skbs[i] = NULL;
219 return skb;
220}
221
222static grant_ref_t xennet_get_rx_ref(struct netfront_queue *queue,
223 RING_IDX ri)
224{
225 int i = xennet_rxidx(ri);
226 grant_ref_t ref = queue->grant_rx_ref[i];
227 queue->grant_rx_ref[i] = GRANT_INVALID_REF;
228 return ref;
229}
230
231#ifdef CONFIG_SYSFS
232static const struct attribute_group xennet_dev_group;
233#endif
234
235static bool xennet_can_sg(struct net_device *dev)
236{
237 return dev->features & NETIF_F_SG;
238}
239
240
241static void rx_refill_timeout(struct timer_list *t)
242{
243 struct netfront_queue *queue = from_timer(queue, t, rx_refill_timer);
244 napi_schedule(&queue->napi);
245}
246
247static int netfront_tx_slot_available(struct netfront_queue *queue)
248{
249 return (queue->tx.req_prod_pvt - queue->tx.rsp_cons) <
250 (NET_TX_RING_SIZE - XEN_NETIF_NR_SLOTS_MIN - 1);
251}
252
253static void xennet_maybe_wake_tx(struct netfront_queue *queue)
254{
255 struct net_device *dev = queue->info->netdev;
256 struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, queue->id);
257
258 if (unlikely(netif_tx_queue_stopped(dev_queue)) &&
259 netfront_tx_slot_available(queue) &&
260 likely(netif_running(dev)))
261 netif_tx_wake_queue(netdev_get_tx_queue(dev, queue->id));
262}
263
264
265static struct sk_buff *xennet_alloc_one_rx_buffer(struct netfront_queue *queue)
266{
267 struct sk_buff *skb;
268 struct page *page;
269
270 skb = __netdev_alloc_skb(queue->info->netdev,
271 RX_COPY_THRESHOLD + NET_IP_ALIGN,
272 GFP_ATOMIC | __GFP_NOWARN);
273 if (unlikely(!skb))
274 return NULL;
275
276 page = page_pool_dev_alloc_pages(queue->page_pool);
277 if (unlikely(!page)) {
278 kfree_skb(skb);
279 return NULL;
280 }
281 skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE);
282
283 /* Align ip header to a 16 bytes boundary */
284 skb_reserve(skb, NET_IP_ALIGN);
285 skb->dev = queue->info->netdev;
286
287 return skb;
288}
289
290
291static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
292{
293 RING_IDX req_prod = queue->rx.req_prod_pvt;
294 int notify;
295 int err = 0;
296
297 if (unlikely(!netif_carrier_ok(queue->info->netdev)))
298 return;
299
300 for (req_prod = queue->rx.req_prod_pvt;
301 req_prod - queue->rx.rsp_cons < NET_RX_RING_SIZE;
302 req_prod++) {
303 struct sk_buff *skb;
304 unsigned short id;
305 grant_ref_t ref;
306 struct page *page;
307 struct xen_netif_rx_request *req;
308
309 skb = xennet_alloc_one_rx_buffer(queue);
310 if (!skb) {
311 err = -ENOMEM;
312 break;
313 }
314
315 id = xennet_rxidx(req_prod);
316
317 BUG_ON(queue->rx_skbs[id]);
318 queue->rx_skbs[id] = skb;
319
320 ref = gnttab_claim_grant_reference(&queue->gref_rx_head);
321 WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref));
322 queue->grant_rx_ref[id] = ref;
323
324 page = skb_frag_page(&skb_shinfo(skb)->frags[0]);
325
326 req = RING_GET_REQUEST(&queue->rx, req_prod);
327 gnttab_page_grant_foreign_access_ref_one(ref,
328 queue->info->xbdev->otherend_id,
329 page,
330 0);
331 req->id = id;
332 req->gref = ref;
333 }
334
335 queue->rx.req_prod_pvt = req_prod;
336
337 /* Try again later if there are not enough requests or skb allocation
338 * failed.
339 * Enough requests is quantified as the sum of newly created slots and
340 * the unconsumed slots at the backend.
341 */
342 if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN ||
343 unlikely(err)) {
344 mod_timer(&queue->rx_refill_timer, jiffies + (HZ/10));
345 return;
346 }
347
348 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->rx, notify);
349 if (notify)
350 notify_remote_via_irq(queue->rx_irq);
351}
352
353static int xennet_open(struct net_device *dev)
354{
355 struct netfront_info *np = netdev_priv(dev);
356 unsigned int num_queues = dev->real_num_tx_queues;
357 unsigned int i = 0;
358 struct netfront_queue *queue = NULL;
359
360 if (!np->queues || np->broken)
361 return -ENODEV;
362
363 for (i = 0; i < num_queues; ++i) {
364 queue = &np->queues[i];
365 napi_enable(&queue->napi);
366
367 spin_lock_bh(&queue->rx_lock);
368 if (netif_carrier_ok(dev)) {
369 xennet_alloc_rx_buffers(queue);
370 queue->rx.sring->rsp_event = queue->rx.rsp_cons + 1;
371 if (RING_HAS_UNCONSUMED_RESPONSES(&queue->rx))
372 napi_schedule(&queue->napi);
373 }
374 spin_unlock_bh(&queue->rx_lock);
375 }
376
377 netif_tx_start_all_queues(dev);
378
379 return 0;
380}
381
382static bool xennet_tx_buf_gc(struct netfront_queue *queue)
383{
384 RING_IDX cons, prod;
385 unsigned short id;
386 struct sk_buff *skb;
387 bool more_to_do;
388 bool work_done = false;
389 const struct device *dev = &queue->info->netdev->dev;
390
391 BUG_ON(!netif_carrier_ok(queue->info->netdev));
392
393 do {
394 prod = queue->tx.sring->rsp_prod;
395 if (RING_RESPONSE_PROD_OVERFLOW(&queue->tx, prod)) {
396 dev_alert(dev, "Illegal number of responses %u\n",
397 prod - queue->tx.rsp_cons);
398 goto err;
399 }
400 rmb(); /* Ensure we see responses up to 'rp'. */
401
402 for (cons = queue->tx.rsp_cons; cons != prod; cons++) {
403 struct xen_netif_tx_response txrsp;
404
405 work_done = true;
406
407 RING_COPY_RESPONSE(&queue->tx, cons, &txrsp);
408 if (txrsp.status == XEN_NETIF_RSP_NULL)
409 continue;
410
411 id = txrsp.id;
412 if (id >= RING_SIZE(&queue->tx)) {
413 dev_alert(dev,
414 "Response has incorrect id (%u)\n",
415 id);
416 goto err;
417 }
418 if (queue->tx_link[id] != TX_PENDING) {
419 dev_alert(dev,
420 "Response for inactive request\n");
421 goto err;
422 }
423
424 queue->tx_link[id] = TX_LINK_NONE;
425 skb = queue->tx_skbs[id];
426 queue->tx_skbs[id] = NULL;
427 if (unlikely(!gnttab_end_foreign_access_ref(
428 queue->grant_tx_ref[id], GNTMAP_readonly))) {
429 dev_alert(dev,
430 "Grant still in use by backend domain\n");
431 goto err;
432 }
433 gnttab_release_grant_reference(
434 &queue->gref_tx_head, queue->grant_tx_ref[id]);
435 queue->grant_tx_ref[id] = GRANT_INVALID_REF;
436 queue->grant_tx_page[id] = NULL;
437 add_id_to_list(&queue->tx_skb_freelist, queue->tx_link, id);
438 dev_kfree_skb_irq(skb);
439 }
440
441 queue->tx.rsp_cons = prod;
442
443 RING_FINAL_CHECK_FOR_RESPONSES(&queue->tx, more_to_do);
444 } while (more_to_do);
445
446 xennet_maybe_wake_tx(queue);
447
448 return work_done;
449
450 err:
451 queue->info->broken = true;
452 dev_alert(dev, "Disabled for further use\n");
453
454 return work_done;
455}
456
457struct xennet_gnttab_make_txreq {
458 struct netfront_queue *queue;
459 struct sk_buff *skb;
460 struct page *page;
461 struct xen_netif_tx_request *tx; /* Last request on ring page */
462 struct xen_netif_tx_request tx_local; /* Last request local copy */
463 unsigned int size;
464};
465
466static void xennet_tx_setup_grant(unsigned long gfn, unsigned int offset,
467 unsigned int len, void *data)
468{
469 struct xennet_gnttab_make_txreq *info = data;
470 unsigned int id;
471 struct xen_netif_tx_request *tx;
472 grant_ref_t ref;
473 /* convenient aliases */
474 struct page *page = info->page;
475 struct netfront_queue *queue = info->queue;
476 struct sk_buff *skb = info->skb;
477
478 id = get_id_from_list(&queue->tx_skb_freelist, queue->tx_link);
479 tx = RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
480 ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
481 WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref));
482
483 gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id,
484 gfn, GNTMAP_readonly);
485
486 queue->tx_skbs[id] = skb;
487 queue->grant_tx_page[id] = page;
488 queue->grant_tx_ref[id] = ref;
489
490 info->tx_local.id = id;
491 info->tx_local.gref = ref;
492 info->tx_local.offset = offset;
493 info->tx_local.size = len;
494 info->tx_local.flags = 0;
495
496 *tx = info->tx_local;
497
498 /*
499 * Put the request in the pending queue, it will be set to be pending
500 * when the producer index is about to be raised.
501 */
502 add_id_to_list(&queue->tx_pend_queue, queue->tx_link, id);
503
504 info->tx = tx;
505 info->size += info->tx_local.size;
506}
507
508static struct xen_netif_tx_request *xennet_make_first_txreq(
509 struct xennet_gnttab_make_txreq *info,
510 unsigned int offset, unsigned int len)
511{
512 info->size = 0;
513
514 gnttab_for_one_grant(info->page, offset, len, xennet_tx_setup_grant, info);
515
516 return info->tx;
517}
518
519static void xennet_make_one_txreq(unsigned long gfn, unsigned int offset,
520 unsigned int len, void *data)
521{
522 struct xennet_gnttab_make_txreq *info = data;
523
524 info->tx->flags |= XEN_NETTXF_more_data;
525 skb_get(info->skb);
526 xennet_tx_setup_grant(gfn, offset, len, data);
527}
528
529static void xennet_make_txreqs(
530 struct xennet_gnttab_make_txreq *info,
531 struct page *page,
532 unsigned int offset, unsigned int len)
533{
534 /* Skip unused frames from start of page */
535 page += offset >> PAGE_SHIFT;
536 offset &= ~PAGE_MASK;
537
538 while (len) {
539 info->page = page;
540 info->size = 0;
541
542 gnttab_foreach_grant_in_range(page, offset, len,
543 xennet_make_one_txreq,
544 info);
545
546 page++;
547 offset = 0;
548 len -= info->size;
549 }
550}
551
552/*
553 * Count how many ring slots are required to send this skb. Each frag
554 * might be a compound page.
555 */
556static int xennet_count_skb_slots(struct sk_buff *skb)
557{
558 int i, frags = skb_shinfo(skb)->nr_frags;
559 int slots;
560
561 slots = gnttab_count_grant(offset_in_page(skb->data),
562 skb_headlen(skb));
563
564 for (i = 0; i < frags; i++) {
565 skb_frag_t *frag = skb_shinfo(skb)->frags + i;
566 unsigned long size = skb_frag_size(frag);
567 unsigned long offset = skb_frag_off(frag);
568
569 /* Skip unused frames from start of page */
570 offset &= ~PAGE_MASK;
571
572 slots += gnttab_count_grant(offset, size);
573 }
574
575 return slots;
576}
577
578static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb,
579 struct net_device *sb_dev)
580{
581 unsigned int num_queues = dev->real_num_tx_queues;
582 u32 hash;
583 u16 queue_idx;
584
585 /* First, check if there is only one queue */
586 if (num_queues == 1) {
587 queue_idx = 0;
588 } else {
589 hash = skb_get_hash(skb);
590 queue_idx = hash % num_queues;
591 }
592
593 return queue_idx;
594}
595
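/*
 * Called just before the TX producer index is pushed: convert every entry
 * queued on tx_pend_queue into TX_PENDING, so that xennet_tx_buf_gc() only
 * accepts responses for requests that were actually handed to the backend.
 */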
596static void xennet_mark_tx_pending(struct netfront_queue *queue)
597{
598 unsigned int i;
599
600 while ((i = get_id_from_list(&queue->tx_pend_queue, queue->tx_link)) !=
601 TX_LINK_NONE)
602 queue->tx_link[i] = TX_PENDING;
603}
604
605static int xennet_xdp_xmit_one(struct net_device *dev,
606 struct netfront_queue *queue,
607 struct xdp_frame *xdpf)
608{
609 struct netfront_info *np = netdev_priv(dev);
610 struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats);
611 struct xennet_gnttab_make_txreq info = {
612 .queue = queue,
613 .skb = NULL,
614 .page = virt_to_page(xdpf->data),
615 };
616 int notify;
617
618 xennet_make_first_txreq(&info,
619 offset_in_page(xdpf->data),
620 xdpf->len);
621
622 xennet_mark_tx_pending(queue);
623
624 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify);
625 if (notify)
626 notify_remote_via_irq(queue->tx_irq);
627
628 u64_stats_update_begin(&tx_stats->syncp);
629 tx_stats->bytes += xdpf->len;
630 tx_stats->packets++;
631 u64_stats_update_end(&tx_stats->syncp);
632
633 xennet_tx_buf_gc(queue);
634
635 return 0;
636}
637
638static int xennet_xdp_xmit(struct net_device *dev, int n,
639 struct xdp_frame **frames, u32 flags)
640{
641 unsigned int num_queues = dev->real_num_tx_queues;
642 struct netfront_info *np = netdev_priv(dev);
643 struct netfront_queue *queue = NULL;
644 unsigned long irq_flags;
645 int drops = 0;
646 int i, err;
647
648 if (unlikely(np->broken))
649 return -ENODEV;
650 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
651 return -EINVAL;
652
653 queue = &np->queues[smp_processor_id() % num_queues];
654
655 spin_lock_irqsave(&queue->tx_lock, irq_flags);
656 for (i = 0; i < n; i++) {
657 struct xdp_frame *xdpf = frames[i];
658
659 if (!xdpf)
660 continue;
661 err = xennet_xdp_xmit_one(dev, queue, xdpf);
662 if (err) {
663 xdp_return_frame_rx_napi(xdpf);
664 drops++;
665 }
666 }
667 spin_unlock_irqrestore(&queue->tx_lock, irq_flags);
668
669 return n - drops;
670}
671
672
673#define MAX_XEN_SKB_FRAGS (65536 / XEN_PAGE_SIZE + 1)
674
675static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
676{
677 struct netfront_info *np = netdev_priv(dev);
678 struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats);
679 struct xen_netif_tx_request *first_tx;
680 unsigned int i;
681 int notify;
682 int slots;
683 struct page *page;
684 unsigned int offset;
685 unsigned int len;
686 unsigned long flags;
687 struct netfront_queue *queue = NULL;
688 struct xennet_gnttab_make_txreq info = { };
689 unsigned int num_queues = dev->real_num_tx_queues;
690 u16 queue_index;
691 struct sk_buff *nskb;
692
693 /* Drop the packet if no queues are set up */
694 if (num_queues < 1)
695 goto drop;
696 if (unlikely(np->broken))
697 goto drop;
698 /* Determine which queue to transmit this SKB on */
699 queue_index = skb_get_queue_mapping(skb);
700 queue = &np->queues[queue_index];
701
702 /* If skb->len is too big for wire format, drop skb and alert
703 * user about misconfiguration.
704 */
705 if (unlikely(skb->len > XEN_NETIF_MAX_TX_SIZE)) {
706 net_alert_ratelimited(
707 "xennet: skb->len = %u, too big for wire format\n",
708 skb->len);
709 goto drop;
710 }
711
712 slots = xennet_count_skb_slots(skb);
713 if (unlikely(slots > MAX_XEN_SKB_FRAGS + 1)) {
714 net_dbg_ratelimited("xennet: skb rides the rocket: %d slots, %d bytes\n",
715 slots, skb->len);
716 if (skb_linearize(skb))
717 goto drop;
718 }
719
720 page = virt_to_page(skb->data);
721 offset = offset_in_page(skb->data);
722
723 /* The first req should be at least ETH_HLEN size or the packet will be
724 * dropped by netback.
725 */
726 if (unlikely(PAGE_SIZE - offset < ETH_HLEN)) {
727 nskb = skb_copy(skb, GFP_ATOMIC);
728 if (!nskb)
729 goto drop;
730 dev_consume_skb_any(skb);
731 skb = nskb;
732 page = virt_to_page(skb->data);
733 offset = offset_in_page(skb->data);
734 }
735
736 len = skb_headlen(skb);
737
738 spin_lock_irqsave(&queue->tx_lock, flags);
739
740 if (unlikely(!netif_carrier_ok(dev) ||
741 (slots > 1 && !xennet_can_sg(dev)) ||
742 netif_needs_gso(skb, netif_skb_features(skb)))) {
743 spin_unlock_irqrestore(&queue->tx_lock, flags);
744 goto drop;
745 }
746
747 /* First request for the linear area. */
748 info.queue = queue;
749 info.skb = skb;
750 info.page = page;
751 first_tx = xennet_make_first_txreq(&info, offset, len);
752 offset += info.tx_local.size;
753 if (offset == PAGE_SIZE) {
754 page++;
755 offset = 0;
756 }
757 len -= info.tx_local.size;
758
759 if (skb->ip_summed == CHECKSUM_PARTIAL)
760 /* local packet? */
761 first_tx->flags |= XEN_NETTXF_csum_blank |
762 XEN_NETTXF_data_validated;
763 else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
764 /* remote but checksummed. */
765 first_tx->flags |= XEN_NETTXF_data_validated;
766
767 /* Optional extra info after the first request. */
768 if (skb_shinfo(skb)->gso_size) {
769 struct xen_netif_extra_info *gso;
770
771 gso = (struct xen_netif_extra_info *)
772 RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
773
774 first_tx->flags |= XEN_NETTXF_extra_info;
775
776 gso->u.gso.size = skb_shinfo(skb)->gso_size;
777 gso->u.gso.type = (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) ?
778 XEN_NETIF_GSO_TYPE_TCPV6 :
779 XEN_NETIF_GSO_TYPE_TCPV4;
780 gso->u.gso.pad = 0;
781 gso->u.gso.features = 0;
782
783 gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
784 gso->flags = 0;
785 }
786
787 /* Requests for the rest of the linear area. */
788 xennet_make_txreqs(&info, page, offset, len);
789
790 /* Requests for all the frags. */
791 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
792 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
793 xennet_make_txreqs(&info, skb_frag_page(frag),
794 skb_frag_off(frag),
795 skb_frag_size(frag));
796 }
797
798 /* First request has the packet length. */
799 first_tx->size = skb->len;
800
801 /* timestamp packet in software */
802 skb_tx_timestamp(skb);
803
804 xennet_mark_tx_pending(queue);
805
806 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify);
807 if (notify)
808 notify_remote_via_irq(queue->tx_irq);
809
810 u64_stats_update_begin(&tx_stats->syncp);
811 tx_stats->bytes += skb->len;
812 tx_stats->packets++;
813 u64_stats_update_end(&tx_stats->syncp);
814
815 /* Note: It is not safe to access skb after xennet_tx_buf_gc()! */
816 xennet_tx_buf_gc(queue);
817
818 if (!netfront_tx_slot_available(queue))
819 netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
820
821 spin_unlock_irqrestore(&queue->tx_lock, flags);
822
823 return NETDEV_TX_OK;
824
825 drop:
826 dev->stats.tx_dropped++;
827 dev_kfree_skb_any(skb);
828 return NETDEV_TX_OK;
829}
830
831static int xennet_close(struct net_device *dev)
832{
833 struct netfront_info *np = netdev_priv(dev);
834 unsigned int num_queues = dev->real_num_tx_queues;
835 unsigned int i;
836 struct netfront_queue *queue;
837 netif_tx_stop_all_queues(np->netdev);
838 for (i = 0; i < num_queues; ++i) {
839 queue = &np->queues[i];
840 napi_disable(&queue->napi);
841 }
842 return 0;
843}
844
845static void xennet_destroy_queues(struct netfront_info *info)
846{
847 unsigned int i;
848
849 for (i = 0; i < info->netdev->real_num_tx_queues; i++) {
850 struct netfront_queue *queue = &info->queues[i];
851
852 if (netif_running(info->netdev))
853 napi_disable(&queue->napi);
854 netif_napi_del(&queue->napi);
855 }
856
857 kfree(info->queues);
858 info->queues = NULL;
859}
860
861static void xennet_uninit(struct net_device *dev)
862{
863 struct netfront_info *np = netdev_priv(dev);
864 xennet_destroy_queues(np);
865}
866
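/*
 * rx.rsp_cons is updated together with the count of unconsumed RX responses
 * under rx_cons_lock, so xennet_handle_rx() can compare the two consistently
 * when deciding whether to issue an EOI or flag a misbehaving backend.
 */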
867static void xennet_set_rx_rsp_cons(struct netfront_queue *queue, RING_IDX val)
868{
869 unsigned long flags;
870
871 spin_lock_irqsave(&queue->rx_cons_lock, flags);
872 queue->rx.rsp_cons = val;
873 queue->rx_rsp_unconsumed = RING_HAS_UNCONSUMED_RESPONSES(&queue->rx);
874 spin_unlock_irqrestore(&queue->rx_cons_lock, flags);
875}
876
877static void xennet_move_rx_slot(struct netfront_queue *queue, struct sk_buff *skb,
878 grant_ref_t ref)
879{
880 int new = xennet_rxidx(queue->rx.req_prod_pvt);
881
882 BUG_ON(queue->rx_skbs[new]);
883 queue->rx_skbs[new] = skb;
884 queue->grant_rx_ref[new] = ref;
885 RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->id = new;
886 RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->gref = ref;
887 queue->rx.req_prod_pvt++;
888}
889
890static int xennet_get_extras(struct netfront_queue *queue,
891 struct xen_netif_extra_info *extras,
892 RING_IDX rp)
893
894{
895 struct xen_netif_extra_info extra;
896 struct device *dev = &queue->info->netdev->dev;
897 RING_IDX cons = queue->rx.rsp_cons;
898 int err = 0;
899
900 do {
901 struct sk_buff *skb;
902 grant_ref_t ref;
903
904 if (unlikely(cons + 1 == rp)) {
905 if (net_ratelimit())
906 dev_warn(dev, "Missing extra info\n");
907 err = -EBADR;
908 break;
909 }
910
911 RING_COPY_RESPONSE(&queue->rx, ++cons, &extra);
912
913 if (unlikely(!extra.type ||
914 extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
915 if (net_ratelimit())
916 dev_warn(dev, "Invalid extra type: %d\n",
917 extra.type);
918 err = -EINVAL;
919 } else {
920 extras[extra.type - 1] = extra;
921 }
922
923 skb = xennet_get_rx_skb(queue, cons);
924 ref = xennet_get_rx_ref(queue, cons);
925 xennet_move_rx_slot(queue, skb, ref);
926 } while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
927
928 xennet_set_rx_rsp_cons(queue, cons);
929 return err;
930}
931
932static u32 xennet_run_xdp(struct netfront_queue *queue, struct page *pdata,
933 struct xen_netif_rx_response *rx, struct bpf_prog *prog,
934 struct xdp_buff *xdp, bool *need_xdp_flush)
935{
936 struct xdp_frame *xdpf;
937 u32 len = rx->status;
938 u32 act;
939 int err;
940
941 xdp->data_hard_start = page_address(pdata);
942 xdp->data = xdp->data_hard_start + XDP_PACKET_HEADROOM;
943 xdp_set_data_meta_invalid(xdp);
944 xdp->data_end = xdp->data + len;
945 xdp->rxq = &queue->xdp_rxq;
946 xdp->frame_sz = XEN_PAGE_SIZE - XDP_PACKET_HEADROOM;
947
948 act = bpf_prog_run_xdp(prog, xdp);
949 switch (act) {
950 case XDP_TX:
951 get_page(pdata);
952 xdpf = xdp_convert_buff_to_frame(xdp);
953 err = xennet_xdp_xmit(queue->info->netdev, 1, &xdpf, 0);
954 if (unlikely(err < 0))
955 trace_xdp_exception(queue->info->netdev, prog, act);
956 break;
957 case XDP_REDIRECT:
958 get_page(pdata);
959 err = xdp_do_redirect(queue->info->netdev, xdp, prog);
960 *need_xdp_flush = true;
961 if (unlikely(err))
962 trace_xdp_exception(queue->info->netdev, prog, act);
963 break;
964 case XDP_PASS:
965 case XDP_DROP:
966 break;
967
968 case XDP_ABORTED:
969 trace_xdp_exception(queue->info->netdev, prog, act);
970 break;
971
972 default:
973 bpf_warn_invalid_xdp_action(act);
974 }
975
976 return act;
977}
978
979static int xennet_get_responses(struct netfront_queue *queue,
980 struct netfront_rx_info *rinfo, RING_IDX rp,
981 struct sk_buff_head *list,
982 bool *need_xdp_flush)
983{
984 struct xen_netif_rx_response *rx = &rinfo->rx, rx_local;
985 int max = XEN_NETIF_NR_SLOTS_MIN + (rx->status <= RX_COPY_THRESHOLD);
986 RING_IDX cons = queue->rx.rsp_cons;
987 struct sk_buff *skb = xennet_get_rx_skb(queue, cons);
988 struct xen_netif_extra_info *extras = rinfo->extras;
989 grant_ref_t ref = xennet_get_rx_ref(queue, cons);
990 struct device *dev = &queue->info->netdev->dev;
991 struct bpf_prog *xdp_prog;
992 struct xdp_buff xdp;
993 int slots = 1;
994 int err = 0;
995 u32 verdict;
996
997 if (rx->flags & XEN_NETRXF_extra_info) {
998 err = xennet_get_extras(queue, extras, rp);
999 if (!err) {
1000 if (extras[XEN_NETIF_EXTRA_TYPE_XDP - 1].type) {
1001 struct xen_netif_extra_info *xdp;
1002
1003 xdp = &extras[XEN_NETIF_EXTRA_TYPE_XDP - 1];
1004 rx->offset = xdp->u.xdp.headroom;
1005 }
1006 }
1007 cons = queue->rx.rsp_cons;
1008 }
1009
1010 for (;;) {
1011 if (unlikely(rx->status < 0 ||
1012 rx->offset + rx->status > XEN_PAGE_SIZE)) {
1013 if (net_ratelimit())
1014 dev_warn(dev, "rx->offset: %u, size: %d\n",
1015 rx->offset, rx->status);
1016 xennet_move_rx_slot(queue, skb, ref);
1017 err = -EINVAL;
1018 goto next;
1019 }
1020
1021 /*
1022 * This definitely indicates a bug, either in this driver or in
1023 * the backend driver. In future this should flag the bad
1024 * situation to the system controller to reboot the backend.
1025 */
1026 if (ref == GRANT_INVALID_REF) {
1027 if (net_ratelimit())
1028 dev_warn(dev, "Bad rx response id %d.\n",
1029 rx->id);
1030 err = -EINVAL;
1031 goto next;
1032 }
1033
1034 if (!gnttab_end_foreign_access_ref(ref, 0)) {
1035 dev_alert(dev,
1036 "Grant still in use by backend domain\n");
1037 queue->info->broken = true;
1038 dev_alert(dev, "Disabled for further use\n");
1039 return -EINVAL;
1040 }
1041
1042 gnttab_release_grant_reference(&queue->gref_rx_head, ref);
1043
1044 rcu_read_lock();
1045 xdp_prog = rcu_dereference(queue->xdp_prog);
1046 if (xdp_prog) {
1047 if (!(rx->flags & XEN_NETRXF_more_data)) {
1048 /* currently only a single page contains data */
1049 verdict = xennet_run_xdp(queue,
1050 skb_frag_page(&skb_shinfo(skb)->frags[0]),
1051 rx, xdp_prog, &xdp, need_xdp_flush);
1052 if (verdict != XDP_PASS)
1053 err = -EINVAL;
1054 } else {
1055 /* drop the frame */
1056 err = -EINVAL;
1057 }
1058 }
1059 rcu_read_unlock();
1060next:
1061 __skb_queue_tail(list, skb);
1062 if (!(rx->flags & XEN_NETRXF_more_data))
1063 break;
1064
1065 if (cons + slots == rp) {
1066 if (net_ratelimit())
1067 dev_warn(dev, "Need more slots\n");
1068 err = -ENOENT;
1069 break;
1070 }
1071
1072 RING_COPY_RESPONSE(&queue->rx, cons + slots, &rx_local);
1073 rx = &rx_local;
1074 skb = xennet_get_rx_skb(queue, cons + slots);
1075 ref = xennet_get_rx_ref(queue, cons + slots);
1076 slots++;
1077 }
1078
1079 if (unlikely(slots > max)) {
1080 if (net_ratelimit())
1081 dev_warn(dev, "Too many slots\n");
1082 err = -E2BIG;
1083 }
1084
1085 if (unlikely(err))
1086 xennet_set_rx_rsp_cons(queue, cons + slots);
1087
1088 return err;
1089}
1090
1091static int xennet_set_skb_gso(struct sk_buff *skb,
1092 struct xen_netif_extra_info *gso)
1093{
1094 if (!gso->u.gso.size) {
1095 if (net_ratelimit())
1096 pr_warn("GSO size must not be zero\n");
1097 return -EINVAL;
1098 }
1099
1100 if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4 &&
1101 gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV6) {
1102 if (net_ratelimit())
1103 pr_warn("Bad GSO type %d\n", gso->u.gso.type);
1104 return -EINVAL;
1105 }
1106
1107 skb_shinfo(skb)->gso_size = gso->u.gso.size;
1108 skb_shinfo(skb)->gso_type =
1109 (gso->u.gso.type == XEN_NETIF_GSO_TYPE_TCPV4) ?
1110 SKB_GSO_TCPV4 :
1111 SKB_GSO_TCPV6;
1112
1113 /* Header must be checked, and gso_segs computed. */
1114 skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
1115 skb_shinfo(skb)->gso_segs = 0;
1116
1117 return 0;
1118}
1119
1120static int xennet_fill_frags(struct netfront_queue *queue,
1121 struct sk_buff *skb,
1122 struct sk_buff_head *list)
1123{
1124 RING_IDX cons = queue->rx.rsp_cons;
1125 struct sk_buff *nskb;
1126
1127 while ((nskb = __skb_dequeue(list))) {
1128 struct xen_netif_rx_response rx;
1129 skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];
1130
1131 RING_COPY_RESPONSE(&queue->rx, ++cons, &rx);
1132
1133 if (skb_shinfo(skb)->nr_frags == MAX_SKB_FRAGS) {
1134 unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
1135
1136 BUG_ON(pull_to < skb_headlen(skb));
1137 __pskb_pull_tail(skb, pull_to - skb_headlen(skb));
1138 }
1139 if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) {
1140 xennet_set_rx_rsp_cons(queue,
1141 ++cons + skb_queue_len(list));
1142 kfree_skb(nskb);
1143 return -ENOENT;
1144 }
1145
1146 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
1147 skb_frag_page(nfrag),
1148 rx.offset, rx.status, PAGE_SIZE);
1149
1150 skb_shinfo(nskb)->nr_frags = 0;
1151 kfree_skb(nskb);
1152 }
1153
1154 xennet_set_rx_rsp_cons(queue, cons);
1155
1156 return 0;
1157}
1158
1159static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
1160{
1161 bool recalculate_partial_csum = false;
1162
1163 /*
1164 * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
1165 * peers can fail to set NETRXF_csum_blank when sending a GSO
1166 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
1167 * recalculate the partial checksum.
1168 */
1169 if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
1170 struct netfront_info *np = netdev_priv(dev);
1171 atomic_inc(&np->rx_gso_checksum_fixup);
1172 skb->ip_summed = CHECKSUM_PARTIAL;
1173 recalculate_partial_csum = true;
1174 }
1175
1176 /* A non-CHECKSUM_PARTIAL SKB does not require setup. */
1177 if (skb->ip_summed != CHECKSUM_PARTIAL)
1178 return 0;
1179
1180 return skb_checksum_setup(skb, recalculate_partial_csum);
1181}
1182
1183static int handle_incoming_queue(struct netfront_queue *queue,
1184 struct sk_buff_head *rxq)
1185{
1186 struct netfront_stats *rx_stats = this_cpu_ptr(queue->info->rx_stats);
1187 int packets_dropped = 0;
1188 struct sk_buff *skb;
1189
1190 while ((skb = __skb_dequeue(rxq)) != NULL) {
1191 int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
1192
1193 if (pull_to > skb_headlen(skb))
1194 __pskb_pull_tail(skb, pull_to - skb_headlen(skb));
1195
1196 /* Ethernet work: Delayed to here as it peeks the header. */
1197 skb->protocol = eth_type_trans(skb, queue->info->netdev);
1198 skb_reset_network_header(skb);
1199
1200 if (checksum_setup(queue->info->netdev, skb)) {
1201 kfree_skb(skb);
1202 packets_dropped++;
1203 queue->info->netdev->stats.rx_errors++;
1204 continue;
1205 }
1206
1207 u64_stats_update_begin(&rx_stats->syncp);
1208 rx_stats->packets++;
1209 rx_stats->bytes += skb->len;
1210 u64_stats_update_end(&rx_stats->syncp);
1211
1212 /* Pass it up. */
1213 napi_gro_receive(&queue->napi, skb);
1214 }
1215
1216 return packets_dropped;
1217}
1218
1219static int xennet_poll(struct napi_struct *napi, int budget)
1220{
1221 struct netfront_queue *queue = container_of(napi, struct netfront_queue, napi);
1222 struct net_device *dev = queue->info->netdev;
1223 struct sk_buff *skb;
1224 struct netfront_rx_info rinfo;
1225 struct xen_netif_rx_response *rx = &rinfo.rx;
1226 struct xen_netif_extra_info *extras = rinfo.extras;
1227 RING_IDX i, rp;
1228 int work_done;
1229 struct sk_buff_head rxq;
1230 struct sk_buff_head errq;
1231 struct sk_buff_head tmpq;
1232 int err;
1233 bool need_xdp_flush = false;
1234
1235 spin_lock(&queue->rx_lock);
1236
1237 skb_queue_head_init(&rxq);
1238 skb_queue_head_init(&errq);
1239 skb_queue_head_init(&tmpq);
1240
1241 rp = queue->rx.sring->rsp_prod;
1242 if (RING_RESPONSE_PROD_OVERFLOW(&queue->rx, rp)) {
1243 dev_alert(&dev->dev, "Illegal number of responses %u\n",
1244 rp - queue->rx.rsp_cons);
1245 queue->info->broken = true;
1246 spin_unlock(&queue->rx_lock);
1247 return 0;
1248 }
1249 rmb(); /* Ensure we see queued responses up to 'rp'. */
1250
1251 i = queue->rx.rsp_cons;
1252 work_done = 0;
1253 while ((i != rp) && (work_done < budget)) {
1254 RING_COPY_RESPONSE(&queue->rx, i, rx);
1255 memset(extras, 0, sizeof(rinfo.extras));
1256
1257 err = xennet_get_responses(queue, &rinfo, rp, &tmpq,
1258 &need_xdp_flush);
1259
1260 if (unlikely(err)) {
1261 if (queue->info->broken) {
1262 spin_unlock(&queue->rx_lock);
1263 return 0;
1264 }
1265err:
1266 while ((skb = __skb_dequeue(&tmpq)))
1267 __skb_queue_tail(&errq, skb);
1268 dev->stats.rx_errors++;
1269 i = queue->rx.rsp_cons;
1270 continue;
1271 }
1272
1273 skb = __skb_dequeue(&tmpq);
1274
1275 if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
1276 struct xen_netif_extra_info *gso;
1277 gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
1278
1279 if (unlikely(xennet_set_skb_gso(skb, gso))) {
1280 __skb_queue_head(&tmpq, skb);
1281 xennet_set_rx_rsp_cons(queue,
1282 queue->rx.rsp_cons +
1283 skb_queue_len(&tmpq));
1284 goto err;
1285 }
1286 }
1287
1288 NETFRONT_SKB_CB(skb)->pull_to = rx->status;
1289 if (NETFRONT_SKB_CB(skb)->pull_to > RX_COPY_THRESHOLD)
1290 NETFRONT_SKB_CB(skb)->pull_to = RX_COPY_THRESHOLD;
1291
1292 skb_frag_off_set(&skb_shinfo(skb)->frags[0], rx->offset);
1293 skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status);
1294 skb->data_len = rx->status;
1295 skb->len += rx->status;
1296
1297 if (unlikely(xennet_fill_frags(queue, skb, &tmpq)))
1298 goto err;
1299
1300 if (rx->flags & XEN_NETRXF_csum_blank)
1301 skb->ip_summed = CHECKSUM_PARTIAL;
1302 else if (rx->flags & XEN_NETRXF_data_validated)
1303 skb->ip_summed = CHECKSUM_UNNECESSARY;
1304
1305 __skb_queue_tail(&rxq, skb);
1306
1307 i = queue->rx.rsp_cons + 1;
1308 xennet_set_rx_rsp_cons(queue, i);
1309 work_done++;
1310 }
1311 if (need_xdp_flush)
1312 xdp_do_flush();
1313
1314 __skb_queue_purge(&errq);
1315
1316 work_done -= handle_incoming_queue(queue, &rxq);
1317
1318 xennet_alloc_rx_buffers(queue);
1319
1320 if (work_done < budget) {
1321 int more_to_do = 0;
1322
1323 napi_complete_done(napi, work_done);
1324
1325 RING_FINAL_CHECK_FOR_RESPONSES(&queue->rx, more_to_do);
1326 if (more_to_do)
1327 napi_schedule(napi);
1328 }
1329
1330 spin_unlock(&queue->rx_lock);
1331
1332 return work_done;
1333}
1334
1335static int xennet_change_mtu(struct net_device *dev, int mtu)
1336{
1337 int max = xennet_can_sg(dev) ? XEN_NETIF_MAX_TX_SIZE : ETH_DATA_LEN;
1338
1339 if (mtu > max)
1340 return -EINVAL;
1341 dev->mtu = mtu;
1342 return 0;
1343}
1344
1345static void xennet_get_stats64(struct net_device *dev,
1346 struct rtnl_link_stats64 *tot)
1347{
1348 struct netfront_info *np = netdev_priv(dev);
1349 int cpu;
1350
1351 for_each_possible_cpu(cpu) {
1352 struct netfront_stats *rx_stats = per_cpu_ptr(np->rx_stats, cpu);
1353 struct netfront_stats *tx_stats = per_cpu_ptr(np->tx_stats, cpu);
1354 u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
1355 unsigned int start;
1356
1357 do {
1358 start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
1359 tx_packets = tx_stats->packets;
1360 tx_bytes = tx_stats->bytes;
1361 } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));
1362
1363 do {
1364 start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
1365 rx_packets = rx_stats->packets;
1366 rx_bytes = rx_stats->bytes;
1367 } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));
1368
1369 tot->rx_packets += rx_packets;
1370 tot->tx_packets += tx_packets;
1371 tot->rx_bytes += rx_bytes;
1372 tot->tx_bytes += tx_bytes;
1373 }
1374
1375 tot->rx_errors = dev->stats.rx_errors;
1376 tot->tx_dropped = dev->stats.tx_dropped;
1377}
1378
1379static void xennet_release_tx_bufs(struct netfront_queue *queue)
1380{
1381 struct sk_buff *skb;
1382 int i;
1383
1384 for (i = 0; i < NET_TX_RING_SIZE; i++) {
1385 /* Skip over entries which are actually freelist references */
1386 if (!queue->tx_skbs[i])
1387 continue;
1388
1389 skb = queue->tx_skbs[i];
1390 queue->tx_skbs[i] = NULL;
1391 get_page(queue->grant_tx_page[i]);
1392 gnttab_end_foreign_access(queue->grant_tx_ref[i],
1393 GNTMAP_readonly,
1394 (unsigned long)page_address(queue->grant_tx_page[i]));
1395 queue->grant_tx_page[i] = NULL;
1396 queue->grant_tx_ref[i] = GRANT_INVALID_REF;
1397 add_id_to_list(&queue->tx_skb_freelist, queue->tx_link, i);
1398 dev_kfree_skb_irq(skb);
1399 }
1400}
1401
1402static void xennet_release_rx_bufs(struct netfront_queue *queue)
1403{
1404 int id, ref;
1405
1406 spin_lock_bh(&queue->rx_lock);
1407
1408 for (id = 0; id < NET_RX_RING_SIZE; id++) {
1409 struct sk_buff *skb;
1410 struct page *page;
1411
1412 skb = queue->rx_skbs[id];
1413 if (!skb)
1414 continue;
1415
1416 ref = queue->grant_rx_ref[id];
1417 if (ref == GRANT_INVALID_REF)
1418 continue;
1419
1420 page = skb_frag_page(&skb_shinfo(skb)->frags[0]);
1421
1422 /* gnttab_end_foreign_access() needs a page ref until
1423 * foreign access is ended (which may be deferred).
1424 */
1425 get_page(page);
1426 gnttab_end_foreign_access(ref, 0,
1427 (unsigned long)page_address(page));
1428 queue->grant_rx_ref[id] = GRANT_INVALID_REF;
1429
1430 kfree_skb(skb);
1431 }
1432
1433 spin_unlock_bh(&queue->rx_lock);
1434}
1435
1436static netdev_features_t xennet_fix_features(struct net_device *dev,
1437 netdev_features_t features)
1438{
1439 struct netfront_info *np = netdev_priv(dev);
1440
1441 if (features & NETIF_F_SG &&
1442 !xenbus_read_unsigned(np->xbdev->otherend, "feature-sg", 0))
1443 features &= ~NETIF_F_SG;
1444
1445 if (features & NETIF_F_IPV6_CSUM &&
1446 !xenbus_read_unsigned(np->xbdev->otherend,
1447 "feature-ipv6-csum-offload", 0))
1448 features &= ~NETIF_F_IPV6_CSUM;
1449
1450 if (features & NETIF_F_TSO &&
1451 !xenbus_read_unsigned(np->xbdev->otherend, "feature-gso-tcpv4", 0))
1452 features &= ~NETIF_F_TSO;
1453
1454 if (features & NETIF_F_TSO6 &&
1455 !xenbus_read_unsigned(np->xbdev->otherend, "feature-gso-tcpv6", 0))
1456 features &= ~NETIF_F_TSO6;
1457
1458 return features;
1459}
1460
1461static int xennet_set_features(struct net_device *dev,
1462 netdev_features_t features)
1463{
1464 if (!(features & NETIF_F_SG) && dev->mtu > ETH_DATA_LEN) {
1465 netdev_info(dev, "Reducing MTU because no SG offload");
1466 dev->mtu = ETH_DATA_LEN;
1467 }
1468
1469 return 0;
1470}
1471
1472static bool xennet_handle_tx(struct netfront_queue *queue, unsigned int *eoi)
1473{
1474 unsigned long flags;
1475
1476 if (unlikely(queue->info->broken))
1477 return false;
1478
1479 spin_lock_irqsave(&queue->tx_lock, flags);
1480 if (xennet_tx_buf_gc(queue))
1481 *eoi = 0;
1482 spin_unlock_irqrestore(&queue->tx_lock, flags);
1483
1484 return true;
1485}
1486
1487static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id)
1488{
1489 unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
1490
1491 if (likely(xennet_handle_tx(dev_id, &eoiflag)))
1492 xen_irq_lateeoi(irq, eoiflag);
1493
1494 return IRQ_HANDLED;
1495}
1496
1497static bool xennet_handle_rx(struct netfront_queue *queue, unsigned int *eoi)
1498{
1499 unsigned int work_queued;
1500 unsigned long flags;
1501
1502 if (unlikely(queue->info->broken))
1503 return false;
1504
1505 spin_lock_irqsave(&queue->rx_cons_lock, flags);
1506 work_queued = RING_HAS_UNCONSUMED_RESPONSES(&queue->rx);
1507 if (work_queued > queue->rx_rsp_unconsumed) {
1508 queue->rx_rsp_unconsumed = work_queued;
1509 *eoi = 0;
1510 } else if (unlikely(work_queued < queue->rx_rsp_unconsumed)) {
1511 const struct device *dev = &queue->info->netdev->dev;
1512
1513 spin_unlock_irqrestore(&queue->rx_cons_lock, flags);
1514 dev_alert(dev, "RX producer index going backwards\n");
1515 dev_alert(dev, "Disabled for further use\n");
1516 queue->info->broken = true;
1517 return false;
1518 }
1519 spin_unlock_irqrestore(&queue->rx_cons_lock, flags);
1520
1521 if (likely(netif_carrier_ok(queue->info->netdev) && work_queued))
1522 napi_schedule(&queue->napi);
1523
1524 return true;
1525}
1526
1527static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id)
1528{
1529 unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
1530
1531 if (likely(xennet_handle_rx(dev_id, &eoiflag)))
1532 xen_irq_lateeoi(irq, eoiflag);
1533
1534 return IRQ_HANDLED;
1535}
1536
1537static irqreturn_t xennet_interrupt(int irq, void *dev_id)
1538{
1539 unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
1540
1541 if (xennet_handle_tx(dev_id, &eoiflag) &&
1542 xennet_handle_rx(dev_id, &eoiflag))
1543 xen_irq_lateeoi(irq, eoiflag);
1544
1545 return IRQ_HANDLED;
1546}
1547
1548#ifdef CONFIG_NET_POLL_CONTROLLER
1549static void xennet_poll_controller(struct net_device *dev)
1550{
1551 /* Poll each queue */
1552 struct netfront_info *info = netdev_priv(dev);
1553 unsigned int num_queues = dev->real_num_tx_queues;
1554 unsigned int i;
1555
1556 if (info->broken)
1557 return;
1558
1559 for (i = 0; i < num_queues; ++i)
1560 xennet_interrupt(0, &info->queues[i]);
1561}
1562#endif
1563
1564#define NETBACK_XDP_HEADROOM_DISABLE 0
1565#define NETBACK_XDP_HEADROOM_ENABLE 1
1566
1567static int talk_to_netback_xdp(struct netfront_info *np, int xdp)
1568{
1569 int err;
1570 unsigned short headroom;
1571
1572 headroom = xdp ? XDP_PACKET_HEADROOM : 0;
1573 err = xenbus_printf(XBT_NIL, np->xbdev->nodename,
1574 "xdp-headroom", "%hu",
1575 headroom);
1576 if (err)
1577 pr_warn("Error writing xdp-headroom\n");
1578
1579 return err;
1580}
1581
1582static int xennet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
1583 struct netlink_ext_ack *extack)
1584{
1585 unsigned long max_mtu = XEN_PAGE_SIZE - XDP_PACKET_HEADROOM;
1586 struct netfront_info *np = netdev_priv(dev);
1587 struct bpf_prog *old_prog;
1588 unsigned int i, err;
1589
1590 if (dev->mtu > max_mtu) {
1591 netdev_warn(dev, "XDP requires MTU less than %lu\n", max_mtu);
1592 return -EINVAL;
1593 }
1594
1595 if (!np->netback_has_xdp_headroom)
1596 return 0;
1597
1598 xenbus_switch_state(np->xbdev, XenbusStateReconfiguring);
1599
1600 err = talk_to_netback_xdp(np, prog ? NETBACK_XDP_HEADROOM_ENABLE :
1601 NETBACK_XDP_HEADROOM_DISABLE);
1602 if (err)
1603 return err;
1604
1605 /* avoid the race with XDP headroom adjustment */
1606 wait_event(module_wq,
1607 xenbus_read_driver_state(np->xbdev->otherend) ==
1608 XenbusStateReconfigured);
1609 np->netfront_xdp_enabled = true;
1610
1611 old_prog = rtnl_dereference(np->queues[0].xdp_prog);
1612
1613 if (prog)
1614 bpf_prog_add(prog, dev->real_num_tx_queues);
1615
1616 for (i = 0; i < dev->real_num_tx_queues; ++i)
1617 rcu_assign_pointer(np->queues[i].xdp_prog, prog);
1618
1619 if (old_prog)
1620 for (i = 0; i < dev->real_num_tx_queues; ++i)
1621 bpf_prog_put(old_prog);
1622
1623 xenbus_switch_state(np->xbdev, XenbusStateConnected);
1624
1625 return 0;
1626}
1627
1628static int xennet_xdp(struct net_device *dev, struct netdev_bpf *xdp)
1629{
1630 struct netfront_info *np = netdev_priv(dev);
1631
1632 if (np->broken)
1633 return -ENODEV;
1634
1635 switch (xdp->command) {
1636 case XDP_SETUP_PROG:
1637 return xennet_xdp_set(dev, xdp->prog, xdp->extack);
1638 default:
1639 return -EINVAL;
1640 }
1641}
1642
1643static const struct net_device_ops xennet_netdev_ops = {
1644 .ndo_uninit = xennet_uninit,
1645 .ndo_open = xennet_open,
1646 .ndo_stop = xennet_close,
1647 .ndo_start_xmit = xennet_start_xmit,
1648 .ndo_change_mtu = xennet_change_mtu,
1649 .ndo_get_stats64 = xennet_get_stats64,
1650 .ndo_set_mac_address = eth_mac_addr,
1651 .ndo_validate_addr = eth_validate_addr,
1652 .ndo_fix_features = xennet_fix_features,
1653 .ndo_set_features = xennet_set_features,
1654 .ndo_select_queue = xennet_select_queue,
1655 .ndo_bpf = xennet_xdp,
1656 .ndo_xdp_xmit = xennet_xdp_xmit,
1657#ifdef CONFIG_NET_POLL_CONTROLLER
1658 .ndo_poll_controller = xennet_poll_controller,
1659#endif
1660};
1661
1662static void xennet_free_netdev(struct net_device *netdev)
1663{
1664 struct netfront_info *np = netdev_priv(netdev);
1665
1666 free_percpu(np->rx_stats);
1667 free_percpu(np->tx_stats);
1668 free_netdev(netdev);
1669}
1670
1671static struct net_device *xennet_create_dev(struct xenbus_device *dev)
1672{
1673 int err;
1674 struct net_device *netdev;
1675 struct netfront_info *np;
1676
1677 netdev = alloc_etherdev_mq(sizeof(struct netfront_info), xennet_max_queues);
1678 if (!netdev)
1679 return ERR_PTR(-ENOMEM);
1680
1681 np = netdev_priv(netdev);
1682 np->xbdev = dev;
1683
1684 np->queues = NULL;
1685
1686 err = -ENOMEM;
1687 np->rx_stats = netdev_alloc_pcpu_stats(struct netfront_stats);
1688 if (np->rx_stats == NULL)
1689 goto exit;
1690 np->tx_stats = netdev_alloc_pcpu_stats(struct netfront_stats);
1691 if (np->tx_stats == NULL)
1692 goto exit;
1693
1694 netdev->netdev_ops = &xennet_netdev_ops;
1695
1696 netdev->features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
1697 NETIF_F_GSO_ROBUST;
1698 netdev->hw_features = NETIF_F_SG |
1699 NETIF_F_IPV6_CSUM |
1700 NETIF_F_TSO | NETIF_F_TSO6;
1701
1702 /*
1703 * Assume that all hw features are available for now. This set
1704 * will be adjusted by the call to netdev_update_features() in
1705 * xennet_connect() which is the earliest point where we can
1706 * negotiate with the backend regarding supported features.
1707 */
1708 netdev->features |= netdev->hw_features;
1709
1710 netdev->ethtool_ops = &xennet_ethtool_ops;
1711 netdev->min_mtu = ETH_MIN_MTU;
1712 netdev->max_mtu = XEN_NETIF_MAX_TX_SIZE;
1713 SET_NETDEV_DEV(netdev, &dev->dev);
1714
1715 np->netdev = netdev;
Olivier Deprez157378f2022-04-04 15:47:50 +02001716 np->netfront_xdp_enabled = false;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001717
1718 netif_carrier_off(netdev);
1719
Olivier Deprez0e641232021-09-23 10:07:05 +02001720 do {
1721 xenbus_switch_state(dev, XenbusStateInitialising);
1722 err = wait_event_timeout(module_wq,
1723 xenbus_read_driver_state(dev->otherend) !=
1724 XenbusStateClosed &&
1725 xenbus_read_driver_state(dev->otherend) !=
1726 XenbusStateUnknown, XENNET_TIMEOUT);
1727 } while (!err);
1728
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001729 return netdev;
1730
1731 exit:
1732 xennet_free_netdev(netdev);
1733 return ERR_PTR(err);
1734}
1735
1736/**
1737 * Entry point to this code when a new device is created. Allocate the basic
1738 * structures and the ring buffers for communication with the backend, and
1739 * inform the backend of the appropriate details for those.
1740 */
1741static int netfront_probe(struct xenbus_device *dev,
1742 const struct xenbus_device_id *id)
1743{
1744 int err;
1745 struct net_device *netdev;
1746 struct netfront_info *info;
1747
1748 netdev = xennet_create_dev(dev);
1749 if (IS_ERR(netdev)) {
1750 err = PTR_ERR(netdev);
1751 xenbus_dev_fatal(dev, err, "creating netdev");
1752 return err;
1753 }
1754
1755 info = netdev_priv(netdev);
1756 dev_set_drvdata(&dev->dev, info);
1757#ifdef CONFIG_SYSFS
1758 info->netdev->sysfs_groups[0] = &xennet_dev_group;
1759#endif
1760
1761 return 0;
1762}
1763
1764static void xennet_end_access(int ref, void *page)
1765{
1766 /* This frees the page as a side-effect */
1767 if (ref != GRANT_INVALID_REF)
1768 gnttab_end_foreign_access(ref, 0, (unsigned long)page);
1769}
1770
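/*
 * Sever every per-queue connection to the backend: stop the rx refill
 * timer and the interrupts, quiesce NAPI, release tx/rx buffers and
 * their grant references, revoke the shared rings and destroy the
 * queue's page pool.
 */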
1771static void xennet_disconnect_backend(struct netfront_info *info)
1772{
1773 unsigned int i = 0;
1774 unsigned int num_queues = info->netdev->real_num_tx_queues;
1775
1776 netif_carrier_off(info->netdev);
1777
1778 for (i = 0; i < num_queues && info->queues; ++i) {
1779 struct netfront_queue *queue = &info->queues[i];
1780
1781 del_timer_sync(&queue->rx_refill_timer);
1782
1783 if (queue->tx_irq && (queue->tx_irq == queue->rx_irq))
1784 unbind_from_irqhandler(queue->tx_irq, queue);
1785 if (queue->tx_irq && (queue->tx_irq != queue->rx_irq)) {
1786 unbind_from_irqhandler(queue->tx_irq, queue);
1787 unbind_from_irqhandler(queue->rx_irq, queue);
1788 }
1789 queue->tx_evtchn = queue->rx_evtchn = 0;
1790 queue->tx_irq = queue->rx_irq = 0;
1791
1792 if (netif_running(info->netdev))
1793 napi_synchronize(&queue->napi);
1794
1795 xennet_release_tx_bufs(queue);
1796 xennet_release_rx_bufs(queue);
1797 gnttab_free_grant_references(queue->gref_tx_head);
1798 gnttab_free_grant_references(queue->gref_rx_head);
1799
1800 /* End access and free the pages */
1801 xennet_end_access(queue->tx_ring_ref, queue->tx.sring);
1802 xennet_end_access(queue->rx_ring_ref, queue->rx.sring);
1803
1804 queue->tx_ring_ref = GRANT_INVALID_REF;
1805 queue->rx_ring_ref = GRANT_INVALID_REF;
1806 queue->tx.sring = NULL;
1807 queue->rx.sring = NULL;
Olivier Deprez157378f2022-04-04 15:47:50 +02001808
1809 page_pool_destroy(queue->page_pool);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001810 }
1811}
1812
1813/**
1814 * We are reconnecting to the backend, due to a suspend/resume or a backend
1815 * driver restart. We tear down our netif structure and recreate it, but
1816 * leave the device-layer structures intact so that this is transparent to the
1817 * rest of the kernel.
1818 */
1819static int netfront_resume(struct xenbus_device *dev)
1820{
1821 struct netfront_info *info = dev_get_drvdata(&dev->dev);
1822
1823 dev_dbg(&dev->dev, "%s\n", dev->nodename);
1824
Olivier Deprez157378f2022-04-04 15:47:50 +02001825 netif_tx_lock_bh(info->netdev);
1826 netif_device_detach(info->netdev);
1827 netif_tx_unlock_bh(info->netdev);
1828
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001829 xennet_disconnect_backend(info);
1830 return 0;
1831}
1832
1833static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
1834{
1835 char *s, *e, *macstr;
1836 int i;
1837
1838 macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
1839 if (IS_ERR(macstr))
1840 return PTR_ERR(macstr);
1841
1842 for (i = 0; i < ETH_ALEN; i++) {
1843 mac[i] = simple_strtoul(s, &e, 16);
1844 if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) {
1845 kfree(macstr);
1846 return -ENOENT;
1847 }
1848 s = e+1;
1849 }
1850
1851 kfree(macstr);
1852 return 0;
1853}
1854
1855static int setup_netfront_single(struct netfront_queue *queue)
1856{
1857 int err;
1858
1859 err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn);
1860 if (err < 0)
1861 goto fail;
1862
Olivier Deprez157378f2022-04-04 15:47:50 +02001863 err = bind_evtchn_to_irqhandler_lateeoi(queue->tx_evtchn,
1864 xennet_interrupt, 0,
1865 queue->info->netdev->name,
1866 queue);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001867 if (err < 0)
1868 goto bind_fail;
1869 queue->rx_evtchn = queue->tx_evtchn;
1870 queue->rx_irq = queue->tx_irq = err;
1871
1872 return 0;
1873
1874bind_fail:
1875 xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn);
1876 queue->tx_evtchn = 0;
1877fail:
1878 return err;
1879}
1880
1881static int setup_netfront_split(struct netfront_queue *queue)
1882{
1883 int err;
1884
1885 err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn);
1886 if (err < 0)
1887 goto fail;
1888 err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->rx_evtchn);
1889 if (err < 0)
1890 goto alloc_rx_evtchn_fail;
1891
1892 snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
1893 "%s-tx", queue->name);
Olivier Deprez157378f2022-04-04 15:47:50 +02001894 err = bind_evtchn_to_irqhandler_lateeoi(queue->tx_evtchn,
1895 xennet_tx_interrupt, 0,
1896 queue->tx_irq_name, queue);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001897 if (err < 0)
1898 goto bind_tx_fail;
1899 queue->tx_irq = err;
1900
1901 snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
1902 "%s-rx", queue->name);
Olivier Deprez157378f2022-04-04 15:47:50 +02001903 err = bind_evtchn_to_irqhandler_lateeoi(queue->rx_evtchn,
1904 xennet_rx_interrupt, 0,
1905 queue->rx_irq_name, queue);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001906 if (err < 0)
1907 goto bind_rx_fail;
1908 queue->rx_irq = err;
1909
1910 return 0;
1911
1912bind_rx_fail:
1913 unbind_from_irqhandler(queue->tx_irq, queue);
1914 queue->tx_irq = 0;
1915bind_tx_fail:
1916 xenbus_free_evtchn(queue->info->xbdev, queue->rx_evtchn);
1917 queue->rx_evtchn = 0;
1918alloc_rx_evtchn_fail:
1919 xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn);
1920 queue->tx_evtchn = 0;
1921fail:
1922 return err;
1923}
1924
1925static int setup_netfront(struct xenbus_device *dev,
1926 struct netfront_queue *queue, unsigned int feature_split_evtchn)
1927{
1928 struct xen_netif_tx_sring *txs;
Olivier Deprez157378f2022-04-04 15:47:50 +02001929 struct xen_netif_rx_sring *rxs = NULL;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001930 grant_ref_t gref;
1931 int err;
1932
1933 queue->tx_ring_ref = GRANT_INVALID_REF;
1934 queue->rx_ring_ref = GRANT_INVALID_REF;
1935 queue->rx.sring = NULL;
1936 queue->tx.sring = NULL;
1937
1938 txs = (struct xen_netif_tx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
1939 if (!txs) {
1940 err = -ENOMEM;
1941 xenbus_dev_fatal(dev, err, "allocating tx ring page");
1942 goto fail;
1943 }
1944 SHARED_RING_INIT(txs);
1945 FRONT_RING_INIT(&queue->tx, txs, XEN_PAGE_SIZE);
1946
1947 err = xenbus_grant_ring(dev, txs, 1, &gref);
1948 if (err < 0)
Olivier Deprez157378f2022-04-04 15:47:50 +02001949 goto fail;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001950 queue->tx_ring_ref = gref;
1951
1952 rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
1953 if (!rxs) {
1954 err = -ENOMEM;
1955 xenbus_dev_fatal(dev, err, "allocating rx ring page");
Olivier Deprez157378f2022-04-04 15:47:50 +02001956 goto fail;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001957 }
1958 SHARED_RING_INIT(rxs);
1959 FRONT_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE);
1960
1961 err = xenbus_grant_ring(dev, rxs, 1, &gref);
1962 if (err < 0)
Olivier Deprez157378f2022-04-04 15:47:50 +02001963 goto fail;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001964 queue->rx_ring_ref = gref;
1965
1966 if (feature_split_evtchn)
1967 err = setup_netfront_split(queue);
1968 /* setup single event channel if
1969 * a) feature-split-event-channels == 0
1970 * b) feature-split-event-channels == 1 but failed to setup
1971 */
1972 if (!feature_split_evtchn || (feature_split_evtchn && err))
1973 err = setup_netfront_single(queue);
1974
1975 if (err)
Olivier Deprez157378f2022-04-04 15:47:50 +02001976 goto fail;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001977
1978 return 0;
1979
1980 /* If we fail to setup netfront, it is safe to just revoke access to
1981 * granted pages because backend is not accessing it at this point.
1982 */
Olivier Deprez157378f2022-04-04 15:47:50 +02001983 fail:
1984 if (queue->rx_ring_ref != GRANT_INVALID_REF) {
1985 gnttab_end_foreign_access(queue->rx_ring_ref, 0,
1986 (unsigned long)rxs);
1987 queue->rx_ring_ref = GRANT_INVALID_REF;
1988 } else {
1989 free_page((unsigned long)rxs);
1990 }
1991 if (queue->tx_ring_ref != GRANT_INVALID_REF) {
1992 gnttab_end_foreign_access(queue->tx_ring_ref, 0,
1993 (unsigned long)txs);
1994 queue->tx_ring_ref = GRANT_INVALID_REF;
1995 } else {
1996 free_page((unsigned long)txs);
1997 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001998 return err;
1999}
2000
2001/* Queue-specific initialisation
2002 * This used to be done in xennet_create_dev() but must now
2003 * be run per-queue.
2004 */
2005static int xennet_init_queue(struct netfront_queue *queue)
2006{
2007 unsigned short i;
2008 int err = 0;
2009 char *devid;
2010
2011 spin_lock_init(&queue->tx_lock);
2012 spin_lock_init(&queue->rx_lock);
Olivier Deprez157378f2022-04-04 15:47:50 +02002013 spin_lock_init(&queue->rx_cons_lock);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002014
2015 timer_setup(&queue->rx_refill_timer, rx_refill_timeout, 0);
2016
2017 devid = strrchr(queue->info->xbdev->nodename, '/') + 1;
2018 snprintf(queue->name, sizeof(queue->name), "vif%s-q%u",
2019 devid, queue->id);
2020
Olivier Deprez157378f2022-04-04 15:47:50 +02002021 /* Initialise tx_skb_freelist as a free chain containing every entry. */
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002022 queue->tx_skb_freelist = 0;
Olivier Deprez157378f2022-04-04 15:47:50 +02002023 queue->tx_pend_queue = TX_LINK_NONE;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002024 for (i = 0; i < NET_TX_RING_SIZE; i++) {
Olivier Deprez157378f2022-04-04 15:47:50 +02002025 queue->tx_link[i] = i + 1;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002026 queue->grant_tx_ref[i] = GRANT_INVALID_REF;
2027 queue->grant_tx_page[i] = NULL;
2028 }
Olivier Deprez157378f2022-04-04 15:47:50 +02002029 queue->tx_link[NET_TX_RING_SIZE - 1] = TX_LINK_NONE;
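	/*
	 * The loop above threads a singly linked free list through tx_link[]:
	 *   tx_skb_freelist -> 0 -> 1 -> ... -> NET_TX_RING_SIZE - 1 -> TX_LINK_NONE
	 */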
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002030
2031 /* Clear out rx_skbs */
2032 for (i = 0; i < NET_RX_RING_SIZE; i++) {
2033 queue->rx_skbs[i] = NULL;
2034 queue->grant_rx_ref[i] = GRANT_INVALID_REF;
2035 }
2036
2037 /* A grant for every tx ring slot */
2038 if (gnttab_alloc_grant_references(NET_TX_RING_SIZE,
2039 &queue->gref_tx_head) < 0) {
2040 pr_alert("can't alloc tx grant refs\n");
2041 err = -ENOMEM;
2042 goto exit;
2043 }
2044
2045 /* A grant for every rx ring slot */
2046 if (gnttab_alloc_grant_references(NET_RX_RING_SIZE,
2047 &queue->gref_rx_head) < 0) {
2048 pr_alert("can't alloc rx grant refs\n");
2049 err = -ENOMEM;
2050 goto exit_free_tx;
2051 }
2052
2053 return 0;
2054
2055 exit_free_tx:
2056 gnttab_free_grant_references(queue->gref_tx_head);
2057 exit:
2058 return err;
2059}
2060
2061static int write_queue_xenstore_keys(struct netfront_queue *queue,
2062 struct xenbus_transaction *xbt, int write_hierarchical)
2063{
2064 /* Write the queue-specific keys into XenStore in the traditional
2065	 * way for a single queue, or in per-queue subkeys for multiple
2066 * queues.
2067 */
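	/*
	 * Illustrative layouts, assuming a frontend nodename of device/vif/0:
	 *
	 *   flat (single queue):     device/vif/0/tx-ring-ref
	 *   hierarchical (queue N):  device/vif/0/queue-N/tx-ring-ref
	 *
	 * The same pattern applies to rx-ring-ref and the event-channel*
	 * keys written below.
	 */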
2068 struct xenbus_device *dev = queue->info->xbdev;
2069 int err;
2070 const char *message;
2071 char *path;
2072 size_t pathsize;
2073
2074 /* Choose the correct place to write the keys */
2075 if (write_hierarchical) {
2076 pathsize = strlen(dev->nodename) + 10;
2077 path = kzalloc(pathsize, GFP_KERNEL);
2078 if (!path) {
2079 err = -ENOMEM;
2080 message = "out of memory while writing ring references";
2081 goto error;
2082 }
2083 snprintf(path, pathsize, "%s/queue-%u",
2084 dev->nodename, queue->id);
2085 } else {
2086 path = (char *)dev->nodename;
2087 }
2088
2089 /* Write ring references */
2090 err = xenbus_printf(*xbt, path, "tx-ring-ref", "%u",
2091 queue->tx_ring_ref);
2092 if (err) {
2093 message = "writing tx-ring-ref";
2094 goto error;
2095 }
2096
2097 err = xenbus_printf(*xbt, path, "rx-ring-ref", "%u",
2098 queue->rx_ring_ref);
2099 if (err) {
2100 message = "writing rx-ring-ref";
2101 goto error;
2102 }
2103
2104	/* Write event channels, taking into account both shared
2105 * and split event channel scenarios.
2106 */
2107 if (queue->tx_evtchn == queue->rx_evtchn) {
2108 /* Shared event channel */
2109 err = xenbus_printf(*xbt, path,
2110 "event-channel", "%u", queue->tx_evtchn);
2111 if (err) {
2112 message = "writing event-channel";
2113 goto error;
2114 }
2115 } else {
2116 /* Split event channels */
2117 err = xenbus_printf(*xbt, path,
2118 "event-channel-tx", "%u", queue->tx_evtchn);
2119 if (err) {
2120 message = "writing event-channel-tx";
2121 goto error;
2122 }
2123
2124 err = xenbus_printf(*xbt, path,
2125 "event-channel-rx", "%u", queue->rx_evtchn);
2126 if (err) {
2127 message = "writing event-channel-rx";
2128 goto error;
2129 }
2130 }
2131
2132 if (write_hierarchical)
2133 kfree(path);
2134 return 0;
2135
2136error:
2137 if (write_hierarchical)
2138 kfree(path);
2139 xenbus_dev_fatal(dev, err, "%s", message);
2140 return err;
2141}
2142
Olivier Deprez157378f2022-04-04 15:47:50 +02002143
2144
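/*
 * Give each rx queue its own page_pool and register it as the memory
 * model of the queue's xdp_rxq_info, so frames dropped or recycled by
 * XDP go back to the pool rather than the page allocator.
 */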
2145static int xennet_create_page_pool(struct netfront_queue *queue)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002146{
Olivier Deprez157378f2022-04-04 15:47:50 +02002147 int err;
2148 struct page_pool_params pp_params = {
2149 .order = 0,
2150 .flags = 0,
2151 .pool_size = NET_RX_RING_SIZE,
2152 .nid = NUMA_NO_NODE,
2153 .dev = &queue->info->netdev->dev,
2154 .offset = XDP_PACKET_HEADROOM,
2155 .max_len = XEN_PAGE_SIZE - XDP_PACKET_HEADROOM,
2156 };
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002157
Olivier Deprez157378f2022-04-04 15:47:50 +02002158 queue->page_pool = page_pool_create(&pp_params);
2159 if (IS_ERR(queue->page_pool)) {
2160 err = PTR_ERR(queue->page_pool);
2161 queue->page_pool = NULL;
2162 return err;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002163 }
2164
Olivier Deprez157378f2022-04-04 15:47:50 +02002165 err = xdp_rxq_info_reg(&queue->xdp_rxq, queue->info->netdev,
2166 queue->id);
2167 if (err) {
2168 netdev_err(queue->info->netdev, "xdp_rxq_info_reg failed\n");
2169 goto err_free_pp;
2170 }
2171
2172 err = xdp_rxq_info_reg_mem_model(&queue->xdp_rxq,
2173 MEM_TYPE_PAGE_POOL, queue->page_pool);
2174 if (err) {
2175 netdev_err(queue->info->netdev, "xdp_rxq_info_reg_mem_model failed\n");
2176 goto err_unregister_rxq;
2177 }
2178 return 0;
2179
2180err_unregister_rxq:
2181 xdp_rxq_info_unreg(&queue->xdp_rxq);
2182err_free_pp:
2183 page_pool_destroy(queue->page_pool);
2184 queue->page_pool = NULL;
2185 return err;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002186}
2187
2188static int xennet_create_queues(struct netfront_info *info,
2189 unsigned int *num_queues)
2190{
2191 unsigned int i;
2192 int ret;
2193
2194 info->queues = kcalloc(*num_queues, sizeof(struct netfront_queue),
2195 GFP_KERNEL);
2196 if (!info->queues)
2197 return -ENOMEM;
2198
2199 for (i = 0; i < *num_queues; i++) {
2200 struct netfront_queue *queue = &info->queues[i];
2201
2202 queue->id = i;
2203 queue->info = info;
2204
2205 ret = xennet_init_queue(queue);
2206 if (ret < 0) {
2207 dev_warn(&info->xbdev->dev,
2208 "only created %d queues\n", i);
2209 *num_queues = i;
2210 break;
2211 }
2212
Olivier Deprez157378f2022-04-04 15:47:50 +02002213 /* use page pool recycling instead of buddy allocator */
2214 ret = xennet_create_page_pool(queue);
2215 if (ret < 0) {
2216 dev_err(&info->xbdev->dev, "can't allocate page pool\n");
2217 *num_queues = i;
2218 return ret;
2219 }
2220
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002221 netif_napi_add(queue->info->netdev, &queue->napi,
2222 xennet_poll, 64);
2223 if (netif_running(info->netdev))
2224 napi_enable(&queue->napi);
2225 }
2226
2227 netif_set_real_num_tx_queues(info->netdev, *num_queues);
2228
2229 if (*num_queues == 0) {
2230 dev_err(&info->xbdev->dev, "no queues\n");
2231 return -EINVAL;
2232 }
2233 return 0;
2234}
2235
2236/* Common code used when first setting up, and when resuming. */
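/*
 * The sequence is: read the backend's advertised features, create the
 * frontend queues, set up a shared ring and event channel(s) for each
 * queue, then publish all ring references and event channels to
 * XenStore in a single transaction, retrying on -EAGAIN.
 */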
2237static int talk_to_netback(struct xenbus_device *dev,
2238 struct netfront_info *info)
2239{
2240 const char *message;
2241 struct xenbus_transaction xbt;
2242 int err;
2243 unsigned int feature_split_evtchn;
2244 unsigned int i = 0;
2245 unsigned int max_queues = 0;
2246 struct netfront_queue *queue = NULL;
2247 unsigned int num_queues = 1;
2248
2249 info->netdev->irq = 0;
2250
2251 /* Check if backend supports multiple queues */
2252 max_queues = xenbus_read_unsigned(info->xbdev->otherend,
2253 "multi-queue-max-queues", 1);
2254 num_queues = min(max_queues, xennet_max_queues);
2255
2256 /* Check feature-split-event-channels */
2257 feature_split_evtchn = xenbus_read_unsigned(info->xbdev->otherend,
2258 "feature-split-event-channels", 0);
2259
2260 /* Read mac addr. */
2261 err = xen_net_read_mac(dev, info->netdev->dev_addr);
2262 if (err) {
2263 xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
2264 goto out_unlocked;
2265 }
2266
Olivier Deprez157378f2022-04-04 15:47:50 +02002267 info->netback_has_xdp_headroom = xenbus_read_unsigned(info->xbdev->otherend,
2268 "feature-xdp-headroom", 0);
2269 if (info->netback_has_xdp_headroom) {
2270 /* set the current xen-netfront xdp state */
2271 err = talk_to_netback_xdp(info, info->netfront_xdp_enabled ?
2272 NETBACK_XDP_HEADROOM_ENABLE :
2273 NETBACK_XDP_HEADROOM_DISABLE);
2274 if (err)
2275 goto out_unlocked;
2276 }
2277
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002278 rtnl_lock();
2279 if (info->queues)
2280 xennet_destroy_queues(info);
2281
Olivier Deprez157378f2022-04-04 15:47:50 +02002282	/* For the case of a reconnect, reset the "broken" indicator. */
2283 info->broken = false;
2284
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002285 err = xennet_create_queues(info, &num_queues);
2286 if (err < 0) {
2287 xenbus_dev_fatal(dev, err, "creating queues");
2288 kfree(info->queues);
2289 info->queues = NULL;
2290 goto out;
2291 }
2292 rtnl_unlock();
2293
2294 /* Create shared ring, alloc event channel -- for each queue */
2295 for (i = 0; i < num_queues; ++i) {
2296 queue = &info->queues[i];
2297 err = setup_netfront(dev, queue, feature_split_evtchn);
2298 if (err)
2299 goto destroy_ring;
2300 }
2301
2302again:
2303 err = xenbus_transaction_start(&xbt);
2304 if (err) {
2305 xenbus_dev_fatal(dev, err, "starting transaction");
2306 goto destroy_ring;
2307 }
2308
2309 if (xenbus_exists(XBT_NIL,
2310 info->xbdev->otherend, "multi-queue-max-queues")) {
2311 /* Write the number of queues */
2312 err = xenbus_printf(xbt, dev->nodename,
2313 "multi-queue-num-queues", "%u", num_queues);
2314 if (err) {
2315 message = "writing multi-queue-num-queues";
2316 goto abort_transaction_no_dev_fatal;
2317 }
2318 }
2319
2320 if (num_queues == 1) {
2321 err = write_queue_xenstore_keys(&info->queues[0], &xbt, 0); /* flat */
2322 if (err)
2323 goto abort_transaction_no_dev_fatal;
2324 } else {
2325 /* Write the keys for each queue */
2326 for (i = 0; i < num_queues; ++i) {
2327 queue = &info->queues[i];
2328 err = write_queue_xenstore_keys(queue, &xbt, 1); /* hierarchical */
2329 if (err)
2330 goto abort_transaction_no_dev_fatal;
2331 }
2332 }
2333
2334 /* The remaining keys are not queue-specific */
2335 err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u",
2336 1);
2337 if (err) {
2338 message = "writing request-rx-copy";
2339 goto abort_transaction;
2340 }
2341
2342 err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1);
2343 if (err) {
2344 message = "writing feature-rx-notify";
2345 goto abort_transaction;
2346 }
2347
2348 err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1);
2349 if (err) {
2350 message = "writing feature-sg";
2351 goto abort_transaction;
2352 }
2353
2354 err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", 1);
2355 if (err) {
2356 message = "writing feature-gso-tcpv4";
2357 goto abort_transaction;
2358 }
2359
2360 err = xenbus_write(xbt, dev->nodename, "feature-gso-tcpv6", "1");
2361 if (err) {
2362 message = "writing feature-gso-tcpv6";
2363 goto abort_transaction;
2364 }
2365
2366 err = xenbus_write(xbt, dev->nodename, "feature-ipv6-csum-offload",
2367 "1");
2368 if (err) {
2369 message = "writing feature-ipv6-csum-offload";
2370 goto abort_transaction;
2371 }
2372
2373 err = xenbus_transaction_end(xbt, 0);
2374 if (err) {
2375 if (err == -EAGAIN)
2376 goto again;
2377 xenbus_dev_fatal(dev, err, "completing transaction");
2378 goto destroy_ring;
2379 }
2380
2381 return 0;
2382
2383 abort_transaction:
2384 xenbus_dev_fatal(dev, err, "%s", message);
2385abort_transaction_no_dev_fatal:
2386 xenbus_transaction_end(xbt, 1);
2387 destroy_ring:
2388 xennet_disconnect_backend(info);
2389 rtnl_lock();
2390 xennet_destroy_queues(info);
2391 out:
2392 rtnl_unlock();
2393out_unlocked:
2394 device_unregister(&dev->dev);
2395 return err;
2396}
2397
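/*
 * (Re)establish the connection: negotiate features with the backend via
 * talk_to_netback(), register the net_device on first connect, then kick
 * every queue so packets queued while disconnected get processed.
 */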
2398static int xennet_connect(struct net_device *dev)
2399{
2400 struct netfront_info *np = netdev_priv(dev);
2401 unsigned int num_queues = 0;
2402 int err;
2403 unsigned int j = 0;
2404 struct netfront_queue *queue = NULL;
2405
2406 if (!xenbus_read_unsigned(np->xbdev->otherend, "feature-rx-copy", 0)) {
2407 dev_info(&dev->dev,
2408 "backend does not support copying receive path\n");
2409 return -ENODEV;
2410 }
2411
2412 err = talk_to_netback(np->xbdev, np);
2413 if (err)
2414 return err;
Olivier Deprez157378f2022-04-04 15:47:50 +02002415 if (np->netback_has_xdp_headroom)
2416 pr_info("backend supports XDP headroom\n");
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002417
2418 /* talk_to_netback() sets the correct number of queues */
2419 num_queues = dev->real_num_tx_queues;
2420
2421 if (dev->reg_state == NETREG_UNINITIALIZED) {
2422 err = register_netdev(dev);
2423 if (err) {
2424 pr_warn("%s: register_netdev err=%d\n", __func__, err);
2425 device_unregister(&np->xbdev->dev);
2426 return err;
2427 }
2428 }
2429
2430 rtnl_lock();
2431 netdev_update_features(dev);
2432 rtnl_unlock();
2433
2434 /*
2435 * All public and private state should now be sane. Get
2436 * ready to start sending and receiving packets and give the driver
2437 * domain a kick because we've probably just requeued some
2438 * packets.
2439 */
Olivier Deprez157378f2022-04-04 15:47:50 +02002440 netif_tx_lock_bh(np->netdev);
2441 netif_device_attach(np->netdev);
2442 netif_tx_unlock_bh(np->netdev);
2443
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002444 netif_carrier_on(np->netdev);
2445 for (j = 0; j < num_queues; ++j) {
2446 queue = &np->queues[j];
2447
2448 notify_remote_via_irq(queue->tx_irq);
2449 if (queue->tx_irq != queue->rx_irq)
2450 notify_remote_via_irq(queue->rx_irq);
2451
2452 spin_lock_irq(&queue->tx_lock);
2453 xennet_tx_buf_gc(queue);
2454 spin_unlock_irq(&queue->tx_lock);
2455
2456 spin_lock_bh(&queue->rx_lock);
2457 xennet_alloc_rx_buffers(queue);
2458 spin_unlock_bh(&queue->rx_lock);
2459 }
2460
2461 return 0;
2462}
2463
2464/**
2465 * Callback received when the backend's state changes.
2466 */
2467static void netback_changed(struct xenbus_device *dev,
2468 enum xenbus_state backend_state)
2469{
2470 struct netfront_info *np = dev_get_drvdata(&dev->dev);
2471 struct net_device *netdev = np->netdev;
2472
2473 dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state));
2474
2475 wake_up_all(&module_wq);
2476
2477 switch (backend_state) {
2478 case XenbusStateInitialising:
2479 case XenbusStateInitialised:
2480 case XenbusStateReconfiguring:
2481 case XenbusStateReconfigured:
2482 case XenbusStateUnknown:
2483 break;
2484
2485 case XenbusStateInitWait:
2486 if (dev->state != XenbusStateInitialising)
2487 break;
2488 if (xennet_connect(netdev) != 0)
2489 break;
2490 xenbus_switch_state(dev, XenbusStateConnected);
2491 break;
2492
2493 case XenbusStateConnected:
2494 netdev_notify_peers(netdev);
2495 break;
2496
2497 case XenbusStateClosed:
2498 if (dev->state == XenbusStateClosed)
2499 break;
Olivier Deprez157378f2022-04-04 15:47:50 +02002500 fallthrough; /* Missed the backend's CLOSING state */
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002501 case XenbusStateClosing:
2502 xenbus_frontend_closed(dev);
2503 break;
2504 }
2505}
2506
2507static const struct xennet_stat {
2508 char name[ETH_GSTRING_LEN];
2509 u16 offset;
2510} xennet_stats[] = {
2511 {
2512 "rx_gso_checksum_fixup",
2513 offsetof(struct netfront_info, rx_gso_checksum_fixup)
2514 },
2515};
2516
2517static int xennet_get_sset_count(struct net_device *dev, int string_set)
2518{
2519 switch (string_set) {
2520 case ETH_SS_STATS:
2521 return ARRAY_SIZE(xennet_stats);
2522 default:
2523 return -EINVAL;
2524 }
2525}
2526
2527static void xennet_get_ethtool_stats(struct net_device *dev,
2528 struct ethtool_stats *stats, u64 * data)
2529{
2530 void *np = netdev_priv(dev);
2531 int i;
2532
2533 for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
2534 data[i] = atomic_read((atomic_t *)(np + xennet_stats[i].offset));
2535}
2536
2537static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 * data)
2538{
2539 int i;
2540
2541 switch (stringset) {
2542 case ETH_SS_STATS:
2543 for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
2544 memcpy(data + i * ETH_GSTRING_LEN,
2545 xennet_stats[i].name, ETH_GSTRING_LEN);
2546 break;
2547 }
2548}
2549
2550static const struct ethtool_ops xennet_ethtool_ops =
2551{
2552 .get_link = ethtool_op_get_link,
2553
2554 .get_sset_count = xennet_get_sset_count,
2555 .get_ethtool_stats = xennet_get_ethtool_stats,
2556 .get_strings = xennet_get_strings,
Olivier Deprez157378f2022-04-04 15:47:50 +02002557 .get_ts_info = ethtool_op_get_ts_info,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002558};
2559
2560#ifdef CONFIG_SYSFS
2561static ssize_t show_rxbuf(struct device *dev,
2562 struct device_attribute *attr, char *buf)
2563{
2564 return sprintf(buf, "%lu\n", NET_RX_RING_SIZE);
2565}
2566
2567static ssize_t store_rxbuf(struct device *dev,
2568 struct device_attribute *attr,
2569 const char *buf, size_t len)
2570{
2571 char *endp;
2572 unsigned long target;
2573
2574 if (!capable(CAP_NET_ADMIN))
2575 return -EPERM;
2576
2577 target = simple_strtoul(buf, &endp, 0);
2578 if (endp == buf)
2579 return -EBADMSG;
2580
2581 /* rxbuf_min and rxbuf_max are no longer configurable. */
2582
2583 return len;
2584}
2585
2586static DEVICE_ATTR(rxbuf_min, 0644, show_rxbuf, store_rxbuf);
2587static DEVICE_ATTR(rxbuf_max, 0644, show_rxbuf, store_rxbuf);
2588static DEVICE_ATTR(rxbuf_cur, 0444, show_rxbuf, NULL);
2589
2590static struct attribute *xennet_dev_attrs[] = {
2591 &dev_attr_rxbuf_min.attr,
2592 &dev_attr_rxbuf_max.attr,
2593 &dev_attr_rxbuf_cur.attr,
2594 NULL
2595};
2596
2597static const struct attribute_group xennet_dev_group = {
2598 .attrs = xennet_dev_attrs
2599};
2600#endif /* CONFIG_SYSFS */
2601
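/*
 * Walk the backend through XenbusStateClosing and XenbusStateClosed
 * before tearing the device down, waiting (with a timeout) for it to
 * acknowledge each step.
 */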
Olivier Deprez0e641232021-09-23 10:07:05 +02002602static void xennet_bus_close(struct xenbus_device *dev)
2603{
2604 int ret;
2605
2606 if (xenbus_read_driver_state(dev->otherend) == XenbusStateClosed)
2607 return;
2608 do {
2609 xenbus_switch_state(dev, XenbusStateClosing);
2610 ret = wait_event_timeout(module_wq,
2611 xenbus_read_driver_state(dev->otherend) ==
2612 XenbusStateClosing ||
2613 xenbus_read_driver_state(dev->otherend) ==
2614 XenbusStateClosed ||
2615 xenbus_read_driver_state(dev->otherend) ==
2616 XenbusStateUnknown,
2617 XENNET_TIMEOUT);
2618 } while (!ret);
2619
2620 if (xenbus_read_driver_state(dev->otherend) == XenbusStateClosed)
2621 return;
2622
2623 do {
2624 xenbus_switch_state(dev, XenbusStateClosed);
2625 ret = wait_event_timeout(module_wq,
2626 xenbus_read_driver_state(dev->otherend) ==
2627 XenbusStateClosed ||
2628 xenbus_read_driver_state(dev->otherend) ==
2629 XenbusStateUnknown,
2630 XENNET_TIMEOUT);
2631 } while (!ret);
2632}
2633
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002634static int xennet_remove(struct xenbus_device *dev)
2635{
2636 struct netfront_info *info = dev_get_drvdata(&dev->dev);
2637
Olivier Deprez0e641232021-09-23 10:07:05 +02002638 xennet_bus_close(dev);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002639 xennet_disconnect_backend(info);
2640
2641 if (info->netdev->reg_state == NETREG_REGISTERED)
2642 unregister_netdev(info->netdev);
2643
2644 if (info->queues) {
2645 rtnl_lock();
2646 xennet_destroy_queues(info);
2647 rtnl_unlock();
2648 }
2649 xennet_free_netdev(info->netdev);
2650
2651 return 0;
2652}
2653
2654static const struct xenbus_device_id netfront_ids[] = {
2655 { "vif" },
2656 { "" }
2657};
2658
2659static struct xenbus_driver netfront_driver = {
2660 .ids = netfront_ids,
2661 .probe = netfront_probe,
2662 .remove = xennet_remove,
2663 .resume = netfront_resume,
2664 .otherend_changed = netback_changed,
2665};
2666
2667static int __init netif_init(void)
2668{
2669 if (!xen_domain())
2670 return -ENODEV;
2671
2672 if (!xen_has_pv_nic_devices())
2673 return -ENODEV;
2674
2675 pr_info("Initialising Xen virtual ethernet driver\n");
2676
2677 /* Allow as many queues as there are CPUs inut max. 8 if user has not
2678 * specified a value.
2679 */
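	/* The limit can be overridden via the max_queues module parameter,
	 * e.g. xen_netfront.max_queues=<n> on the kernel command line when
	 * the driver is built in.
	 */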
2680 if (xennet_max_queues == 0)
2681 xennet_max_queues = min_t(unsigned int, MAX_QUEUES_DEFAULT,
2682 num_online_cpus());
2683
2684 return xenbus_register_frontend(&netfront_driver);
2685}
2686module_init(netif_init);
2687
2688
2689static void __exit netif_exit(void)
2690{
2691 xenbus_unregister_driver(&netfront_driver);
2692}
2693module_exit(netif_exit);
2694
2695MODULE_DESCRIPTION("Xen virtual network device frontend");
2696MODULE_LICENSE("GPL");
2697MODULE_ALIAS("xen:vif");
2698MODULE_ALIAS("xennet");