Update Linux to v5.10.109

Sourced from [1]

[1] https://cdn.kernel.org/pub/linux/kernel/v5.x/linux-5.10.109.tar.xz
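Notable changes to drivers/net/veth.c carried by the diff below:

- Per-queue statistics are reworked around a new struct veth_stats,
  adding rx_drops, xdp_redirect, xdp_tx, xdp_tx_err and the
  peer_tq_xdp_xmit{,_err} counters, all exposed via ethtool. With a
  hypothetical device pair veth0/veth1, they would appear per queue
  in "ethtool -S veth0" output as e.g. rx_queue_0_xdp_tx,
  rx_queue_0_xdp_tx_errors, tx_queue_0_xdp_xmit and
  tx_queue_0_xdp_xmit_errors.

- veth_xdp_xmit() is split into an internal helper and
  veth_ndo_xdp_xmit(), with the peer dereference now performed under
  rcu_read_lock().

- The NAPI wakeup path avoids races on rx_notify_masked by pairing
  READ_ONCE()/WRITE_ONCE() with napi_schedule_prep() and
  __napi_schedule().

- xdp_buff.frame_sz is initialized on the skb receive path, and
  convert_to_xdp_frame() is replaced by xdp_convert_buff_to_frame().

- The deprecated XDP_QUERY_PROG command is removed and the
  .ndo_get_peer_dev callback is added.
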
Change-Id: I19bca9fc6762d4e63bcf3e4cba88bbe560d9c76c
Signed-off-by: Olivier Deprez <olivier.deprez@arm.com>
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 44ad412..f7e3eb3 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -34,16 +34,23 @@
#define VETH_RING_SIZE 256
#define VETH_XDP_HEADROOM (XDP_PACKET_HEADROOM + NET_IP_ALIGN)
-/* Separating two types of XDP xmit */
-#define VETH_XDP_TX BIT(0)
-#define VETH_XDP_REDIR BIT(1)
-
#define VETH_XDP_TX_BULK_SIZE 16
+struct veth_stats {
+ u64 rx_drops;
+ /* xdp */
+ u64 xdp_packets;
+ u64 xdp_bytes;
+ u64 xdp_redirect;
+ u64 xdp_drops;
+ u64 xdp_tx;
+ u64 xdp_tx_err;
+ u64 peer_tq_xdp_xmit;
+ u64 peer_tq_xdp_xmit_err;
+};
+
struct veth_rq_stats {
- u64 xdp_packets;
- u64 xdp_bytes;
- u64 xdp_drops;
+ struct veth_stats vs;
struct u64_stats_sync syncp;
};
@@ -80,16 +87,27 @@
size_t offset;
};
-#define VETH_RQ_STAT(m) offsetof(struct veth_rq_stats, m)
+#define VETH_RQ_STAT(m) offsetof(struct veth_stats, m)
static const struct veth_q_stat_desc veth_rq_stats_desc[] = {
{ "xdp_packets", VETH_RQ_STAT(xdp_packets) },
{ "xdp_bytes", VETH_RQ_STAT(xdp_bytes) },
+ { "drops", VETH_RQ_STAT(rx_drops) },
+ { "xdp_redirect", VETH_RQ_STAT(xdp_redirect) },
{ "xdp_drops", VETH_RQ_STAT(xdp_drops) },
+ { "xdp_tx", VETH_RQ_STAT(xdp_tx) },
+ { "xdp_tx_errors", VETH_RQ_STAT(xdp_tx_err) },
};
#define VETH_RQ_STATS_LEN ARRAY_SIZE(veth_rq_stats_desc)
+static const struct veth_q_stat_desc veth_tq_stats_desc[] = {
+ { "xdp_xmit", VETH_RQ_STAT(peer_tq_xdp_xmit) },
+ { "xdp_xmit_errors", VETH_RQ_STAT(peer_tq_xdp_xmit_err) },
+};
+
+#define VETH_TQ_STATS_LEN ARRAY_SIZE(veth_tq_stats_desc)
+
static struct {
const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
@@ -124,11 +142,19 @@
for (i = 0; i < dev->real_num_rx_queues; i++) {
for (j = 0; j < VETH_RQ_STATS_LEN; j++) {
snprintf(p, ETH_GSTRING_LEN,
- "rx_queue_%u_%.11s",
+ "rx_queue_%u_%.18s",
i, veth_rq_stats_desc[j].desc);
p += ETH_GSTRING_LEN;
}
}
+ for (i = 0; i < dev->real_num_tx_queues; i++) {
+ for (j = 0; j < VETH_TQ_STATS_LEN; j++) {
+ snprintf(p, ETH_GSTRING_LEN,
+ "tx_queue_%u_%.18s",
+ i, veth_tq_stats_desc[j].desc);
+ p += ETH_GSTRING_LEN;
+ }
+ }
break;
}
}
@@ -138,7 +164,8 @@
switch (sset) {
case ETH_SS_STATS:
return ARRAY_SIZE(ethtool_stats_keys) +
- VETH_RQ_STATS_LEN * dev->real_num_rx_queues;
+ VETH_RQ_STATS_LEN * dev->real_num_rx_queues +
+ VETH_TQ_STATS_LEN * dev->real_num_tx_queues;
default:
return -EOPNOTSUPP;
}
@@ -147,7 +174,7 @@
static void veth_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
- struct veth_priv *priv = netdev_priv(dev);
+ struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
struct net_device *peer = rtnl_dereference(priv->peer);
int i, j, idx;
@@ -155,7 +182,7 @@
idx = 1;
for (i = 0; i < dev->real_num_rx_queues; i++) {
const struct veth_rq_stats *rq_stats = &priv->rq[i].stats;
- const void *stats_base = (void *)rq_stats;
+ const void *stats_base = (void *)&rq_stats->vs;
unsigned int start;
size_t offset;
@@ -168,6 +195,26 @@
} while (u64_stats_fetch_retry_irq(&rq_stats->syncp, start));
idx += VETH_RQ_STATS_LEN;
}
+
+ if (!peer)
+ return;
+
+ rcv_priv = netdev_priv(peer);
+ for (i = 0; i < peer->real_num_rx_queues; i++) {
+ const struct veth_rq_stats *rq_stats = &rcv_priv->rq[i].stats;
+ const void *base = (void *)&rq_stats->vs;
+ unsigned int start, tx_idx = idx;
+ size_t offset;
+
+ tx_idx += (i % dev->real_num_tx_queues) * VETH_TQ_STATS_LEN;
+ do {
+ start = u64_stats_fetch_begin_irq(&rq_stats->syncp);
+ for (j = 0; j < VETH_TQ_STATS_LEN; j++) {
+ offset = veth_tq_stats_desc[j].offset;
+ data[tx_idx + j] += *(u64 *)(base + offset);
+ }
+ } while (u64_stats_fetch_retry_irq(&rq_stats->syncp, start));
+ }
}
static const struct ethtool_ops veth_ethtool_ops = {
@@ -187,14 +234,14 @@
return (unsigned long)ptr & VETH_XDP_FLAG;
}
-static void *veth_ptr_to_xdp(void *ptr)
+static struct xdp_frame *veth_ptr_to_xdp(void *ptr)
{
return (void *)((unsigned long)ptr & ~VETH_XDP_FLAG);
}
-static void *veth_xdp_to_ptr(void *ptr)
+static void *veth_xdp_to_ptr(struct xdp_frame *xdp)
{
- return (void *)((unsigned long)ptr | VETH_XDP_FLAG);
+ return (void *)((unsigned long)xdp | VETH_XDP_FLAG);
}
static void veth_ptr_free(void *ptr)
@@ -209,9 +256,10 @@
{
/* Write ptr_ring before reading rx_notify_masked */
smp_mb();
- if (!rq->rx_notify_masked) {
- rq->rx_notify_masked = true;
- napi_schedule(&rq->xdp_napi);
+ if (!READ_ONCE(rq->rx_notify_masked) &&
+ napi_schedule_prep(&rq->xdp_napi)) {
+ WRITE_ONCE(rq->rx_notify_masked, true);
+ __napi_schedule(&rq->xdp_napi);
}
}
@@ -254,19 +302,12 @@
if (rxq < rcv->real_num_rx_queues) {
rq = &rcv_priv->rq[rxq];
rcv_xdp = rcu_access_pointer(rq->xdp_prog);
- skb_record_rx_queue(skb, rxq);
}
skb_tx_timestamp(skb);
if (likely(veth_forward_skb(rcv, skb, rq, rcv_xdp) == NET_RX_SUCCESS)) {
- if (!rcv_xdp) {
- struct pcpu_lstats *stats = this_cpu_ptr(dev->lstats);
-
- u64_stats_update_begin(&stats->syncp);
- stats->bytes += length;
- stats->packets++;
- u64_stats_update_end(&stats->syncp);
- }
+ if (!rcv_xdp)
+ dev_lstats_add(dev, length);
} else {
drop:
atomic64_inc(&priv->dropped);
@@ -280,51 +321,42 @@
return NETDEV_TX_OK;
}
-static u64 veth_stats_tx(struct pcpu_lstats *result, struct net_device *dev)
+static u64 veth_stats_tx(struct net_device *dev, u64 *packets, u64 *bytes)
{
struct veth_priv *priv = netdev_priv(dev);
- int cpu;
- result->packets = 0;
- result->bytes = 0;
- for_each_possible_cpu(cpu) {
- struct pcpu_lstats *stats = per_cpu_ptr(dev->lstats, cpu);
- u64 packets, bytes;
- unsigned int start;
-
- do {
- start = u64_stats_fetch_begin_irq(&stats->syncp);
- packets = stats->packets;
- bytes = stats->bytes;
- } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
- result->packets += packets;
- result->bytes += bytes;
- }
+ dev_lstats_read(dev, packets, bytes);
return atomic64_read(&priv->dropped);
}
-static void veth_stats_rx(struct veth_rq_stats *result, struct net_device *dev)
+static void veth_stats_rx(struct veth_stats *result, struct net_device *dev)
{
struct veth_priv *priv = netdev_priv(dev);
int i;
+ result->peer_tq_xdp_xmit_err = 0;
result->xdp_packets = 0;
+ result->xdp_tx_err = 0;
result->xdp_bytes = 0;
- result->xdp_drops = 0;
+ result->rx_drops = 0;
for (i = 0; i < dev->num_rx_queues; i++) {
+ u64 packets, bytes, drops, xdp_tx_err, peer_tq_xdp_xmit_err;
struct veth_rq_stats *stats = &priv->rq[i].stats;
- u64 packets, bytes, drops;
unsigned int start;
do {
start = u64_stats_fetch_begin_irq(&stats->syncp);
- packets = stats->xdp_packets;
- bytes = stats->xdp_bytes;
- drops = stats->xdp_drops;
+ peer_tq_xdp_xmit_err = stats->vs.peer_tq_xdp_xmit_err;
+ xdp_tx_err = stats->vs.xdp_tx_err;
+ packets = stats->vs.xdp_packets;
+ bytes = stats->vs.xdp_bytes;
+ drops = stats->vs.rx_drops;
} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+ result->peer_tq_xdp_xmit_err += peer_tq_xdp_xmit_err;
+ result->xdp_tx_err += xdp_tx_err;
result->xdp_packets += packets;
result->xdp_bytes += bytes;
- result->xdp_drops += drops;
+ result->rx_drops += drops;
}
}
@@ -333,26 +365,29 @@
{
struct veth_priv *priv = netdev_priv(dev);
struct net_device *peer;
- struct veth_rq_stats rx;
- struct pcpu_lstats tx;
+ struct veth_stats rx;
+ u64 packets, bytes;
- tot->tx_dropped = veth_stats_tx(&tx, dev);
- tot->tx_bytes = tx.bytes;
- tot->tx_packets = tx.packets;
+ tot->tx_dropped = veth_stats_tx(dev, &packets, &bytes);
+ tot->tx_bytes = bytes;
+ tot->tx_packets = packets;
veth_stats_rx(&rx, dev);
- tot->rx_dropped = rx.xdp_drops;
+ tot->tx_dropped += rx.xdp_tx_err;
+ tot->rx_dropped = rx.rx_drops + rx.peer_tq_xdp_xmit_err;
tot->rx_bytes = rx.xdp_bytes;
tot->rx_packets = rx.xdp_packets;
rcu_read_lock();
peer = rcu_dereference(priv->peer);
if (peer) {
- tot->rx_dropped += veth_stats_tx(&tx, peer);
- tot->rx_bytes += tx.bytes;
- tot->rx_packets += tx.packets;
+ veth_stats_tx(peer, &packets, &bytes);
+ tot->rx_bytes += bytes;
+ tot->rx_packets += packets;
veth_stats_rx(&rx, peer);
+ tot->tx_dropped += rx.peer_tq_xdp_xmit_err;
+ tot->rx_dropped += rx.xdp_tx_err;
tot->tx_bytes += rx.xdp_bytes;
tot->tx_packets += rx.xdp_packets;
}
@@ -369,10 +404,6 @@
{
struct sk_buff *skb;
- if (!buflen) {
- buflen = SKB_DATA_ALIGN(headroom + len) +
- SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
- }
skb = build_skb(head, buflen);
if (!skb)
return NULL;
@@ -388,25 +419,31 @@
return smp_processor_id() % dev->real_num_rx_queues;
}
+static struct net_device *veth_peer_dev(struct net_device *dev)
+{
+ struct veth_priv *priv = netdev_priv(dev);
+
+ /* Callers must be under RCU read side. */
+ return rcu_dereference(priv->peer);
+}
+
static int veth_xdp_xmit(struct net_device *dev, int n,
- struct xdp_frame **frames, u32 flags)
+ struct xdp_frame **frames,
+ u32 flags, bool ndo_xmit)
{
struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
+ int i, ret = -ENXIO, drops = 0;
struct net_device *rcv;
- int i, ret, drops = n;
unsigned int max_len;
struct veth_rq *rq;
- if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) {
- ret = -EINVAL;
- goto drop;
- }
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+ rcu_read_lock();
rcv = rcu_dereference(priv->peer);
- if (unlikely(!rcv)) {
- ret = -ENXIO;
- goto drop;
- }
+ if (unlikely(!rcv))
+ goto out;
rcv_priv = netdev_priv(rcv);
rq = &rcv_priv->rq[veth_select_rxq(rcv)];
@@ -414,12 +451,9 @@
* side. This means an XDP program is loaded on the peer and the peer
* device is up.
*/
- if (!rcu_access_pointer(rq->xdp_prog)) {
- ret = -ENXIO;
- goto drop;
- }
+ if (!rcu_access_pointer(rq->xdp_prog))
+ goto out;
- drops = 0;
max_len = rcv->mtu + rcv->hard_header_len + VLAN_HLEN;
spin_lock(&rq->xdp_ring.producer_lock);
@@ -438,65 +472,89 @@
if (flags & XDP_XMIT_FLUSH)
__veth_xdp_flush(rq);
- if (likely(!drops))
- return n;
-
ret = n - drops;
-drop:
- atomic64_add(drops, &priv->dropped);
+ if (ndo_xmit) {
+ u64_stats_update_begin(&rq->stats.syncp);
+ rq->stats.vs.peer_tq_xdp_xmit += n - drops;
+ rq->stats.vs.peer_tq_xdp_xmit_err += drops;
+ u64_stats_update_end(&rq->stats.syncp);
+ }
+
+out:
+ rcu_read_unlock();
return ret;
}
-static void veth_xdp_flush_bq(struct net_device *dev, struct veth_xdp_tx_bq *bq)
+static int veth_ndo_xdp_xmit(struct net_device *dev, int n,
+ struct xdp_frame **frames, u32 flags)
+{
+ int err;
+
+ err = veth_xdp_xmit(dev, n, frames, flags, true);
+ if (err < 0) {
+ struct veth_priv *priv = netdev_priv(dev);
+
+ atomic64_add(n, &priv->dropped);
+ }
+
+ return err;
+}
+
+static void veth_xdp_flush_bq(struct veth_rq *rq, struct veth_xdp_tx_bq *bq)
{
int sent, i, err = 0;
- sent = veth_xdp_xmit(dev, bq->count, bq->q, 0);
+ sent = veth_xdp_xmit(rq->dev, bq->count, bq->q, 0, false);
if (sent < 0) {
err = sent;
sent = 0;
for (i = 0; i < bq->count; i++)
xdp_return_frame(bq->q[i]);
}
- trace_xdp_bulk_tx(dev, sent, bq->count - sent, err);
+ trace_xdp_bulk_tx(rq->dev, sent, bq->count - sent, err);
+
+ u64_stats_update_begin(&rq->stats.syncp);
+ rq->stats.vs.xdp_tx += sent;
+ rq->stats.vs.xdp_tx_err += bq->count - sent;
+ u64_stats_update_end(&rq->stats.syncp);
bq->count = 0;
}
-static void veth_xdp_flush(struct net_device *dev, struct veth_xdp_tx_bq *bq)
+static void veth_xdp_flush(struct veth_rq *rq, struct veth_xdp_tx_bq *bq)
{
- struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
+ struct veth_priv *rcv_priv, *priv = netdev_priv(rq->dev);
struct net_device *rcv;
- struct veth_rq *rq;
+ struct veth_rq *rcv_rq;
rcu_read_lock();
- veth_xdp_flush_bq(dev, bq);
+ veth_xdp_flush_bq(rq, bq);
rcv = rcu_dereference(priv->peer);
if (unlikely(!rcv))
goto out;
rcv_priv = netdev_priv(rcv);
- rq = &rcv_priv->rq[veth_select_rxq(rcv)];
+ rcv_rq = &rcv_priv->rq[veth_select_rxq(rcv)];
/* xdp_ring is initialized on receive side? */
- if (unlikely(!rcu_access_pointer(rq->xdp_prog)))
+ if (unlikely(!rcu_access_pointer(rcv_rq->xdp_prog)))
goto out;
- __veth_xdp_flush(rq);
+ __veth_xdp_flush(rcv_rq);
out:
rcu_read_unlock();
}
-static int veth_xdp_tx(struct net_device *dev, struct xdp_buff *xdp,
+static int veth_xdp_tx(struct veth_rq *rq, struct xdp_buff *xdp,
struct veth_xdp_tx_bq *bq)
{
- struct xdp_frame *frame = convert_to_xdp_frame(xdp);
+ struct xdp_frame *frame = xdp_convert_buff_to_frame(xdp);
if (unlikely(!frame))
return -EOVERFLOW;
if (unlikely(bq->count == VETH_XDP_TX_BULK_SIZE))
- veth_xdp_flush_bq(dev, bq);
+ veth_xdp_flush_bq(rq, bq);
bq->q[bq->count++] = frame;
@@ -505,8 +563,8 @@
static struct sk_buff *veth_xdp_rcv_one(struct veth_rq *rq,
struct xdp_frame *frame,
- unsigned int *xdp_xmit,
- struct veth_xdp_tx_bq *bq)
+ struct veth_xdp_tx_bq *bq,
+ struct veth_stats *stats)
{
void *hard_start = frame->data - frame->headroom;
int len = frame->len, delta = 0;
@@ -524,10 +582,7 @@
struct xdp_buff xdp;
u32 act;
- xdp.data_hard_start = hard_start;
- xdp.data = frame->data;
- xdp.data_end = frame->data + frame->len;
- xdp.data_meta = frame->data - frame->metasize;
+ xdp_convert_frame_to_buff(frame, &xdp);
xdp.rxq = &rq->xdp_rxq;
act = bpf_prog_run_xdp(xdp_prog, &xdp);
@@ -540,12 +595,13 @@
case XDP_TX:
orig_frame = *frame;
xdp.rxq->mem = frame->mem;
- if (unlikely(veth_xdp_tx(rq->dev, &xdp, bq) < 0)) {
+ if (unlikely(veth_xdp_tx(rq, &xdp, bq) < 0)) {
trace_xdp_exception(rq->dev, xdp_prog, act);
frame = &orig_frame;
+ stats->rx_drops++;
goto err_xdp;
}
- *xdp_xmit |= VETH_XDP_TX;
+ stats->xdp_tx++;
rcu_read_unlock();
goto xdp_xmit;
case XDP_REDIRECT:
@@ -553,27 +609,30 @@
xdp.rxq->mem = frame->mem;
if (xdp_do_redirect(rq->dev, &xdp, xdp_prog)) {
frame = &orig_frame;
+ stats->rx_drops++;
goto err_xdp;
}
- *xdp_xmit |= VETH_XDP_REDIR;
+ stats->xdp_redirect++;
rcu_read_unlock();
goto xdp_xmit;
default:
bpf_warn_invalid_xdp_action(act);
- /* fall through */
+ fallthrough;
case XDP_ABORTED:
trace_xdp_exception(rq->dev, xdp_prog, act);
- /* fall through */
+ fallthrough;
case XDP_DROP:
+ stats->xdp_drops++;
goto err_xdp;
}
}
rcu_read_unlock();
headroom = sizeof(struct xdp_frame) + frame->headroom - delta;
- skb = veth_build_skb(hard_start, headroom, len, 0);
+ skb = veth_build_skb(hard_start, headroom, len, frame->frame_sz);
if (!skb) {
xdp_return_frame(frame);
+ stats->rx_drops++;
goto err;
}
@@ -589,9 +648,10 @@
return NULL;
}
-static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq, struct sk_buff *skb,
- unsigned int *xdp_xmit,
- struct veth_xdp_tx_bq *bq)
+static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq,
+ struct sk_buff *skb,
+ struct veth_xdp_tx_bq *bq,
+ struct veth_stats *stats)
{
u32 pktlen, headroom, act, metalen;
void *orig_data, *orig_data_end;
@@ -635,9 +695,8 @@
goto drop;
}
- nskb = veth_build_skb(head,
- VETH_XDP_HEADROOM + mac_len, skb->len,
- PAGE_SIZE);
+ nskb = veth_build_skb(head, VETH_XDP_HEADROOM + mac_len,
+ skb->len, PAGE_SIZE);
if (!nskb) {
page_frag_free(head);
goto drop;
@@ -655,6 +714,11 @@
xdp.data_end = xdp.data + pktlen;
xdp.data_meta = xdp.data;
xdp.rxq = &rq->xdp_rxq;
+
+ /* SKB "head" area always has tailroom for skb_shared_info */
+ xdp.frame_sz = (void *)skb_end_pointer(skb) - xdp.data_hard_start;
+ xdp.frame_sz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
orig_data = xdp.data;
orig_data_end = xdp.data_end;
@@ -667,33 +731,38 @@
get_page(virt_to_page(xdp.data));
consume_skb(skb);
xdp.rxq->mem = rq->xdp_mem;
- if (unlikely(veth_xdp_tx(rq->dev, &xdp, bq) < 0)) {
+ if (unlikely(veth_xdp_tx(rq, &xdp, bq) < 0)) {
trace_xdp_exception(rq->dev, xdp_prog, act);
+ stats->rx_drops++;
goto err_xdp;
}
- *xdp_xmit |= VETH_XDP_TX;
+ stats->xdp_tx++;
rcu_read_unlock();
goto xdp_xmit;
case XDP_REDIRECT:
get_page(virt_to_page(xdp.data));
consume_skb(skb);
xdp.rxq->mem = rq->xdp_mem;
- if (xdp_do_redirect(rq->dev, &xdp, xdp_prog))
+ if (xdp_do_redirect(rq->dev, &xdp, xdp_prog)) {
+ stats->rx_drops++;
goto err_xdp;
- *xdp_xmit |= VETH_XDP_REDIR;
+ }
+ stats->xdp_redirect++;
rcu_read_unlock();
goto xdp_xmit;
default:
bpf_warn_invalid_xdp_action(act);
- /* fall through */
+ fallthrough;
case XDP_ABORTED:
trace_xdp_exception(rq->dev, xdp_prog, act);
- /* fall through */
+ fallthrough;
case XDP_DROP:
- goto drop;
+ stats->xdp_drops++;
+ goto xdp_drop;
}
rcu_read_unlock();
+ /* check if bpf_xdp_adjust_head was used */
delta = orig_data - xdp.data;
off = mac_len + delta;
if (off > 0)
@@ -701,9 +770,11 @@
else if (off < 0)
__skb_pull(skb, -off);
skb->mac_header -= delta;
+
+ /* check if bpf_xdp_adjust_tail was used */
off = xdp.data_end - orig_data_end;
if (off != 0)
- __skb_put(skb, off);
+ __skb_put(skb, off); /* positive on grow, negative on shrink */
skb->protocol = eth_type_trans(skb, rq->dev);
metalen = xdp.data - xdp.data_meta;
@@ -712,6 +783,8 @@
out:
return skb;
drop:
+ stats->rx_drops++;
+xdp_drop:
rcu_read_unlock();
kfree_skb(skb);
return NULL;
@@ -722,14 +795,14 @@
return NULL;
}
-static int veth_xdp_rcv(struct veth_rq *rq, int budget, unsigned int *xdp_xmit,
- struct veth_xdp_tx_bq *bq)
+static int veth_xdp_rcv(struct veth_rq *rq, int budget,
+ struct veth_xdp_tx_bq *bq,
+ struct veth_stats *stats)
{
- int i, done = 0, drops = 0, bytes = 0;
+ int i, done = 0;
for (i = 0; i < budget; i++) {
void *ptr = __ptr_ring_consume(&rq->xdp_ring);
- unsigned int xdp_xmit_one = 0;
struct sk_buff *skb;
if (!ptr)
@@ -738,27 +811,26 @@
if (veth_is_xdp_frame(ptr)) {
struct xdp_frame *frame = veth_ptr_to_xdp(ptr);
- bytes += frame->len;
- skb = veth_xdp_rcv_one(rq, frame, &xdp_xmit_one, bq);
+ stats->xdp_bytes += frame->len;
+ skb = veth_xdp_rcv_one(rq, frame, bq, stats);
} else {
skb = ptr;
- bytes += skb->len;
- skb = veth_xdp_rcv_skb(rq, skb, &xdp_xmit_one, bq);
+ stats->xdp_bytes += skb->len;
+ skb = veth_xdp_rcv_skb(rq, skb, bq, stats);
}
- *xdp_xmit |= xdp_xmit_one;
if (skb)
napi_gro_receive(&rq->xdp_napi, skb);
- else if (!xdp_xmit_one)
- drops++;
done++;
}
u64_stats_update_begin(&rq->stats.syncp);
- rq->stats.xdp_packets += done;
- rq->stats.xdp_bytes += bytes;
- rq->stats.xdp_drops += drops;
+ rq->stats.vs.xdp_redirect += stats->xdp_redirect;
+ rq->stats.vs.xdp_bytes += stats->xdp_bytes;
+ rq->stats.vs.xdp_drops += stats->xdp_drops;
+ rq->stats.vs.rx_drops += stats->rx_drops;
+ rq->stats.vs.xdp_packets += done;
u64_stats_update_end(&rq->stats.syncp);
return done;
@@ -768,28 +840,30 @@
{
struct veth_rq *rq =
container_of(napi, struct veth_rq, xdp_napi);
- unsigned int xdp_xmit = 0;
+ struct veth_stats stats = {};
struct veth_xdp_tx_bq bq;
int done;
bq.count = 0;
xdp_set_return_frame_no_direct();
- done = veth_xdp_rcv(rq, budget, &xdp_xmit, &bq);
+ done = veth_xdp_rcv(rq, budget, &bq, &stats);
if (done < budget && napi_complete_done(napi, done)) {
/* Write rx_notify_masked before reading ptr_ring */
smp_store_mb(rq->rx_notify_masked, false);
if (unlikely(!__ptr_ring_empty(&rq->xdp_ring))) {
- rq->rx_notify_masked = true;
- napi_schedule(&rq->xdp_napi);
+ if (napi_schedule_prep(&rq->xdp_napi)) {
+ WRITE_ONCE(rq->rx_notify_masked, true);
+ __napi_schedule(&rq->xdp_napi);
+ }
}
}
- if (xdp_xmit & VETH_XDP_TX)
- veth_xdp_flush(rq->dev, &bq);
- if (xdp_xmit & VETH_XDP_REDIR)
- xdp_do_flush_map();
+ if (stats.xdp_tx > 0)
+ veth_xdp_flush(rq, &bq);
+ if (stats.xdp_redirect > 0)
+ xdp_do_flush();
xdp_clear_return_frame_no_direct();
return done;
@@ -832,14 +906,13 @@
struct veth_rq *rq = &priv->rq[i];
napi_disable(&rq->xdp_napi);
- napi_hash_del(&rq->xdp_napi);
+ __netif_napi_del(&rq->xdp_napi);
}
synchronize_net();
for (i = 0; i < dev->real_num_rx_queues; i++) {
struct veth_rq *rq = &priv->rq[i];
- netif_napi_del(&rq->xdp_napi);
rq->rx_notify_masked = false;
ptr_ring_cleanup(&rq->xdp_ring, veth_ptr_free);
}
@@ -1133,26 +1206,11 @@
return err;
}
-static u32 veth_xdp_query(struct net_device *dev)
-{
- struct veth_priv *priv = netdev_priv(dev);
- const struct bpf_prog *xdp_prog;
-
- xdp_prog = priv->_xdp_prog;
- if (xdp_prog)
- return xdp_prog->aux->id;
-
- return 0;
-}
-
static int veth_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
switch (xdp->command) {
case XDP_SETUP_PROG:
return veth_xdp_set(dev, xdp->prog, xdp->extack);
- case XDP_QUERY_PROG:
- xdp->prog_id = veth_xdp_query(dev);
- return 0;
default:
return -EINVAL;
}
@@ -1174,7 +1232,8 @@
.ndo_features_check = passthru_features_check,
.ndo_set_rx_headroom = veth_set_rx_headroom,
.ndo_bpf = veth_xdp,
- .ndo_xdp_xmit = veth_xdp_xmit,
+ .ndo_xdp_xmit = veth_ndo_xdp_xmit,
+ .ndo_get_peer_dev = veth_peer_dev,
};
#define VETH_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HW_CSUM | \