// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * History
 * 03-01-2007 Added forwarding for x.25 Andrew Hendry
 */

#define pr_fmt(fmt) "X25: " fmt

#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <net/x25.h>

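/* Table of active call forwardings, one entry per forwarded LCI,
 * protected by x25_forward_list_lock.
 */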
LIST_HEAD(x25_forward_list);
DEFINE_RWLOCK(x25_forward_list_lock);

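/*
 * x25_forward_call - forward an incoming X.25 call request.
 *
 * Looks up the route for @dest_addr, records the (lci, dev1, dev2)
 * triple on x25_forward_list so that subsequent data packets on this
 * LCI can be relayed, and transmits a clone of the call request on the
 * outgoing neighbour's link. The call is not forwarded if it would go
 * back out of the interface it arrived on.
 *
 * Returns 1 if the call was forwarded, 0 if no route or neighbour was
 * found (or the clone failed), and -ENOMEM if the forwarding entry
 * could not be allocated.
 */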
int x25_forward_call(struct x25_address *dest_addr, struct x25_neigh *from,
		     struct sk_buff *skb, int lci)
{
	struct x25_route *rt;
	struct x25_neigh *neigh_new = NULL;
	struct list_head *entry;
	struct x25_forward *x25_frwd, *new_frwd;
	struct sk_buff *skbn;
	short same_lci = 0;
	int rc = 0;

	if ((rt = x25_get_route(dest_addr)) == NULL)
		goto out_no_route;

	if ((neigh_new = x25_get_neigh(rt->dev)) == NULL) {
		/* This shouldn't happen; if it somehow occurs,
		 * do something sensible.
		 */
		goto out_put_route;
	}

	/* Avoid a loop. This is the normal exit path for a
	 * system with only one x.25 iface and default route.
	 */
	if (rt->dev == from->dev) {
		goto out_put_nb;
	}

	/* Remote end sending a call request on an already
	 * established LCI? It shouldn't happen, but handle it just in case.
	 */
	read_lock_bh(&x25_forward_list_lock);
	list_for_each(entry, &x25_forward_list) {
		x25_frwd = list_entry(entry, struct x25_forward, node);
		if (x25_frwd->lci == lci) {
			pr_warn("call request for an LCI which is already registered; transmitting but not registering a new pair\n");
			same_lci = 1;
		}
	}
	read_unlock_bh(&x25_forward_list_lock);

	/* Save the forwarding details for future traffic */
	if (!same_lci) {
		if ((new_frwd = kmalloc(sizeof(struct x25_forward),
					GFP_ATOMIC)) == NULL) {
			rc = -ENOMEM;
			goto out_put_nb;
		}
		new_frwd->lci = lci;
		new_frwd->dev1 = rt->dev;
		new_frwd->dev2 = from->dev;
		write_lock_bh(&x25_forward_list_lock);
		list_add(&new_frwd->node, &x25_forward_list);
		write_unlock_bh(&x25_forward_list_lock);
	}

	/* Forward the call request */
	if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) {
		goto out_put_nb;
	}
	x25_transmit_link(skbn, neigh_new);
	rc = 1;

out_put_nb:
	x25_neigh_put(neigh_new);

out_put_route:
	x25_route_put(rt);

out_no_route:
	return rc;
}

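/*
 * x25_forward_data - relay a data packet on an already forwarded call.
 *
 * Looks up the forwarding entry for @lci and retransmits a copy of the
 * packet on the neighbour link of the peer device, i.e. whichever side
 * of the forwarding pair the packet did not arrive on.
 *
 * Returns 1 if the packet was forwarded, 0 otherwise.
 */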
int x25_forward_data(int lci, struct x25_neigh *from, struct sk_buff *skb)
{
	struct x25_forward *frwd;
	struct list_head *entry;
	struct net_device *peer = NULL;
	struct x25_neigh *nb;
	struct sk_buff *skbn;
	int rc = 0;

	read_lock_bh(&x25_forward_list_lock);
	list_for_each(entry, &x25_forward_list) {
		frwd = list_entry(entry, struct x25_forward, node);
		if (frwd->lci == lci) {
			/* The call is established, either side can send */
			if (from->dev == frwd->dev1) {
				peer = frwd->dev2;
			} else {
				peer = frwd->dev1;
			}
			break;
		}
	}
	read_unlock_bh(&x25_forward_list_lock);

	if ((nb = x25_get_neigh(peer)) == NULL)
		goto out;

	if ((skbn = pskb_copy(skb, GFP_ATOMIC)) == NULL) {
		goto output;
	}
	x25_transmit_link(skbn, nb);

	rc = 1;
output:
	x25_neigh_put(nb);
out:
	return rc;
}

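/* Remove any forwarding entries registered for @lci. */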
void x25_clear_forward_by_lci(unsigned int lci)
{
	struct x25_forward *fwd, *tmp;

	write_lock_bh(&x25_forward_list_lock);

	list_for_each_entry_safe(fwd, tmp, &x25_forward_list, node) {
		if (fwd->lci == lci) {
			list_del(&fwd->node);
			kfree(fwd);
		}
	}
	write_unlock_bh(&x25_forward_list_lock);
}

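/* Remove any forwarding entries that use @dev on either side of the
 * forwarded call.
 */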
void x25_clear_forward_by_dev(struct net_device *dev)
{
	struct x25_forward *fwd, *tmp;

	write_lock_bh(&x25_forward_list_lock);

	list_for_each_entry_safe(fwd, tmp, &x25_forward_list, node) {
		if ((fwd->dev1 == dev) || (fwd->dev2 == dev)) {
			list_del(&fwd->node);
			kfree(fwd);
		}
	}
	write_unlock_bh(&x25_forward_list_lock);
}