/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _NET_NEIGHBOUR_H
#define _NET_NEIGHBOUR_H

#include <linux/neighbour.h>

/*
 *	Generic neighbour manipulation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
 *
 *	Changes:
 *
 *	Harald Welte:		<laforge@gnumonks.org>
 *		- Add neighbour cache statistics like rtstat
 */

#include <linux/atomic.h>
#include <linux/refcount.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rcupdate.h>
#include <linux/seq_file.h>
#include <linux/bitmap.h>

#include <linux/err.h>
#include <linux/sysctl.h>
#include <linux/workqueue.h>
#include <net/rtnetlink.h>

/*
 * NUD stands for "neighbor unreachability detection"
 */

#define NUD_IN_TIMER	(NUD_INCOMPLETE|NUD_REACHABLE|NUD_DELAY|NUD_PROBE)
#define NUD_VALID	(NUD_PERMANENT|NUD_NOARP|NUD_REACHABLE|NUD_PROBE|NUD_STALE|NUD_DELAY)
#define NUD_CONNECTED	(NUD_PERMANENT|NUD_NOARP|NUD_REACHABLE)

struct neighbour;

enum {
	NEIGH_VAR_MCAST_PROBES,
	NEIGH_VAR_UCAST_PROBES,
	NEIGH_VAR_APP_PROBES,
	NEIGH_VAR_MCAST_REPROBES,
	NEIGH_VAR_RETRANS_TIME,
	NEIGH_VAR_BASE_REACHABLE_TIME,
	NEIGH_VAR_DELAY_PROBE_TIME,
	NEIGH_VAR_GC_STALETIME,
	NEIGH_VAR_QUEUE_LEN_BYTES,
	NEIGH_VAR_PROXY_QLEN,
	NEIGH_VAR_ANYCAST_DELAY,
	NEIGH_VAR_PROXY_DELAY,
	NEIGH_VAR_LOCKTIME,
#define NEIGH_VAR_DATA_MAX (NEIGH_VAR_LOCKTIME + 1)
	/* Following are used as a second way to access one of the above */
	NEIGH_VAR_QUEUE_LEN, /* same data as NEIGH_VAR_QUEUE_LEN_BYTES */
	NEIGH_VAR_RETRANS_TIME_MS, /* same data as NEIGH_VAR_RETRANS_TIME */
	NEIGH_VAR_BASE_REACHABLE_TIME_MS, /* same data as NEIGH_VAR_BASE_REACHABLE_TIME */
	/* Following are used by "default" only */
	NEIGH_VAR_GC_INTERVAL,
	NEIGH_VAR_GC_THRESH1,
	NEIGH_VAR_GC_THRESH2,
	NEIGH_VAR_GC_THRESH3,
	NEIGH_VAR_MAX
};

struct neigh_parms {
	possible_net_t net;
	struct net_device *dev;
	struct list_head list;
	int	(*neigh_setup)(struct neighbour *);
	void	(*neigh_cleanup)(struct neighbour *);
	struct neigh_table *tbl;

	void	*sysctl_table;

	int	dead;
	refcount_t refcnt;
	struct rcu_head rcu_head;

	int	reachable_time;
	int	data[NEIGH_VAR_DATA_MAX];
	DECLARE_BITMAP(data_state, NEIGH_VAR_DATA_MAX);
};

static inline void neigh_var_set(struct neigh_parms *p, int index, int val)
{
	set_bit(index, p->data_state);
	p->data[index] = val;
}

#define NEIGH_VAR(p, attr) ((p)->data[NEIGH_VAR_ ## attr])

/* In ndo_neigh_setup, NEIGH_VAR_INIT should be used.
 * In other cases, NEIGH_VAR_SET should be used.
 */
#define NEIGH_VAR_INIT(p, attr, val) (NEIGH_VAR(p, attr) = val)
#define NEIGH_VAR_SET(p, attr, val) neigh_var_set(p, NEIGH_VAR_ ## attr, val)
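
/* Illustrative sketch, not part of the API surface: a driver's
 * ndo_neigh_setup callback would typically tweak per-device defaults with
 * NEIGH_VAR_INIT, while later runtime changes go through NEIGH_VAR_SET so
 * that the corresponding data_state bit is recorded. The function name
 * below is hypothetical.
 *
 *	static int example_ndo_neigh_setup(struct net_device *dev,
 *					   struct neigh_parms *p)
 *	{
 *		NEIGH_VAR_INIT(p, UCAST_PROBES, 5);	// no data_state bookkeeping
 *		return 0;
 *	}
 *
 *	// elsewhere, once the parms are live:
 *	NEIGH_VAR_SET(p, UCAST_PROBES, 5);		// also sets the data_state bit
 */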

static inline void neigh_parms_data_state_setall(struct neigh_parms *p)
{
	bitmap_fill(p->data_state, NEIGH_VAR_DATA_MAX);
}

static inline void neigh_parms_data_state_cleanall(struct neigh_parms *p)
{
	bitmap_zero(p->data_state, NEIGH_VAR_DATA_MAX);
}

struct neigh_statistics {
	unsigned long allocs;		/* number of allocated neighs */
	unsigned long destroys;		/* number of destroyed neighs */
	unsigned long hash_grows;	/* number of hash resizes */

	unsigned long res_failed;	/* number of failed resolutions */

	unsigned long lookups;		/* number of lookups */
	unsigned long hits;		/* number of hits (among lookups) */

	unsigned long rcv_probes_mcast;	/* number of received mcast ipv6 */
	unsigned long rcv_probes_ucast; /* number of received ucast ipv6 */

	unsigned long periodic_gc_runs;	/* number of periodic GC runs */
	unsigned long forced_gc_runs;	/* number of forced GC runs */

	unsigned long unres_discards;	/* number of unresolved drops */
	unsigned long table_fulls;	/* times even gc couldn't help */
};

#define NEIGH_CACHE_STAT_INC(tbl, field) this_cpu_inc((tbl)->stats->field)
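
/* Usage sketch: the neighbour core bumps these per-CPU counters at the
 * corresponding events, passing the owning table, e.g.
 *
 *	NEIGH_CACHE_STAT_INC(tbl, lookups);
 *	NEIGH_CACHE_STAT_INC(tbl, hits);
 *
 * The counters are summed across CPUs when reported (for example through
 * /proc/net/stat/arp_cache).
 */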

struct neighbour {
	struct neighbour __rcu	*next;
	struct neigh_table	*tbl;
	struct neigh_parms	*parms;
	unsigned long		confirmed;
	unsigned long		updated;
	rwlock_t		lock;
	refcount_t		refcnt;
	struct sk_buff_head	arp_queue;
	unsigned int		arp_queue_len_bytes;
	struct timer_list	timer;
	unsigned long		used;
	atomic_t		probes;
	__u8			flags;
	__u8			nud_state;
	__u8			type;
	__u8			dead;
	seqlock_t		ha_lock;
	unsigned char		ha[ALIGN(MAX_ADDR_LEN, sizeof(unsigned long))];
	struct hh_cache		hh;
	int			(*output)(struct neighbour *, struct sk_buff *);
	const struct neigh_ops	*ops;
	struct rcu_head		rcu;
	struct net_device	*dev;
	u8			primary_key[0];
} __randomize_layout;
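
/* Illustrative sketch: nud_state holds one of the NUD_* states from
 * linux/neighbour.h, and the NUD_VALID/NUD_CONNECTED masks above group
 * them. A caller that already holds a reference typically tests the state
 * under n->lock (or tolerates a racy read):
 *
 *	read_lock_bh(&n->lock);
 *	if (n->nud_state & NUD_VALID)
 *		;	// a usable link-layer address is cached in n->ha
 *	read_unlock_bh(&n->lock);
 */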

struct neigh_ops {
	int			family;
	void			(*solicit)(struct neighbour *, struct sk_buff *);
	void			(*error_report)(struct neighbour *, struct sk_buff *);
	int			(*output)(struct neighbour *, struct sk_buff *);
	int			(*connected_output)(struct neighbour *, struct sk_buff *);
};

struct pneigh_entry {
	struct pneigh_entry	*next;
	possible_net_t		net;
	struct net_device	*dev;
	u8			flags;
	u8			key[0];
};

/*
 *	neighbour table manipulation
 */

#define NEIGH_NUM_HASH_RND	4

struct neigh_hash_table {
	struct neighbour __rcu	**hash_buckets;
	unsigned int		hash_shift;
	__u32			hash_rnd[NEIGH_NUM_HASH_RND];
	struct rcu_head		rcu;
};


struct neigh_table {
	int			family;
	unsigned int		entry_size;
	unsigned int		key_len;
	__be16			protocol;
	__u32			(*hash)(const void *pkey,
					const struct net_device *dev,
					__u32 *hash_rnd);
	bool			(*key_eq)(const struct neighbour *, const void *pkey);
	int			(*constructor)(struct neighbour *);
	int			(*pconstructor)(struct pneigh_entry *);
	void			(*pdestructor)(struct pneigh_entry *);
	void			(*proxy_redo)(struct sk_buff *skb);
	char			*id;
	struct neigh_parms	parms;
	struct list_head	parms_list;
	int			gc_interval;
	int			gc_thresh1;
	int			gc_thresh2;
	int			gc_thresh3;
	unsigned long		last_flush;
	struct delayed_work	gc_work;
	struct timer_list	proxy_timer;
	struct sk_buff_head	proxy_queue;
	atomic_t		entries;
	rwlock_t		lock;
	unsigned long		last_rand;
	struct neigh_statistics	__percpu *stats;
	struct neigh_hash_table __rcu *nht;
	struct pneigh_entry	**phash_buckets;
};

enum {
	NEIGH_ARP_TABLE = 0,
	NEIGH_ND_TABLE = 1,
	NEIGH_DN_TABLE = 2,
	NEIGH_NR_TABLES,
	NEIGH_LINK_TABLE = NEIGH_NR_TABLES /* Pseudo table for neigh_xmit */
};

static inline int neigh_parms_family(struct neigh_parms *p)
{
	return p->tbl->family;
}

#define NEIGH_PRIV_ALIGN	sizeof(long long)
#define NEIGH_ENTRY_SIZE(size)	ALIGN((size), NEIGH_PRIV_ALIGN)

static inline void *neighbour_priv(const struct neighbour *n)
{
	return (char *)n + n->tbl->entry_size;
}

/* flags for neigh_update() */
#define NEIGH_UPDATE_F_OVERRIDE			0x00000001
#define NEIGH_UPDATE_F_WEAK_OVERRIDE		0x00000002
#define NEIGH_UPDATE_F_OVERRIDE_ISROUTER	0x00000004
#define NEIGH_UPDATE_F_EXT_LEARNED		0x20000000
#define NEIGH_UPDATE_F_ISROUTER			0x40000000
#define NEIGH_UPDATE_F_ADMIN			0x80000000
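
/* Usage sketch: these flags are OR-ed together and passed to neigh_update()
 * (declared below). For example, an administrative replace of a cached
 * link-layer address might look like:
 *
 *	neigh_update(neigh, lladdr, NUD_PERMANENT,
 *		     NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_ADMIN, 0);
 *
 * where the final argument is the netlink PID reported in notifications
 * (0 when there is no originating netlink request).
 */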

static inline bool neigh_key_eq16(const struct neighbour *n, const void *pkey)
{
	return *(const u16 *)n->primary_key == *(const u16 *)pkey;
}

static inline bool neigh_key_eq32(const struct neighbour *n, const void *pkey)
{
	return *(const u32 *)n->primary_key == *(const u32 *)pkey;
}

static inline bool neigh_key_eq128(const struct neighbour *n, const void *pkey)
{
	const u32 *n32 = (const u32 *)n->primary_key;
	const u32 *p32 = pkey;

	return ((n32[0] ^ p32[0]) | (n32[1] ^ p32[1]) |
		(n32[2] ^ p32[2]) | (n32[3] ^ p32[3])) == 0;
}
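
/* These helpers are intended to be plugged into neigh_table->key_eq by the
 * protocol tables: a 4-byte key (e.g. IPv4 ARP) would use neigh_key_eq32, a
 * 16-byte key (e.g. IPv6 neighbour discovery) neigh_key_eq128. Sketch:
 *
 *	struct neigh_table arp_tbl = {
 *		.family		= AF_INET,
 *		.key_len	= 4,
 *		.key_eq		= neigh_key_eq32,
 *		...
 *	};
 */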

static inline struct neighbour *___neigh_lookup_noref(
	struct neigh_table *tbl,
	bool (*key_eq)(const struct neighbour *n, const void *pkey),
	__u32 (*hash)(const void *pkey,
		      const struct net_device *dev,
		      __u32 *hash_rnd),
	const void *pkey,
	struct net_device *dev)
{
	struct neigh_hash_table *nht = rcu_dereference_bh(tbl->nht);
	struct neighbour *n;
	u32 hash_val;

	hash_val = hash(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
	for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
	     n != NULL;
	     n = rcu_dereference_bh(n->next)) {
		if (n->dev == dev && key_eq(n, pkey))
			return n;
	}

	return NULL;
}

static inline struct neighbour *__neigh_lookup_noref(struct neigh_table *tbl,
						     const void *pkey,
						     struct net_device *dev)
{
	return ___neigh_lookup_noref(tbl, tbl->key_eq, tbl->hash, pkey, dev);
}
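
/* The _noref lookups take no reference on the returned neighbour and use
 * rcu_dereference_bh(), so callers must run inside an RCU-BH read-side
 * critical section and either finish with the entry before leaving it or
 * take a reference first. A minimal sketch:
 *
 *	rcu_read_lock_bh();
 *	n = __neigh_lookup_noref(tbl, pkey, dev);
 *	if (n && !refcount_inc_not_zero(&n->refcnt))
 *		n = NULL;
 *	rcu_read_unlock_bh();
 */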

void neigh_table_init(int index, struct neigh_table *tbl);
int neigh_table_clear(int index, struct neigh_table *tbl);
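
/* Protocols register their table against one of the NEIGH_*_TABLE indices
 * defined above, e.g. (as the ARP code does at init time):
 *
 *	neigh_table_init(NEIGH_ARP_TABLE, &arp_tbl);
 *
 * and tear it down again with neigh_table_clear(NEIGH_ARP_TABLE, &arp_tbl).
 */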
struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
			       struct net_device *dev);
struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
				     const void *pkey);
struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
				 struct net_device *dev, bool want_ref);
static inline struct neighbour *neigh_create(struct neigh_table *tbl,
					     const void *pkey,
					     struct net_device *dev)
{
	return __neigh_create(tbl, pkey, dev, true);
}
void neigh_destroy(struct neighbour *neigh);
int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb);
int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, u32 flags,
		 u32 nlmsg_pid);
void __neigh_set_probe_once(struct neighbour *neigh);
bool neigh_remove_one(struct neighbour *ndel, struct neigh_table *tbl);
void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev);
int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb);
int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb);
int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb);
struct neighbour *neigh_event_ns(struct neigh_table *tbl,
				 u8 *lladdr, void *saddr,
				 struct net_device *dev);

struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
				      struct neigh_table *tbl);
void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms);

static inline
struct net *neigh_parms_net(const struct neigh_parms *parms)
{
	return read_pnet(&parms->net);
}

unsigned long neigh_rand_reach_time(unsigned long base);

void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
		    struct sk_buff *skb);
struct pneigh_entry *pneigh_lookup(struct neigh_table *tbl, struct net *net,
				   const void *key, struct net_device *dev,
				   int creat);
struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl, struct net *net,
				     const void *key, struct net_device *dev);
int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *key,
		  struct net_device *dev);

static inline struct net *pneigh_net(const struct pneigh_entry *pneigh)
{
	return read_pnet(&pneigh->net);
}

void neigh_app_ns(struct neighbour *n);
void neigh_for_each(struct neigh_table *tbl,
		    void (*cb)(struct neighbour *, void *), void *cookie);
void __neigh_for_each_release(struct neigh_table *tbl,
			      int (*cb)(struct neighbour *));
int neigh_xmit(int fam, struct net_device *, const void *, struct sk_buff *);
void pneigh_for_each(struct neigh_table *tbl,
		     void (*cb)(struct pneigh_entry *));

struct neigh_seq_state {
	struct seq_net_private p;
	struct neigh_table *tbl;
	struct neigh_hash_table *nht;
	void *(*neigh_sub_iter)(struct neigh_seq_state *state,
				struct neighbour *n, loff_t *pos);
	unsigned int bucket;
	unsigned int flags;
#define NEIGH_SEQ_NEIGH_ONLY	0x00000001
#define NEIGH_SEQ_IS_PNEIGH	0x00000002
#define NEIGH_SEQ_SKIP_NOARP	0x00000004
};
void *neigh_seq_start(struct seq_file *, loff_t *, struct neigh_table *,
		      unsigned int);
void *neigh_seq_next(struct seq_file *, void *, loff_t *);
void neigh_seq_stop(struct seq_file *, void *);

int neigh_proc_dointvec(struct ctl_table *ctl, int write,
			void __user *buffer, size_t *lenp, loff_t *ppos);
int neigh_proc_dointvec_jiffies(struct ctl_table *ctl, int write,
				void __user *buffer,
				size_t *lenp, loff_t *ppos);
int neigh_proc_dointvec_ms_jiffies(struct ctl_table *ctl, int write,
				   void __user *buffer,
				   size_t *lenp, loff_t *ppos);

int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
			  proc_handler *proc_handler);
void neigh_sysctl_unregister(struct neigh_parms *p);

static inline void __neigh_parms_put(struct neigh_parms *parms)
{
	refcount_dec(&parms->refcnt);
}

static inline struct neigh_parms *neigh_parms_clone(struct neigh_parms *parms)
{
	refcount_inc(&parms->refcnt);
	return parms;
}

/*
 *	Neighbour references
 */

static inline void neigh_release(struct neighbour *neigh)
{
	if (refcount_dec_and_test(&neigh->refcnt))
		neigh_destroy(neigh);
}

static inline struct neighbour * neigh_clone(struct neighbour *neigh)
{
	if (neigh)
		refcount_inc(&neigh->refcnt);
	return neigh;
}
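
/* Reference lifetime sketch: lookups that return a referenced neighbour
 * (neigh_lookup(), neigh_create(), neigh_clone()) must be balanced with
 * neigh_release(); neigh_hold() takes an extra reference on an entry the
 * caller already safely holds. For example:
 *
 *	n = neigh_lookup(&arp_tbl, &ip, dev);
 *	if (n) {
 *		...	// use n
 *		neigh_release(n);
 *	}
 */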

#define neigh_hold(n)	refcount_inc(&(n)->refcnt)

static inline int neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
{
	unsigned long now = jiffies;

	if (neigh->used != now)
		neigh->used = now;
	if (!(neigh->nud_state&(NUD_CONNECTED|NUD_DELAY|NUD_PROBE)))
		return __neigh_event_send(neigh, skb);
	return 0;
}
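
/* Usage sketch: output paths call neigh_event_send() before transmitting
 * through a possibly unresolved neighbour; a non-zero return means the skb
 * was taken over (queued or dropped) by the resolution state machine:
 *
 *	if (neigh_event_send(neigh, skb) == 0) {
 *		// address resolved (or resolution not needed): safe to
 *		// build the link-layer header and transmit
 *	}
 */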

#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
static inline int neigh_hh_bridge(struct hh_cache *hh, struct sk_buff *skb)
{
	unsigned int seq, hh_alen;

	do {
		seq = read_seqbegin(&hh->hh_lock);
		hh_alen = HH_DATA_ALIGN(ETH_HLEN);
		memcpy(skb->data - hh_alen, hh->hh_data, ETH_ALEN + hh_alen - ETH_HLEN);
	} while (read_seqretry(&hh->hh_lock, seq));
	return 0;
}
#endif

static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb)
{
	unsigned int hh_alen = 0;
	unsigned int seq;
	unsigned int hh_len;

	do {
		seq = read_seqbegin(&hh->hh_lock);
		hh_len = hh->hh_len;
		if (likely(hh_len <= HH_DATA_MOD)) {
			hh_alen = HH_DATA_MOD;

			/* skb_push() would proceed silently if we have room for
			 * the unaligned size but not for the aligned size:
			 * check headroom explicitly.
			 */
			if (likely(skb_headroom(skb) >= HH_DATA_MOD)) {
				/* this is inlined by gcc */
				memcpy(skb->data - HH_DATA_MOD, hh->hh_data,
				       HH_DATA_MOD);
			}
		} else {
			hh_alen = HH_DATA_ALIGN(hh_len);

			if (likely(skb_headroom(skb) >= hh_alen)) {
				memcpy(skb->data - hh_alen, hh->hh_data,
				       hh_alen);
			}
		}
	} while (read_seqretry(&hh->hh_lock, seq));

	if (WARN_ON_ONCE(skb_headroom(skb) < hh_alen)) {
		kfree_skb(skb);
		return NET_XMIT_DROP;
	}

	__skb_push(skb, hh_len);
	return dev_queue_xmit(skb);
}

static inline int neigh_output(struct neighbour *n, struct sk_buff *skb)
{
	const struct hh_cache *hh = &n->hh;

	if ((n->nud_state & NUD_CONNECTED) && hh->hh_len)
		return neigh_hh_output(hh, skb);
	else
		return n->output(n, skb);
}
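
/* Usage sketch: L3 output routines resolve a neighbour for the next hop and
 * hand the skb to neigh_output(), which either takes the cached hardware
 * header fast path (NUD_CONNECTED with a valid hh_len) or falls back to the
 * per-neighbour output method (e.g. neigh_resolve_output()):
 *
 *	rcu_read_lock_bh();
 *	neigh = ...;	// next-hop lookup, e.g. via the routing/dst code
 *	if (!IS_ERR(neigh))
 *		res = neigh_output(neigh, skb);
 *	rcu_read_unlock_bh();
 */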

static inline struct neighbour *
__neigh_lookup(struct neigh_table *tbl, const void *pkey, struct net_device *dev, int creat)
{
	struct neighbour *n = neigh_lookup(tbl, pkey, dev);

	if (n || !creat)
		return n;

	n = neigh_create(tbl, pkey, dev);
	return IS_ERR(n) ? NULL : n;
}

static inline struct neighbour *
__neigh_lookup_errno(struct neigh_table *tbl, const void *pkey,
		     struct net_device *dev)
{
	struct neighbour *n = neigh_lookup(tbl, pkey, dev);

	if (n)
		return n;

	return neigh_create(tbl, pkey, dev);
}

struct neighbour_cb {
	unsigned long sched_next;
	unsigned int flags;
};

#define LOCALLY_ENQUEUED 0x1

#define NEIGH_CB(skb)	((struct neighbour_cb *)(skb)->cb)

static inline void neigh_ha_snapshot(char *dst, const struct neighbour *n,
				     const struct net_device *dev)
{
	unsigned int seq;

	do {
		seq = read_seqbegin(&n->ha_lock);
		memcpy(dst, n->ha, dev->addr_len);
	} while (read_seqretry(&n->ha_lock, seq));
}
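
/* Usage sketch: neigh_ha_snapshot() returns a consistent copy of the
 * hardware address even while it is being updated concurrently (the seqlock
 * retry loop re-reads on a torn update), e.g. when filling in a reply:
 *
 *	char ha[MAX_ADDR_LEN];
 *
 *	neigh_ha_snapshot(ha, neigh, neigh->dev);
 */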

static inline void neigh_update_ext_learned(struct neighbour *neigh, u32 flags,
					    int *notify)
{
	u8 ndm_flags = 0;

	if (!(flags & NEIGH_UPDATE_F_ADMIN))
		return;

	ndm_flags |= (flags & NEIGH_UPDATE_F_EXT_LEARNED) ? NTF_EXT_LEARNED : 0;
	if ((neigh->flags ^ ndm_flags) & NTF_EXT_LEARNED) {
		if (ndm_flags & NTF_EXT_LEARNED)
			neigh->flags |= NTF_EXT_LEARNED;
		else
			neigh->flags &= ~NTF_EXT_LEARNED;
		*notify = 1;
	}
}
#endif