/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_PKT_SCHED_H
#define __NET_PKT_SCHED_H

#include <linux/jiffies.h>
#include <linux/ktime.h>
#include <linux/if_vlan.h>
#include <linux/netdevice.h>
#include <net/sch_generic.h>
#include <net/net_namespace.h>
#include <uapi/linux/pkt_sched.h>

#define DEFAULT_TX_QUEUE_LEN	1000

struct qdisc_walker {
	int	stop;
	int	skip;
	int	count;
	int	(*fn)(struct Qdisc *, unsigned long cl, struct qdisc_walker *);
};
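/*
 * Illustrative sketch (all "my_*" names are hypothetical; my_sched_data
 * is sketched after qdisc_priv() below): how a class-based qdisc's
 * ->walk() typically drives a qdisc_walker, honouring skip/count and
 * setting w->stop when the callback returns a negative value.
 *
 *	static void my_walk(struct Qdisc *sch, struct qdisc_walker *w)
 *	{
 *		struct my_sched_data *q = qdisc_priv(sch);
 *		unsigned int i;
 *
 *		if (w->stop)
 *			return;
 *		for (i = 0; i < q->nclasses; i++) {
 *			if (w->count < w->skip) {
 *				w->count++;
 *				continue;
 *			}
 *			if (w->fn(sch, i + 1, w) < 0) {
 *				w->stop = 1;
 *				return;
 *			}
 *			w->count++;
 *		}
 *	}
 */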

#define QDISC_ALIGNTO		64
#define QDISC_ALIGN(len)	(((len) + QDISC_ALIGNTO-1) & ~(QDISC_ALIGNTO-1))

static inline void *qdisc_priv(struct Qdisc *q)
{
	return (char *) q + QDISC_ALIGN(sizeof(struct Qdisc));
}
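/*
 * A qdisc's private area lives immediately after struct Qdisc itself,
 * aligned to QDISC_ALIGNTO; qdisc_alloc() reserves roughly
 * QDISC_ALIGN(sizeof(struct Qdisc)) + ops->priv_size bytes for it.  A
 * minimal sketch, assuming a hypothetical qdisc whose Qdisc_ops sets
 * .priv_size = sizeof(struct my_sched_data):
 *
 *	struct my_sched_data {
 *		u32			limit;
 *		unsigned int		nclasses;
 *		struct qdisc_watchdog	watchdog;
 *	};
 *
 *	static int my_init(struct Qdisc *sch, struct nlattr *opt,
 *			   struct netlink_ext_ack *extack)
 *	{
 *		struct my_sched_data *q = qdisc_priv(sch);
 *
 *		q->limit = DEFAULT_TX_QUEUE_LEN;
 *		qdisc_watchdog_init(&q->watchdog, sch);
 *		return 0;
 *	}
 */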

/*
   Timer resolution MUST BE < 10% of min_schedulable_packet_size/bandwidth.

   A normal IP packet is ~512 bytes, hence:

   0.5 Kbyte / 1 Mbyte/sec = 0.5 msec, so we need a 50 usec timer for
   10 Mbit ethernet.

   10 msec resolution -> < 50 Kbit/sec.

   The result: a [34]86 is not a good choice for a QoS router :-(

   Things are not so bad, though, because we can use an artificial
   clock, evaluated by integrating the network data flow at the most
   critical places.
 */
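/*
 * The same 10% rule at modern link speeds: a 512 byte packet takes
 * ~4.1 usec on the wire at 1 Gbit/sec, so the timer would need ~400
 * nsec resolution.  That is far beyond jiffies resolution, which is
 * why the qdisc_watchdog below is built on hrtimers and ktime.
 */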

typedef u64	psched_time_t;
typedef long	psched_tdiff_t;

/* Avoid doing 64 bit divide */
#define PSCHED_SHIFT			6
#define PSCHED_TICKS2NS(x)		((s64)(x) << PSCHED_SHIFT)
#define PSCHED_NS2TICKS(x)		((x) >> PSCHED_SHIFT)

#define PSCHED_TICKS_PER_SEC		PSCHED_NS2TICKS(NSEC_PER_SEC)
#define PSCHED_PASTPERFECT		0
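/*
 * With PSCHED_SHIFT == 6, one psched tick is 2^6 = 64 ns and
 * PSCHED_TICKS_PER_SEC is 10^9 >> 6 = 15625000.  For example:
 *
 *	PSCHED_TICKS2NS(1)   == 64
 *	PSCHED_NS2TICKS(640) == 10
 *
 * Shifting instead of dividing by the tick length trades a round
 * decimal tick for avoiding a 64 bit divide on every conversion.
 */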

static inline psched_time_t psched_get_time(void)
{
	return PSCHED_NS2TICKS(ktime_get_ns());
}

static inline psched_tdiff_t
psched_tdiff_bounded(psched_time_t tv1, psched_time_t tv2, psched_time_t bound)
{
	return min(tv1 - tv2, bound);
}

struct qdisc_watchdog {
	u64		last_expires;
	struct hrtimer	timer;
	struct Qdisc	*qdisc;
};

void qdisc_watchdog_init_clockid(struct qdisc_watchdog *wd, struct Qdisc *qdisc,
				 clockid_t clockid);
void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc);
void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires);

static inline void qdisc_watchdog_schedule(struct qdisc_watchdog *wd,
					   psched_time_t expires)
{
	qdisc_watchdog_schedule_ns(wd, PSCHED_TICKS2NS(expires));
}

void qdisc_watchdog_cancel(struct qdisc_watchdog *wd);
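/*
 * Sketch of typical watchdog use by a rate-limiting qdisc, in the style
 * of sch_tbf: assume the hypothetical my_sched_data above also carries
 * an inner child qdisc q->qdisc, and my_next_send_time() stands in for
 * whatever computes when the head packet may go out.  Queue-length and
 * backlog accounting are elided.  When the packet is not yet due,
 * ->dequeue() returns NULL and arms the watchdog, whose expiry will
 * reschedule the qdisc:
 *
 *	static struct sk_buff *my_dequeue(struct Qdisc *sch)
 *	{
 *		struct my_sched_data *q = qdisc_priv(sch);
 *		struct sk_buff *skb;
 *		u64 now, due;
 *
 *		skb = q->qdisc->ops->peek(q->qdisc);
 *		if (!skb)
 *			return NULL;
 *		now = ktime_get_ns();
 *		due = my_next_send_time(q, skb);
 *		if (due > now) {
 *			qdisc_watchdog_schedule_ns(&q->watchdog, due);
 *			return NULL;
 *		}
 *		skb = qdisc_dequeue_peeked(q->qdisc);
 *		if (unlikely(!skb))
 *			return NULL;
 *		qdisc_bstats_update(sch, skb);
 *		return skb;
 *	}
 *
 * with qdisc_watchdog_cancel(&q->watchdog) called from ->reset() and
 * ->destroy().
 */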

extern struct Qdisc_ops pfifo_qdisc_ops;
extern struct Qdisc_ops bfifo_qdisc_ops;
extern struct Qdisc_ops pfifo_head_drop_qdisc_ops;

int fifo_set_limit(struct Qdisc *q, unsigned int limit);
struct Qdisc *fifo_create_dflt(struct Qdisc *sch, struct Qdisc_ops *ops,
			       unsigned int limit,
			       struct netlink_ext_ack *extack);

int register_qdisc(struct Qdisc_ops *qops);
int unregister_qdisc(struct Qdisc_ops *qops);
void qdisc_get_default(char *id, size_t len);
int qdisc_set_default(const char *id);
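/*
 * Sketch of registering a qdisc from module init (handler bodies
 * omitted; all "my_*" names are hypothetical):
 *
 *	static int my_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 *			      struct sk_buff **to_free);
 *	static struct sk_buff *my_dequeue(struct Qdisc *sch);
 *	static int my_init(struct Qdisc *sch, struct nlattr *opt,
 *			   struct netlink_ext_ack *extack);
 *
 *	static struct Qdisc_ops my_qdisc_ops __read_mostly = {
 *		.id		= "myqdisc",
 *		.priv_size	= sizeof(struct my_sched_data),
 *		.enqueue	= my_enqueue,
 *		.dequeue	= my_dequeue,
 *		.init		= my_init,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	static int __init my_module_init(void)
 *	{
 *		return register_qdisc(&my_qdisc_ops);
 *	}
 *
 *	static void __exit my_module_exit(void)
 *	{
 *		unregister_qdisc(&my_qdisc_ops);
 *	}
 *
 *	module_init(my_module_init);
 *	module_exit(my_module_exit);
 */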

void qdisc_hash_add(struct Qdisc *q, bool invisible);
void qdisc_hash_del(struct Qdisc *q);
struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle);
struct Qdisc *qdisc_lookup_rcu(struct net_device *dev, u32 handle);
struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
					struct nlattr *tab,
					struct netlink_ext_ack *extack);
void qdisc_put_rtab(struct qdisc_rate_table *tab);
void qdisc_put_stab(struct qdisc_size_table *tab);
void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc);
bool sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
		     struct net_device *dev, struct netdev_queue *txq,
		     spinlock_t *root_lock, bool validate);

void __qdisc_run(struct Qdisc *q);

static inline void qdisc_run(struct Qdisc *q)
{
	if (qdisc_run_begin(q)) {
		__qdisc_run(q);
		qdisc_run_end(q);
	}
}
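/*
 * qdisc_run_begin() returns false when another CPU already owns the
 * qdisc's running state; freshly enqueued packets will then be drained
 * by that CPU, so losing the race is fine.  Simplified sketch of the
 * call site in the core transmit path (the real __dev_xmit_skb() in
 * net/core/dev.c also handles locking, qdisc bypass and more):
 *
 *	rc = q->enqueue(skb, q, &to_free);
 *	qdisc_run(q);
 */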

/* Calculate the maximal size of packet seen by the hard_start_xmit
   routine of this device.
 */
static inline unsigned int psched_mtu(const struct net_device *dev)
{
	return dev->mtu + dev->hard_header_len;
}
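/* For a plain Ethernet device: 1500 (MTU) + 14 (ETH_HLEN) = 1514 bytes. */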

static inline struct net *qdisc_net(struct Qdisc *q)
{
	return dev_net(q->dev_queue->dev);
}

struct tc_cbs_qopt_offload {
	u8 enable;
	s32 queue;
	s32 hicredit;
	s32 locredit;
	s32 idleslope;
	s32 sendslope;
};

struct tc_etf_qopt_offload {
	u8 enable;
	s32 queue;
};
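/*
 * Illustrative sketch of a driver consuming these offloads from its
 * ndo_setup_tc() hook; my_setup_tc() and the my_hw_*() helpers are
 * hypothetical, the TC_SETUP_QDISC_* types come from enum
 * tc_setup_type:
 *
 *	static int my_setup_tc(struct net_device *dev,
 *			       enum tc_setup_type type, void *type_data)
 *	{
 *		switch (type) {
 *		case TC_SETUP_QDISC_CBS: {
 *			struct tc_cbs_qopt_offload *cbs = type_data;
 *
 *			return my_hw_set_cbs(dev, cbs->queue, cbs->enable,
 *					     cbs->idleslope, cbs->sendslope,
 *					     cbs->hicredit, cbs->locredit);
 *		}
 *		case TC_SETUP_QDISC_ETF: {
 *			struct tc_etf_qopt_offload *etf = type_data;
 *
 *			return my_hw_set_launchtime(dev, etf->queue,
 *						    etf->enable);
 *		}
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 */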

struct tc_taprio_sched_entry {
	u8 command; /* TC_TAPRIO_CMD_* */

	/* The gate_mask in the offloading side refers to traffic classes */
	u32 gate_mask;
	u32 interval;
};

struct tc_taprio_qopt_offload {
	u8 enable;
	ktime_t base_time;
	u64 cycle_time;
	u64 cycle_time_extension;

	size_t num_entries;
	struct tc_taprio_sched_entry entries[];
};

/* Reference counting */
struct tc_taprio_qopt_offload *taprio_offload_get(struct tc_taprio_qopt_offload
						  *offload);
void taprio_offload_free(struct tc_taprio_qopt_offload *offload);
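/*
 * Sketch of driver-side use of the reference counting above (names
 * other than taprio_offload_get/free are hypothetical).  taprio hands
 * the offload to the driver via ndo_setup_tc(TC_SETUP_QDISC_TAPRIO); a
 * driver that keeps the schedule around takes a reference first:
 *
 *	static int my_setup_taprio(struct my_priv *priv,
 *				   struct tc_taprio_qopt_offload *offload)
 *	{
 *		size_t i;
 *
 *		priv->taprio = taprio_offload_get(offload);
 *		for (i = 0; i < offload->num_entries; i++)
 *			my_hw_program_entry(priv, &offload->entries[i]);
 *		return 0;
 *	}
 *
 * and drops it with taprio_offload_free(priv->taprio) once the
 * hardware no longer needs the schedule.
 */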

#endif