/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_PKT_SCHED_H
#define __NET_PKT_SCHED_H

#include <linux/jiffies.h>
#include <linux/ktime.h>
#include <linux/if_vlan.h>
#include <linux/netdevice.h>
#include <net/sch_generic.h>
#include <net/net_namespace.h>
#include <uapi/linux/pkt_sched.h>

#define DEFAULT_TX_QUEUE_LEN	1000
#define STAB_SIZE_LOG_MAX	30

struct qdisc_walker {
	int	stop;
	int	skip;
	int	count;
	int	(*fn)(struct Qdisc *, unsigned long cl, struct qdisc_walker *);
};

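/*
 * struct qdisc_walker above is the cursor passed to a qdisc's .walk()
 * class iterator. The usual convention inside a walk implementation is
 * roughly the following (illustrative sketch only):
 *
 *	if (arg->count >= arg->skip &&
 *	    arg->fn(sch, (unsigned long)cl, arg) < 0) {
 *		arg->stop = 1;
 *		return;
 *	}
 *	arg->count++;
 */
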
static inline void *qdisc_priv(struct Qdisc *q)
{
	return &q->privdata;
}

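/*
 * qdisc_priv() returns the per-qdisc private area that follows struct
 * Qdisc. Illustrative use, following the pattern of qdisc implementations
 * such as sch_fq_codel:
 *
 *	struct fq_codel_sched_data *q = qdisc_priv(sch);
 */
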
/*
   Timer resolution MUST BE < 10% of min_schedulable_packet_size/bandwidth

   A normal IP packet is ~512 bytes, hence:

   0.5 Kbyte / 1 Mbyte/sec = 0.5 msec, so by the 10% rule above we need
   a 50 usec timer for 10 Mbit ethernet.

   Conversely, 10 msec resolution -> < 50 Kbit/sec.

   The result: a [34]86 is not a good choice for a QoS router :-(

   Things are not so bad, though, because we may use an artificial clock,
   evaluated by integrating the network data flow, in the most critical
   places.
 */

typedef u64	psched_time_t;
typedef long	psched_tdiff_t;

/* Avoid doing 64 bit divide */
#define PSCHED_SHIFT		6
#define PSCHED_TICKS2NS(x)	((s64)(x) << PSCHED_SHIFT)
#define PSCHED_NS2TICKS(x)	((x) >> PSCHED_SHIFT)

#define PSCHED_TICKS_PER_SEC	PSCHED_NS2TICKS(NSEC_PER_SEC)
#define PSCHED_PASTPERFECT	0

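/*
 * Worked example of the scaling above: with PSCHED_SHIFT == 6 one psched
 * tick is 2^6 = 64 ns, so PSCHED_TICKS_PER_SEC evaluates to
 * 1,000,000,000 >> 6 = 15,625,000 ticks per second.
 */
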
static inline psched_time_t psched_get_time(void)
{
	return PSCHED_NS2TICKS(ktime_get_ns());
}

static inline psched_tdiff_t
psched_tdiff_bounded(psched_time_t tv1, psched_time_t tv2, psched_time_t bound)
{
	return min(tv1 - tv2, bound);
}

struct qdisc_watchdog {
	u64		last_expires;
	struct hrtimer	timer;
	struct Qdisc	*qdisc;
};

void qdisc_watchdog_init_clockid(struct qdisc_watchdog *wd, struct Qdisc *qdisc,
				 clockid_t clockid);
void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc);

void qdisc_watchdog_schedule_range_ns(struct qdisc_watchdog *wd, u64 expires,
				      u64 delta_ns);

static inline void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd,
					      u64 expires)
{
	return qdisc_watchdog_schedule_range_ns(wd, expires, 0ULL);
}

static inline void qdisc_watchdog_schedule(struct qdisc_watchdog *wd,
					   psched_time_t expires)
{
	qdisc_watchdog_schedule_ns(wd, PSCHED_TICKS2NS(expires));
}

void qdisc_watchdog_cancel(struct qdisc_watchdog *wd);

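/*
 * Typical use of the watchdog (illustrative; the field and variable names
 * below are made up): a non-work-conserving qdisc that finds the next
 * packet not yet eligible arms the watchdog from its dequeue path and
 * returns NULL, e.g.:
 *
 *	if (now < q->time_next_packet) {
 *		qdisc_watchdog_schedule_ns(&q->watchdog, q->time_next_packet);
 *		return NULL;
 *	}
 *
 * The watchdog's hrtimer then reschedules the qdisc when that time arrives.
 */
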
extern struct Qdisc_ops pfifo_qdisc_ops;
extern struct Qdisc_ops bfifo_qdisc_ops;
extern struct Qdisc_ops pfifo_head_drop_qdisc_ops;

int fifo_set_limit(struct Qdisc *q, unsigned int limit);
struct Qdisc *fifo_create_dflt(struct Qdisc *sch, struct Qdisc_ops *ops,
			       unsigned int limit,
			       struct netlink_ext_ack *extack);

int register_qdisc(struct Qdisc_ops *qops);
int unregister_qdisc(struct Qdisc_ops *qops);
void qdisc_get_default(char *id, size_t len);
int qdisc_set_default(const char *id);

void qdisc_hash_add(struct Qdisc *q, bool invisible);
void qdisc_hash_del(struct Qdisc *q);
struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle);
struct Qdisc *qdisc_lookup_rcu(struct net_device *dev, u32 handle);
struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
					struct nlattr *tab,
					struct netlink_ext_ack *extack);
void qdisc_put_rtab(struct qdisc_rate_table *tab);
void qdisc_put_stab(struct qdisc_size_table *tab);
void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc);
bool sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
		     struct net_device *dev, struct netdev_queue *txq,
		     spinlock_t *root_lock, bool validate);

void __qdisc_run(struct Qdisc *q);

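/*
 * qdisc_run_begin() takes exclusive "running" ownership of the qdisc;
 * if another CPU is already running it, it returns false and that CPU
 * is responsible for draining the queue.
 */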
static inline void qdisc_run(struct Qdisc *q)
{
	if (qdisc_run_begin(q)) {
		__qdisc_run(q);
		qdisc_run_end(q);
	}
}

/* Calculate the maximal size of a packet seen by the hard_start_xmit
   routine of this device.
 */
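/* For a conventional Ethernet device this works out to
 * 1500 (MTU) + 14 (hard_header_len) = 1514 bytes.
 */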
static inline unsigned int psched_mtu(const struct net_device *dev)
{
	return dev->mtu + dev->hard_header_len;
}

static inline struct net *qdisc_net(struct Qdisc *q)
{
	return dev_net(q->dev_queue->dev);
}

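/*
 * Parameters for offloading the Credit-Based Shaper (IEEE 802.1Qav) to
 * hardware. As a rough guide (the relations come from the CBS algorithm,
 * not from this header): sendslope = idleslope - port transmit rate, and
 * the accumulated credit stays within the [locredit, hicredit] range.
 */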
struct tc_cbs_qopt_offload {
	u8 enable;
	s32 queue;
	s32 hicredit;
	s32 locredit;
	s32 idleslope;
	s32 sendslope;
};

struct tc_etf_qopt_offload {
	u8 enable;
	s32 queue;
};

struct tc_taprio_sched_entry {
	u8 command; /* TC_TAPRIO_CMD_* */

	/* The gate_mask in the offloading side refers to traffic classes */
	u32 gate_mask;
	u32 interval;
};

struct tc_taprio_qopt_offload {
	u8 enable;
	ktime_t base_time;
	u64 cycle_time;
	u64 cycle_time_extension;

	size_t num_entries;
	struct tc_taprio_sched_entry entries[];
};
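
/*
 * entries[] is a flexible array holding num_entries gate-control entries.
 * A driver consuming the offload would typically iterate over it roughly
 * like this (program_gcl_entry() is a hypothetical driver helper, not a
 * kernel API):
 *
 *	for (i = 0; i < offload->num_entries; i++)
 *		program_gcl_entry(hw, i, &offload->entries[i]);
 */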

/* Reference counting */
struct tc_taprio_qopt_offload *taprio_offload_get(struct tc_taprio_qopt_offload
						  *offload);
void taprio_offload_free(struct tc_taprio_qopt_offload *offload);

#endif