// SPDX-License-Identifier: GPL-1.0+
/* generic HDLC line discipline for Linux
 *
 * Written by Paul Fulghum paulkf@microgate.com
 * for Microgate Corporation
 *
 * Microgate and SyncLink are registered trademarks of Microgate Corporation
 *
 * Adapted from ppp.c, written by Michael Callahan <callahan@maths.ox.ac.uk>,
 *	Al Longyear <longyear@netcom.com>,
 *	Paul Mackerras <Paul.Mackerras@cs.anu.edu.au>
 *
 * Original release 01/11/99
 *
 * This module implements the tty line discipline N_HDLC for use with
 * tty device drivers that support bit-synchronous HDLC communications.
 *
 * All HDLC data is frame oriented which means:
 *
 * 1. tty write calls represent one complete transmit frame of data.
 *    The device driver should accept the complete frame or none of
 *    the frame (busy) in the write method. Each write call should have
 *    a byte count in the range of 2-65535 bytes (2 is the minimum HDLC
 *    frame, with 1 addr byte and 1 ctrl byte). The max byte count of
 *    65535 should include any crc bytes required. For example, when
 *    using CCITT CRC32, 4 crc bytes are required, so the maximum size
 *    frame the application may transmit is limited to 65531 bytes. For
 *    CCITT CRC16, the maximum application frame size would be 65533.
 *
 * 2. receive callbacks from the device driver represent
 *    one received frame. The device driver should bypass
 *    the tty flip buffer and call the line discipline receive
 *    callback directly to avoid fragmenting or concatenating
 *    multiple frames into a single receive callback.
 *
 *    The HDLC line discipline queues the receive frames in separate
 *    buffers so complete receive frames can be returned by the
 *    tty read calls.
 *
 * 3. tty read calls return an entire frame of data or nothing.
 *
 * 4. all send and receive data is considered raw. No processing
 *    or translation is performed by the line discipline, regardless
 *    of the tty flags.
 *
 * 5. when the line discipline is queried for the amount of receive
 *    data available (FIONREAD), 0 is returned if no data is available;
 *    otherwise the byte count of the next available frame is returned
 *    (instead of the sum of all received frame counts).
 *
 * These conventions allow the standard tty programming interface
 * to be used for synchronous HDLC applications when used with
 * this line discipline (or another line discipline that is frame
 * oriented such as N_PPP).
 *
 * The SyncLink driver (synclink.c) implements both asynchronous
 * (using standard line discipline N_TTY) and synchronous HDLC
 * (using N_HDLC) communications, with the latter using the above
 * conventions.
 *
 * This implementation is very basic and does not maintain
 * any statistics. The main point is to enforce the raw data
 * and frame orientation of HDLC communications.
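 *
 * A rough user-space sketch of these conventions (the device path and
 * buffer names below are only examples, not part of this driver): the
 * discipline is selected with TIOCSETD, after which each write() sends
 * exactly one frame and each read() returns one frame or nothing.
 *
 *	int ldisc = N_HDLC;
 *	int fd = open("/dev/ttySL0", O_RDWR);
 *	ioctl(fd, TIOCSETD, &ldisc);
 *	write(fd, txbuf, txlen);
 *	rxlen = read(fd, rxbuf, sizeof(rxbuf));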
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#define HDLC_MAGIC 0x239e

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>

#include <linux/poll.h>
#include <linux/in.h>
#include <linux/ioctl.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/errno.h>
#include <linux/string.h>	/* used in new tty drivers */
#include <linux/signal.h>	/* used in new tty drivers */
#include <linux/if.h>
#include <linux/bitops.h>

#include <asm/termios.h>
#include <linux/uaccess.h>

/*
 * Buffers for individual HDLC frames
 */
#define MAX_HDLC_FRAME_SIZE 65535
#define DEFAULT_RX_BUF_COUNT 10
#define MAX_RX_BUF_COUNT 60
#define DEFAULT_TX_BUF_COUNT 3

struct n_hdlc_buf {
        struct list_head list_item;
        int count;
        char buf[];
};

struct n_hdlc_buf_list {
        struct list_head list;
        int count;
        spinlock_t spinlock;
};

/**
 * struct n_hdlc - per device instance data structure
 * @magic: magic value for structure
 * @tbusy: reentrancy flag for tx wakeup code
 * @woke_up: tx wakeup needs to be run again as it was called while @tbusy
 * @tx_buf_list: list of pending transmit frame buffers
 * @rx_buf_list: list of received frame buffers
 * @tx_free_buf_list: list of unused transmit frame buffers
 * @rx_free_buf_list: list of unused received frame buffers
 * @write_work: work scheduled by the tty wakeup callback to send pending frames
 * @tty_for_write_work: tty that @write_work operates on
 */
struct n_hdlc {
        int magic;
        bool tbusy;
        bool woke_up;
        struct n_hdlc_buf_list tx_buf_list;
        struct n_hdlc_buf_list rx_buf_list;
        struct n_hdlc_buf_list tx_free_buf_list;
        struct n_hdlc_buf_list rx_free_buf_list;
        struct work_struct write_work;
        struct tty_struct *tty_for_write_work;
};

/*
 * HDLC buffer list manipulation functions
 */
static void n_hdlc_buf_return(struct n_hdlc_buf_list *buf_list,
                              struct n_hdlc_buf *buf);
static void n_hdlc_buf_put(struct n_hdlc_buf_list *list,
                           struct n_hdlc_buf *buf);
static struct n_hdlc_buf *n_hdlc_buf_get(struct n_hdlc_buf_list *list);

/* Local functions */

static struct n_hdlc *n_hdlc_alloc(void);
static void n_hdlc_tty_write_work(struct work_struct *work);

/* max frame size for memory allocations */
static int maxframe = 4096;

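/*
 * flush_rx_queue - discard all queued receive frames for this tty, returning
 * their buffers to the rx free list
 */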
static void flush_rx_queue(struct tty_struct *tty)
{
        struct n_hdlc *n_hdlc = tty->disc_data;
        struct n_hdlc_buf *buf;

        while ((buf = n_hdlc_buf_get(&n_hdlc->rx_buf_list)))
                n_hdlc_buf_put(&n_hdlc->rx_free_buf_list, buf);
}

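/*
 * flush_tx_queue - discard all not-yet-sent transmit frames queued for this
 * tty, returning their buffers to the tx free list
 */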
static void flush_tx_queue(struct tty_struct *tty)
{
        struct n_hdlc *n_hdlc = tty->disc_data;
        struct n_hdlc_buf *buf;

        while ((buf = n_hdlc_buf_get(&n_hdlc->tx_buf_list)))
                n_hdlc_buf_put(&n_hdlc->tx_free_buf_list, buf);
}

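/* n_hdlc_free_buf_list - kfree() every buffer remaining on the given list */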
static void n_hdlc_free_buf_list(struct n_hdlc_buf_list *list)
{
        struct n_hdlc_buf *buf;

        do {
                buf = n_hdlc_buf_get(list);
                kfree(buf);
        } while (buf);
}

/**
 * n_hdlc_tty_close - line discipline close
 * @tty: pointer to tty info structure
 *
 * Called when the line discipline is changed to something
 * else, the tty is closed, or the tty detects a hangup.
 */
static void n_hdlc_tty_close(struct tty_struct *tty)
{
        struct n_hdlc *n_hdlc = tty->disc_data;

        if (n_hdlc->magic != HDLC_MAGIC) {
                pr_warn("n_hdlc: trying to close unopened tty!\n");
                return;
        }
#if defined(TTY_NO_WRITE_SPLIT)
        clear_bit(TTY_NO_WRITE_SPLIT, &tty->flags);
#endif
        tty->disc_data = NULL;

        /* Ensure that a process blocked in select()/poll() is not left hanging */
        wake_up_interruptible(&tty->read_wait);
        wake_up_interruptible(&tty->write_wait);

        cancel_work_sync(&n_hdlc->write_work);

        n_hdlc_free_buf_list(&n_hdlc->rx_free_buf_list);
        n_hdlc_free_buf_list(&n_hdlc->tx_free_buf_list);
        n_hdlc_free_buf_list(&n_hdlc->rx_buf_list);
        n_hdlc_free_buf_list(&n_hdlc->tx_buf_list);
        kfree(n_hdlc);
} /* end of n_hdlc_tty_close() */

/**
 * n_hdlc_tty_open - called when line discipline changed to n_hdlc
 * @tty: pointer to tty info structure
 *
 * Returns 0 if success, otherwise error code
 */
static int n_hdlc_tty_open(struct tty_struct *tty)
{
        struct n_hdlc *n_hdlc = tty->disc_data;

        pr_debug("%s() called (device=%s)\n", __func__, tty->name);

        /* There should not be an existing table for this slot. */
        if (n_hdlc) {
                pr_err("%s: tty already associated!\n", __func__);
                return -EEXIST;
        }

        n_hdlc = n_hdlc_alloc();
        if (!n_hdlc) {
                pr_err("%s: n_hdlc_alloc failed\n", __func__);
                return -ENFILE;
        }

        INIT_WORK(&n_hdlc->write_work, n_hdlc_tty_write_work);
        n_hdlc->tty_for_write_work = tty;
        tty->disc_data = n_hdlc;
        tty->receive_room = 65536;

        /* change tty_io write() to not split large writes into 8K chunks */
        set_bit(TTY_NO_WRITE_SPLIT, &tty->flags);

        /* flush receive data from driver */
        tty_driver_flush_buffer(tty);

        return 0;

} /* end of n_hdlc_tty_open() */

/**
 * n_hdlc_send_frames - send frames on pending send buffer list
 * @n_hdlc: pointer to ldisc instance data
 * @tty: pointer to tty instance data
 *
 * Send frames on the pending send buffer list until the driver does not
 * accept a frame (busy). This function is called after adding a frame to the
 * send buffer list and by the tty wakeup callback.
 */
static void n_hdlc_send_frames(struct n_hdlc *n_hdlc, struct tty_struct *tty)
{
        register int actual;
        unsigned long flags;
        struct n_hdlc_buf *tbuf;

check_again:

        spin_lock_irqsave(&n_hdlc->tx_buf_list.spinlock, flags);
        if (n_hdlc->tbusy) {
                n_hdlc->woke_up = true;
                spin_unlock_irqrestore(&n_hdlc->tx_buf_list.spinlock, flags);
                return;
        }
        n_hdlc->tbusy = true;
        n_hdlc->woke_up = false;
        spin_unlock_irqrestore(&n_hdlc->tx_buf_list.spinlock, flags);

        tbuf = n_hdlc_buf_get(&n_hdlc->tx_buf_list);
        while (tbuf) {
                pr_debug("sending frame %p, count=%d\n", tbuf, tbuf->count);

                /* Send the next block of data to device */
                set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
                actual = tty->ops->write(tty, tbuf->buf, tbuf->count);

                /* rollback was possible and has been done */
                if (actual == -ERESTARTSYS) {
                        n_hdlc_buf_return(&n_hdlc->tx_buf_list, tbuf);
                        break;
                }
                /* if transmit error, throw frame away by */
                /* pretending it was accepted by driver */
                if (actual < 0)
                        actual = tbuf->count;

                if (actual == tbuf->count) {
                        pr_debug("frame %p completed\n", tbuf);

                        /* free current transmit buffer */
                        n_hdlc_buf_put(&n_hdlc->tx_free_buf_list, tbuf);

                        /* wake up sleeping writers */
                        wake_up_interruptible(&tty->write_wait);

                        /* get next pending transmit buffer */
                        tbuf = n_hdlc_buf_get(&n_hdlc->tx_buf_list);
                } else {
                        pr_debug("frame %p pending\n", tbuf);

                        /*
                         * the buffer was not accepted by driver,
                         * return it back into tx queue
                         */
                        n_hdlc_buf_return(&n_hdlc->tx_buf_list, tbuf);
                        break;
                }
        }

        if (!tbuf)
                clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);

        /* Clear the re-entry flag */
        spin_lock_irqsave(&n_hdlc->tx_buf_list.spinlock, flags);
        n_hdlc->tbusy = false;
        spin_unlock_irqrestore(&n_hdlc->tx_buf_list.spinlock, flags);

        if (n_hdlc->woke_up)
                goto check_again;
} /* end of n_hdlc_send_frames() */

/**
 * n_hdlc_tty_write_work - Asynchronous callback for transmit wakeup
 * @work: pointer to work_struct
 *
 * Called when low level device driver can accept more send data.
 */
static void n_hdlc_tty_write_work(struct work_struct *work)
{
        struct n_hdlc *n_hdlc = container_of(work, struct n_hdlc, write_work);
        struct tty_struct *tty = n_hdlc->tty_for_write_work;

        n_hdlc_send_frames(n_hdlc, tty);
} /* end of n_hdlc_tty_write_work() */

/**
 * n_hdlc_tty_wakeup - Callback for transmit wakeup
 * @tty: pointer to associated tty instance data
 *
 * Called when low level device driver can accept more send data.
 */
static void n_hdlc_tty_wakeup(struct tty_struct *tty)
{
        struct n_hdlc *n_hdlc = tty->disc_data;

        schedule_work(&n_hdlc->write_work);
} /* end of n_hdlc_tty_wakeup() */

/**
 * n_hdlc_tty_receive - Called by tty driver when receive data is available
 * @tty: pointer to tty instance data
 * @data: pointer to received data
 * @flags: pointer to flags for data
 * @count: count of received data in bytes
 *
 * Called by tty low level driver when receive data is available. Data is
 * interpreted as one HDLC frame.
 */
static void n_hdlc_tty_receive(struct tty_struct *tty, const __u8 *data,
                               char *flags, int count)
{
        register struct n_hdlc *n_hdlc = tty->disc_data;
        register struct n_hdlc_buf *buf;

        pr_debug("%s() called count=%d\n", __func__, count);

        /* verify line is using HDLC discipline */
        if (n_hdlc->magic != HDLC_MAGIC) {
                pr_err("line not using HDLC discipline\n");
                return;
        }

        if (count > maxframe) {
                pr_debug("rx count>maxframesize, data discarded\n");
                return;
        }

        /* get a free HDLC buffer */
        buf = n_hdlc_buf_get(&n_hdlc->rx_free_buf_list);
        if (!buf) {
                /*
                 * no buffers in free list, attempt to allocate another rx
                 * buffer unless the maximum count has been reached
                 */
                if (n_hdlc->rx_buf_list.count < MAX_RX_BUF_COUNT)
                        buf = kmalloc(struct_size(buf, buf, maxframe),
                                      GFP_ATOMIC);
        }

        if (!buf) {
                pr_debug("no more rx buffers, data discarded\n");
                return;
        }

        /* copy received data to HDLC buffer */
        memcpy(buf->buf, data, count);
        buf->count = count;

        /* add HDLC buffer to list of received frames */
        n_hdlc_buf_put(&n_hdlc->rx_buf_list, buf);

        /* wake up any blocked reads and perform async signalling */
        wake_up_interruptible(&tty->read_wait);
        if (tty->fasync != NULL)
                kill_fasync(&tty->fasync, SIGIO, POLL_IN);

} /* end of n_hdlc_tty_receive() */

/**
 * n_hdlc_tty_read - Called to retrieve one frame of data (if available)
 * @tty: pointer to tty instance data
 * @file: pointer to open file object
 * @kbuf: pointer to returned data buffer
 * @nr: size of returned data buffer
 * @cookie: stored pointer to the frame buffer from a previous partial read
 * @offset: offset into the frame data already returned to the caller
 *
 * Returns the number of bytes read or an error code.
 */
static ssize_t n_hdlc_tty_read(struct tty_struct *tty, struct file *file,
                               __u8 *kbuf, size_t nr,
                               void **cookie, unsigned long offset)
{
        struct n_hdlc *n_hdlc = tty->disc_data;
        int ret = 0;
        struct n_hdlc_buf *rbuf;
        DECLARE_WAITQUEUE(wait, current);

        /* Is this a repeated call for an rbuf we already found earlier? */
        rbuf = *cookie;
        if (rbuf)
                goto have_rbuf;

        add_wait_queue(&tty->read_wait, &wait);

        for (;;) {
                if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) {
                        ret = -EIO;
                        break;
                }
                if (tty_hung_up_p(file))
                        break;

                set_current_state(TASK_INTERRUPTIBLE);

                rbuf = n_hdlc_buf_get(&n_hdlc->rx_buf_list);
                if (rbuf)
                        break;

                /* no data */
                if (tty_io_nonblock(tty, file)) {
                        ret = -EAGAIN;
                        break;
                }

                schedule();

                if (signal_pending(current)) {
                        ret = -EINTR;
                        break;
                }
        }

        remove_wait_queue(&tty->read_wait, &wait);
        __set_current_state(TASK_RUNNING);

        if (!rbuf)
                return ret;
        *cookie = rbuf;

have_rbuf:
        /* Have we used it up entirely? */
        if (offset >= rbuf->count)
                goto done_with_rbuf;

        /* More data to go, but can't copy any more? EOVERFLOW */
        ret = -EOVERFLOW;
        if (!nr)
                goto done_with_rbuf;

        /* Copy as much data as possible */
        ret = rbuf->count - offset;
        if (ret > nr)
                ret = nr;
        memcpy(kbuf, rbuf->buf + offset, ret);
        offset += ret;

        /* If we still have data left, we leave the rbuf in the cookie */
        if (offset < rbuf->count)
                return ret;

done_with_rbuf:
        *cookie = NULL;

        if (n_hdlc->rx_free_buf_list.count > DEFAULT_RX_BUF_COUNT)
                kfree(rbuf);
        else
                n_hdlc_buf_put(&n_hdlc->rx_free_buf_list, rbuf);

        return ret;

} /* end of n_hdlc_tty_read() */

/**
 * n_hdlc_tty_write - write a single frame of data to device
 * @tty: pointer to associated tty device instance data
 * @file: pointer to file object data
 * @data: pointer to transmit data (one frame)
 * @count: size of transmit frame in bytes
 *
 * Returns the number of bytes written (or error code).
 */
static ssize_t n_hdlc_tty_write(struct tty_struct *tty, struct file *file,
                                const unsigned char *data, size_t count)
{
        struct n_hdlc *n_hdlc = tty->disc_data;
        int error = 0;
        DECLARE_WAITQUEUE(wait, current);
        struct n_hdlc_buf *tbuf;

        pr_debug("%s() called count=%zd\n", __func__, count);

        if (n_hdlc->magic != HDLC_MAGIC)
                return -EIO;

        /* verify frame size */
        if (count > maxframe) {
                pr_debug("%s: truncating user packet from %zu to %d\n",
                         __func__, count, maxframe);
                count = maxframe;
        }

        add_wait_queue(&tty->write_wait, &wait);

        for (;;) {
                set_current_state(TASK_INTERRUPTIBLE);

                tbuf = n_hdlc_buf_get(&n_hdlc->tx_free_buf_list);
                if (tbuf)
                        break;

                if (tty_io_nonblock(tty, file)) {
                        error = -EAGAIN;
                        break;
                }
                schedule();

                if (signal_pending(current)) {
                        error = -EINTR;
                        break;
                }
        }

        __set_current_state(TASK_RUNNING);
        remove_wait_queue(&tty->write_wait, &wait);

        if (!error) {
                /* Copy the caller's data into the transmit buffer */
                memcpy(tbuf->buf, data, count);

                /* Send the data */
                tbuf->count = error = count;
                n_hdlc_buf_put(&n_hdlc->tx_buf_list, tbuf);
                n_hdlc_send_frames(n_hdlc, tty);
        }

        return error;

} /* end of n_hdlc_tty_write() */

/**
 * n_hdlc_tty_ioctl - process IOCTL system call for the tty device.
 * @tty: pointer to tty instance data
 * @file: pointer to open file object for device
 * @cmd: IOCTL command code
 * @arg: argument for IOCTL call (cmd dependent)
 *
 * Returns command dependent result.
 */
static int n_hdlc_tty_ioctl(struct tty_struct *tty, struct file *file,
                            unsigned int cmd, unsigned long arg)
{
        struct n_hdlc *n_hdlc = tty->disc_data;
        int error = 0;
        int count;
        unsigned long flags;
        struct n_hdlc_buf *buf = NULL;

        pr_debug("%s() called %d\n", __func__, cmd);

        /* Verify the status of the device */
        if (n_hdlc->magic != HDLC_MAGIC)
                return -EBADF;

        switch (cmd) {
        case FIONREAD:
                /* report count of read data available */
                /* in next available frame (if any) */
                spin_lock_irqsave(&n_hdlc->rx_buf_list.spinlock, flags);
                buf = list_first_entry_or_null(&n_hdlc->rx_buf_list.list,
                                               struct n_hdlc_buf, list_item);
                if (buf)
                        count = buf->count;
                else
                        count = 0;
                spin_unlock_irqrestore(&n_hdlc->rx_buf_list.spinlock, flags);
                error = put_user(count, (int __user *)arg);
                break;

        case TIOCOUTQ:
                /* get the pending tx byte count in the driver */
                count = tty_chars_in_buffer(tty);
                /* add size of next output frame in queue */
                spin_lock_irqsave(&n_hdlc->tx_buf_list.spinlock, flags);
                buf = list_first_entry_or_null(&n_hdlc->tx_buf_list.list,
                                               struct n_hdlc_buf, list_item);
                if (buf)
                        count += buf->count;
                spin_unlock_irqrestore(&n_hdlc->tx_buf_list.spinlock, flags);
                error = put_user(count, (int __user *)arg);
                break;

        case TCFLSH:
                switch (arg) {
                case TCIOFLUSH:
                case TCOFLUSH:
                        flush_tx_queue(tty);
                }
                fallthrough;    /* to default */

        default:
                error = n_tty_ioctl_helper(tty, file, cmd, arg);
                break;
        }
        return error;

} /* end of n_hdlc_tty_ioctl() */

/**
 * n_hdlc_tty_poll - TTY callback for poll system call
 * @tty: pointer to tty instance data
 * @filp: pointer to open file object for device
 * @wait: wait queue for operations
 *
 * Determine which operations (read/write) will not block and return info
 * to caller.
 * Returns a bit mask containing info on which ops will not block.
 */
static __poll_t n_hdlc_tty_poll(struct tty_struct *tty, struct file *filp,
                                poll_table *wait)
{
        struct n_hdlc *n_hdlc = tty->disc_data;
        __poll_t mask = 0;

        if (n_hdlc->magic != HDLC_MAGIC)
                return 0;

        /*
         * queue the current process into any wait queue that may awaken in the
         * future (read and write)
         */
        poll_wait(filp, &tty->read_wait, wait);
        poll_wait(filp, &tty->write_wait, wait);

        /* set bits for operations that won't block */
        if (!list_empty(&n_hdlc->rx_buf_list.list))
                mask |= EPOLLIN | EPOLLRDNORM;  /* readable */
        if (test_bit(TTY_OTHER_CLOSED, &tty->flags))
                mask |= EPOLLHUP;
        if (tty_hung_up_p(filp))
                mask |= EPOLLHUP;
        if (!tty_is_writelocked(tty) &&
            !list_empty(&n_hdlc->tx_free_buf_list.list))
                mask |= EPOLLOUT | EPOLLWRNORM; /* writable */

        return mask;
} /* end of n_hdlc_tty_poll() */

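/*
 * n_hdlc_alloc_buf - preallocate @count maxframe-sized buffers onto @list,
 * stopping early (with a debug message) if an allocation fails
 */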
static void n_hdlc_alloc_buf(struct n_hdlc_buf_list *list, unsigned int count,
                             const char *name)
{
        struct n_hdlc_buf *buf;
        unsigned int i;

        for (i = 0; i < count; i++) {
                buf = kmalloc(struct_size(buf, buf, maxframe), GFP_KERNEL);
                if (!buf) {
                        pr_debug("%s(), kmalloc() failed for %s buffer %u\n",
                                 __func__, name, i);
                        return;
                }
                n_hdlc_buf_put(list, buf);
        }
}

/**
 * n_hdlc_alloc - allocate an n_hdlc instance data structure
 *
 * Returns a pointer to newly created structure if success, otherwise %NULL
 */
static struct n_hdlc *n_hdlc_alloc(void)
{
        struct n_hdlc *n_hdlc = kzalloc(sizeof(*n_hdlc), GFP_KERNEL);

        if (!n_hdlc)
                return NULL;

        spin_lock_init(&n_hdlc->rx_free_buf_list.spinlock);
        spin_lock_init(&n_hdlc->tx_free_buf_list.spinlock);
        spin_lock_init(&n_hdlc->rx_buf_list.spinlock);
        spin_lock_init(&n_hdlc->tx_buf_list.spinlock);

        INIT_LIST_HEAD(&n_hdlc->rx_free_buf_list.list);
        INIT_LIST_HEAD(&n_hdlc->tx_free_buf_list.list);
        INIT_LIST_HEAD(&n_hdlc->rx_buf_list.list);
        INIT_LIST_HEAD(&n_hdlc->tx_buf_list.list);

        n_hdlc_alloc_buf(&n_hdlc->rx_free_buf_list, DEFAULT_RX_BUF_COUNT, "rx");
        n_hdlc_alloc_buf(&n_hdlc->tx_free_buf_list, DEFAULT_TX_BUF_COUNT, "tx");

        /* Initialize the control block */
        n_hdlc->magic = HDLC_MAGIC;

        return n_hdlc;

} /* end of n_hdlc_alloc() */

/**
 * n_hdlc_buf_return - put the HDLC buffer after the head of the specified list
 * @buf_list: pointer to the buffer list
 * @buf: pointer to the buffer
 */
static void n_hdlc_buf_return(struct n_hdlc_buf_list *buf_list,
                              struct n_hdlc_buf *buf)
{
        unsigned long flags;

        spin_lock_irqsave(&buf_list->spinlock, flags);

        list_add(&buf->list_item, &buf_list->list);
        buf_list->count++;

        spin_unlock_irqrestore(&buf_list->spinlock, flags);
}

/**
 * n_hdlc_buf_put - add specified HDLC buffer to tail of specified list
 * @buf_list: pointer to buffer list
 * @buf: pointer to buffer
 */
static void n_hdlc_buf_put(struct n_hdlc_buf_list *buf_list,
                           struct n_hdlc_buf *buf)
{
        unsigned long flags;

        spin_lock_irqsave(&buf_list->spinlock, flags);

        list_add_tail(&buf->list_item, &buf_list->list);
        buf_list->count++;

        spin_unlock_irqrestore(&buf_list->spinlock, flags);
} /* end of n_hdlc_buf_put() */

/**
 * n_hdlc_buf_get - remove and return an HDLC buffer from list
 * @buf_list: pointer to HDLC buffer list
 *
 * Remove and return an HDLC buffer from the head of the specified HDLC buffer
 * list.
 * Returns a pointer to HDLC buffer if available, otherwise %NULL.
 */
static struct n_hdlc_buf *n_hdlc_buf_get(struct n_hdlc_buf_list *buf_list)
{
        unsigned long flags;
        struct n_hdlc_buf *buf;

        spin_lock_irqsave(&buf_list->spinlock, flags);

        buf = list_first_entry_or_null(&buf_list->list,
                                       struct n_hdlc_buf, list_item);
        if (buf) {
                list_del(&buf->list_item);
                buf_list->count--;
        }

        spin_unlock_irqrestore(&buf_list->spinlock, flags);
        return buf;
} /* end of n_hdlc_buf_get() */

static struct tty_ldisc_ops n_hdlc_ldisc = {
        .owner          = THIS_MODULE,
        .magic          = TTY_LDISC_MAGIC,
        .name           = "hdlc",
        .open           = n_hdlc_tty_open,
        .close          = n_hdlc_tty_close,
        .read           = n_hdlc_tty_read,
        .write          = n_hdlc_tty_write,
        .ioctl          = n_hdlc_tty_ioctl,
        .poll           = n_hdlc_tty_poll,
        .receive_buf    = n_hdlc_tty_receive,
        .write_wakeup   = n_hdlc_tty_wakeup,
        .flush_buffer   = flush_rx_queue,
};

static int __init n_hdlc_init(void)
{
        int status;

        /* range check maxframe arg */
        maxframe = clamp(maxframe, 4096, MAX_HDLC_FRAME_SIZE);

        status = tty_register_ldisc(N_HDLC, &n_hdlc_ldisc);
        if (!status)
                pr_info("N_HDLC line discipline registered with maxframe=%d\n",
                        maxframe);
        else
                pr_err("N_HDLC: error registering line discipline: %d\n",
                       status);

        return status;

} /* end of n_hdlc_init() */

static void __exit n_hdlc_exit(void)
{
        /* Release tty registration of line discipline */
        int status = tty_unregister_ldisc(N_HDLC);

        if (status)
                pr_err("N_HDLC: can't unregister line discipline (err = %d)\n",
                       status);
        else
                pr_info("N_HDLC: line discipline unregistered\n");
}

module_init(n_hdlc_init);
module_exit(n_hdlc_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul Fulghum paulkf@microgate.com");
module_param(maxframe, int, 0);
MODULE_ALIAS_LDISC(N_HDLC);