// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) ST-Ericsson AB 2010
 * Author: Sjur Brendeland
 */

#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/tty.h>
#include <linux/file.h>
#include <linux/if_arp.h>
#include <net/caif/caif_device.h>
#include <net/caif/cfcnfg.h>
#include <linux/err.h>
#include <linux/debugfs.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Sjur Brendeland");
MODULE_DESCRIPTION("CAIF serial device TTY line discipline");
MODULE_ALIAS_LDISC(N_CAIF);

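/*
 * Flow-control watermarks for the TX queue (ser_device.head): caif_xmit()
 * signals flow OFF to the CAIF stack once more than SEND_QUEUE_HIGH packets
 * are queued, and handle_tx() signals flow ON again when the queue drains
 * to SEND_QUEUE_LOW or below.  CAIF_SENDING and CAIF_FLOW_OFF_SENT are bit
 * numbers used with the atomic bitops on ser_device.state.
 */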
#define SEND_QUEUE_LOW 10
#define SEND_QUEUE_HIGH 100
#define CAIF_SENDING 1		/* Bit 1 = 0x02 */
#define CAIF_FLOW_OFF_SENT 4	/* Bit 4 = 0x10 */
#define MAX_WRITE_CHUNK 4096
#define ON 1
#define OFF 0
#define CAIF_MAX_MTU 4096

static DEFINE_SPINLOCK(ser_lock);
static LIST_HEAD(ser_list);
static LIST_HEAD(ser_release_list);

static bool ser_loop;
module_param(ser_loop, bool, 0444);
MODULE_PARM_DESC(ser_loop, "Run in simulated loopback mode.");

static bool ser_use_stx = true;
module_param(ser_use_stx, bool, 0444);
MODULE_PARM_DESC(ser_use_stx, "STX enabled or not.");

static bool ser_use_fcs = true;
module_param(ser_use_fcs, bool, 0444);
MODULE_PARM_DESC(ser_use_fcs, "FCS enabled or not.");

static int ser_write_chunk = MAX_WRITE_CHUNK;
module_param(ser_write_chunk, int, 0444);
MODULE_PARM_DESC(ser_write_chunk, "Maximum size of data written to UART.");

static struct dentry *debugfsdir;

static int caif_net_open(struct net_device *dev);
static int caif_net_close(struct net_device *dev);

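/*
 * Per-tty state.  One ser_device is allocated as the netdev_priv() area of
 * the CAIF network device created in ldisc_open(), and ties together the
 * net_device, the underlying tty and the TX queue.
 */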
struct ser_device {
	struct caif_dev_common common;
	struct list_head node;
	struct net_device *dev;
	struct sk_buff_head head;
	struct tty_struct *tty;
	bool tx_started;
	unsigned long state;
#ifdef CONFIG_DEBUG_FS
	struct dentry *debugfs_tty_dir;
	struct debugfs_blob_wrapper tx_blob;
	struct debugfs_blob_wrapper rx_blob;
	u8 rx_data[128];
	u8 tx_data[128];
	u8 tty_status;
#endif
};

static void caifdev_setup(struct net_device *dev);
static void ldisc_tx_wakeup(struct tty_struct *tty);
#ifdef CONFIG_DEBUG_FS
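/*
 * Pack a few tty flags into a single byte so the current tty state can be
 * inspected through the "tty_status" debugfs file created in debugfs_init().
 */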
static inline void update_tty_status(struct ser_device *ser)
{
	ser->tty_status =
		ser->tty->stopped << 5 |
		ser->tty->flow_stopped << 3 |
		ser->tty->packet << 2 |
		ser->tty->port->low_latency << 1;
}

static inline void debugfs_init(struct ser_device *ser, struct tty_struct *tty)
{
	ser->debugfs_tty_dir = debugfs_create_dir(tty->name, debugfsdir);

	debugfs_create_blob("last_tx_msg", 0400, ser->debugfs_tty_dir,
			    &ser->tx_blob);

	debugfs_create_blob("last_rx_msg", 0400, ser->debugfs_tty_dir,
			    &ser->rx_blob);

	debugfs_create_x32("ser_state", 0400, ser->debugfs_tty_dir,
			   (u32 *)&ser->state);

	debugfs_create_x8("tty_status", 0400, ser->debugfs_tty_dir,
			  &ser->tty_status);

	ser->tx_blob.data = ser->tx_data;
	ser->tx_blob.size = 0;
	ser->rx_blob.data = ser->rx_data;
	ser->rx_blob.size = 0;
}

static inline void debugfs_deinit(struct ser_device *ser)
{
	debugfs_remove_recursive(ser->debugfs_tty_dir);
}

static inline void debugfs_rx(struct ser_device *ser, const u8 *data, int size)
{
	if (size > sizeof(ser->rx_data))
		size = sizeof(ser->rx_data);
	memcpy(ser->rx_data, data, size);
	ser->rx_blob.data = ser->rx_data;
	ser->rx_blob.size = size;
}

static inline void debugfs_tx(struct ser_device *ser, const u8 *data, int size)
{
	if (size > sizeof(ser->tx_data))
		size = sizeof(ser->tx_data);
	memcpy(ser->tx_data, data, size);
	ser->tx_blob.data = ser->tx_data;
	ser->tx_blob.size = size;
}
#else
static inline void debugfs_init(struct ser_device *ser, struct tty_struct *tty)
{
}

static inline void debugfs_deinit(struct ser_device *ser)
{
}

static inline void update_tty_status(struct ser_device *ser)
{
}

static inline void debugfs_rx(struct ser_device *ser, const u8 *data, int size)
{
}

static inline void debugfs_tx(struct ser_device *ser, const u8 *data, int size)
{
}

#endif

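/*
 * Called by the tty layer whenever data has been received on the port.
 * The bytes are copied into a fresh skb and pushed up the CAIF stack via
 * netif_rx_ni(); flag bytes (break/overrun) are currently ignored.
 */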
static void ldisc_receive(struct tty_struct *tty, const u8 *data,
			  char *flags, int count)
{
	struct sk_buff *skb = NULL;
	struct ser_device *ser;
	int ret;

	ser = tty->disc_data;

	/*
	 * NOTE: flags may contain information about break or overrun.
	 * This is not yet handled.
	 */

	/*
	 * Workaround for garbage at start of transmission; only in effect
	 * when STX handling is disabled.
	 */
	if (!ser->common.use_stx && !ser->tx_started) {
		dev_info(&ser->dev->dev,
			 "Bytes received before initial transmission - bytes discarded.\n");
		return;
	}

	BUG_ON(ser->dev == NULL);

	/* Get a suitable caif packet and copy in data. */
	skb = netdev_alloc_skb(ser->dev, count + 1);
	if (skb == NULL)
		return;
	skb_put_data(skb, data, count);

	skb->protocol = htons(ETH_P_CAIF);
	skb_reset_mac_header(skb);
	debugfs_rx(ser, data, count);
	/* Push received packet up the stack. */
	ret = netif_rx_ni(skb);
	if (!ret) {
		ser->dev->stats.rx_packets++;
		ser->dev->stats.rx_bytes += count;
	} else
		++ser->dev->stats.rx_dropped;
	update_tty_status(ser);
}

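/*
 * Drain the TX queue into the tty.  The CAIF_SENDING bit makes sure only one
 * context writes at a time; partially written skbs are trimmed with
 * skb_pull() and stay at the head of the queue until the tty has room again.
 */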
static int handle_tx(struct ser_device *ser)
{
	struct tty_struct *tty;
	struct sk_buff *skb;
	int tty_wr, len, room;

	tty = ser->tty;
	ser->tx_started = true;

	/* Enter critical section */
	if (test_and_set_bit(CAIF_SENDING, &ser->state))
		return 0;

	/* skb_peek is safe because handle_tx is called after skb_queue_tail */
	while ((skb = skb_peek(&ser->head)) != NULL) {

		/* Make sure you don't write too much */
		len = skb->len;
		room = tty_write_room(tty);
		if (!room)
			break;
		if (room > ser_write_chunk)
			room = ser_write_chunk;
		if (len > room)
			len = room;

		/* Write to tty or loopback */
		if (!ser_loop) {
			tty_wr = tty->ops->write(tty, skb->data, len);
			update_tty_status(ser);
		} else {
			tty_wr = len;
			ldisc_receive(tty, skb->data, NULL, len);
		}
		ser->dev->stats.tx_packets++;
		ser->dev->stats.tx_bytes += tty_wr;

		/* Error on TTY ?! */
		if (tty_wr < 0)
			goto error;
		/* Reduce buffer written, and discard if empty */
		skb_pull(skb, tty_wr);
		if (skb->len == 0) {
			struct sk_buff *tmp = skb_dequeue(&ser->head);
			WARN_ON(tmp != skb);
			dev_consume_skb_any(skb);
		}
	}
	/* Send flow on again once the queue has drained to the low watermark */
	if (ser->head.qlen <= SEND_QUEUE_LOW &&
	    test_and_clear_bit(CAIF_FLOW_OFF_SENT, &ser->state) &&
	    ser->common.flowctrl != NULL)
		ser->common.flowctrl(ser->dev, ON);
	clear_bit(CAIF_SENDING, &ser->state);
	return 0;
error:
	clear_bit(CAIF_SENDING, &ser->state);
	return tty_wr;
}

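/*
 * ndo_start_xmit: queue the skb and try to push it out immediately.
 * Flow is turned off towards the CAIF stack once, when the queue grows
 * past SEND_QUEUE_HIGH.
 */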
static int caif_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ser_device *ser;

	BUG_ON(dev == NULL);
	ser = netdev_priv(dev);

	/* Send flow off once, on high water mark */
	if (ser->head.qlen > SEND_QUEUE_HIGH &&
	    !test_and_set_bit(CAIF_FLOW_OFF_SENT, &ser->state) &&
	    ser->common.flowctrl != NULL)
		ser->common.flowctrl(ser->dev, OFF);

	skb_queue_tail(&ser->head, skb);
	return handle_tx(ser);
}

static void ldisc_tx_wakeup(struct tty_struct *tty)
{
	struct ser_device *ser;

	ser = tty->disc_data;
	BUG_ON(ser == NULL);
	WARN_ON(ser->tty != tty);
	handle_tx(ser);
}

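/*
 * Deferred teardown of closed devices.  ldisc_close() only moves the device
 * onto ser_release_list; this work function, running with the RTNL held,
 * does the actual dev_close()/unregister_netdevice() and debugfs cleanup.
 */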
static void ser_release(struct work_struct *work)
{
	struct list_head list;
	struct ser_device *ser, *tmp;

	spin_lock(&ser_lock);
	list_replace_init(&ser_release_list, &list);
	spin_unlock(&ser_lock);

	if (!list_empty(&list)) {
		rtnl_lock();
		list_for_each_entry_safe(ser, tmp, &list, node) {
			dev_close(ser->dev);
			unregister_netdevice(ser->dev);
			debugfs_deinit(ser);
		}
		rtnl_unlock();
	}
}

static DECLARE_WORK(ser_release_work, ser_release);

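/*
 * ldisc_open() runs when user space attaches this line discipline to a tty.
 * It allocates and registers a CAIF network device named "cf<ttyname>" and
 * wires it up to the tty.  As a rough sketch (not part of this driver), the
 * attach from user space is typically something like:
 *
 *	int ldisc = N_CAIF;
 *	ioctl(tty_fd, TIOCSETD, &ldisc);
 */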
static int ldisc_open(struct tty_struct *tty)
{
	struct ser_device *ser;
	struct net_device *dev;
	char name[64];
	int result;

	/* No write no play */
	if (tty->ops->write == NULL)
		return -EOPNOTSUPP;
	if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_TTY_CONFIG))
		return -EPERM;

	/* release devices to avoid name collision */
	ser_release(NULL);

	result = snprintf(name, sizeof(name), "cf%s", tty->name);
	if (result >= IFNAMSIZ)
		return -EINVAL;
	dev = alloc_netdev(sizeof(*ser), name, NET_NAME_UNKNOWN,
			   caifdev_setup);
	if (!dev)
		return -ENOMEM;

	ser = netdev_priv(dev);
	ser->tty = tty_kref_get(tty);
	ser->dev = dev;
	debugfs_init(ser, tty);
	tty->receive_room = N_TTY_BUF_SIZE;
	tty->disc_data = ser;
	set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
	rtnl_lock();
	result = register_netdevice(dev);
	if (result) {
		rtnl_unlock();
		free_netdev(dev);
		return -ENODEV;
	}

	spin_lock(&ser_lock);
	list_add(&ser->node, &ser_list);
	spin_unlock(&ser_lock);
	rtnl_unlock();
	netif_stop_queue(dev);
	update_tty_status(ser);
	return 0;
}

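/*
 * Detach from the tty: drop the tty reference and hand the device over to
 * the ser_release() worker, which performs the actual unregister.
 */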
static void ldisc_close(struct tty_struct *tty)
{
	struct ser_device *ser = tty->disc_data;

	tty_kref_put(ser->tty);

	spin_lock(&ser_lock);
	list_move(&ser->node, &ser_release_list);
	spin_unlock(&ser_lock);
	schedule_work(&ser_release_work);
}

/* The line discipline structure. */
static struct tty_ldisc_ops caif_ldisc = {
	.owner = THIS_MODULE,
	.magic = TTY_LDISC_MAGIC,
	.name = "n_caif",
	.open = ldisc_open,
	.close = ldisc_close,
	.receive_buf = ldisc_receive,
	.write_wakeup = ldisc_tx_wakeup
};

static int register_ldisc(void)
{
	int result;

	result = tty_register_ldisc(N_CAIF, &caif_ldisc);
	if (result < 0) {
		pr_err("cannot register CAIF ldisc=%d err=%d\n", N_CAIF,
		       result);
		return result;
	}
	return result;
}

static const struct net_device_ops netdev_ops = {
	.ndo_open = caif_net_open,
	.ndo_stop = caif_net_close,
	.ndo_start_xmit = caif_xmit
};

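/*
 * net_device setup callback passed to alloc_netdev(): configures the
 * interface as a point-to-point CAIF link with no queue and initialises
 * the per-device TX queue and CAIF link-layer parameters.
 */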
static void caifdev_setup(struct net_device *dev)
{
	struct ser_device *serdev = netdev_priv(dev);

	dev->features = 0;
	dev->netdev_ops = &netdev_ops;
	dev->type = ARPHRD_CAIF;
	dev->flags = IFF_POINTOPOINT | IFF_NOARP;
	dev->mtu = CAIF_MAX_MTU;
	dev->priv_flags |= IFF_NO_QUEUE;
	dev->needs_free_netdev = true;
	skb_queue_head_init(&serdev->head);
	serdev->common.link_select = CAIF_LINK_LOW_LATENCY;
	serdev->common.use_frag = true;
	serdev->common.use_stx = ser_use_stx;
	serdev->common.use_fcs = ser_use_fcs;
	serdev->dev = dev;
}

static int caif_net_open(struct net_device *dev)
{
	netif_wake_queue(dev);
	return 0;
}

static int caif_net_close(struct net_device *dev)
{
	netif_stop_queue(dev);
	return 0;
}

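/*
 * Module init/exit: register the N_CAIF line discipline and create the
 * debugfs root on load; on unload, release every remaining device before
 * unregistering the line discipline.
 */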
static int __init caif_ser_init(void)
{
	int ret;

	ret = register_ldisc();
	debugfsdir = debugfs_create_dir("caif_serial", NULL);
	return ret;
}

static void __exit caif_ser_exit(void)
{
	spin_lock(&ser_lock);
	list_splice(&ser_list, &ser_release_list);
	spin_unlock(&ser_lock);
	ser_release(NULL);
	cancel_work_sync(&ser_release_work);
	tty_unregister_ldisc(N_CAIF);
	debugfs_remove_recursive(debugfsdir);
}

module_init(caif_ser_init);
module_exit(caif_ser_exit);