// SPDX-License-Identifier: GPL-2.0-only
/*
 * cec-api.c - HDMI Consumer Electronics Control framework - API
 *
 * Copyright 2016 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
 */

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/ktime.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/version.h>

#include <media/cec-pin.h>
#include "cec-priv.h"
#include "cec-pin-priv.h"

static inline struct cec_devnode *cec_devnode_data(struct file *filp)
{
	struct cec_fh *fh = filp->private_data;

	return &fh->adap->devnode;
}

/* CEC file operations */

static __poll_t cec_poll(struct file *filp,
			 struct poll_table_struct *poll)
{
	struct cec_fh *fh = filp->private_data;
	struct cec_adapter *adap = fh->adap;
	__poll_t res = 0;

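	/*
	 * Register with the waitqueue before checking any state, so a
	 * wakeup cannot be missed between the checks below and the
	 * caller going to sleep.
	 */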
	poll_wait(filp, &fh->wait, poll);
	if (!cec_is_registered(adap))
		return EPOLLERR | EPOLLHUP;
	mutex_lock(&adap->lock);
	if (adap->is_configured &&
	    adap->transmit_queue_sz < CEC_MAX_MSG_TX_QUEUE_SZ)
		res |= EPOLLOUT | EPOLLWRNORM;
	if (fh->queued_msgs)
		res |= EPOLLIN | EPOLLRDNORM;
	if (fh->total_queued_events)
		res |= EPOLLPRI;
	mutex_unlock(&adap->lock);
	return res;
}

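/*
 * Return true if the filehandle may not use the adapter right now:
 * it is neither the exclusive initiator nor the exclusive follower,
 * and either another filehandle is the exclusive initiator or this
 * filehandle is not in initiator mode.
 */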
static bool cec_is_busy(const struct cec_adapter *adap,
			const struct cec_fh *fh)
{
	bool valid_initiator = adap->cec_initiator && adap->cec_initiator == fh;
	bool valid_follower = adap->cec_follower && adap->cec_follower == fh;

	/*
	 * Exclusive initiators and followers can always access the CEC adapter
	 */
	if (valid_initiator || valid_follower)
		return false;
	/*
	 * All others can only access the CEC adapter if there is no
	 * exclusive initiator and they are in INITIATOR mode.
	 */
	return adap->cec_initiator ||
	       fh->mode_initiator == CEC_MODE_NO_INITIATOR;
}

static long cec_adap_g_caps(struct cec_adapter *adap,
			    struct cec_caps __user *parg)
{
	struct cec_caps caps = {};

	strscpy(caps.driver, adap->devnode.dev.parent->driver->name,
		sizeof(caps.driver));
	strscpy(caps.name, adap->name, sizeof(caps.name));
	caps.available_log_addrs = adap->available_log_addrs;
	caps.capabilities = adap->capabilities;
	caps.version = LINUX_VERSION_CODE;
	if (copy_to_user(parg, &caps, sizeof(caps)))
		return -EFAULT;
	return 0;
}

static long cec_adap_g_phys_addr(struct cec_adapter *adap,
				 __u16 __user *parg)
{
	u16 phys_addr;

	mutex_lock(&adap->lock);
	phys_addr = adap->phys_addr;
	mutex_unlock(&adap->lock);
	if (copy_to_user(parg, &phys_addr, sizeof(phys_addr)))
		return -EFAULT;
	return 0;
}

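/*
 * A CEC physical address consists of four nibbles a.b.c.d. Besides the
 * special CEC_PHYS_ADDR_INVALID value, an address is only valid if no
 * zero nibble is followed by a non-zero nibble: 1.1.0.0 is valid,
 * 1.0.1.0 is not.
 */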
static int cec_validate_phys_addr(u16 phys_addr)
{
	int i;

	if (phys_addr == CEC_PHYS_ADDR_INVALID)
		return 0;
	for (i = 0; i < 16; i += 4)
		if (phys_addr & (0xf << i))
			break;
	if (i == 16)
		return 0;
	for (i += 4; i < 16; i += 4)
		if ((phys_addr & (0xf << i)) == 0)
			return -EINVAL;
	return 0;
}

static long cec_adap_s_phys_addr(struct cec_adapter *adap, struct cec_fh *fh,
				 bool block, __u16 __user *parg)
{
	u16 phys_addr;
	long err;

	if (!(adap->capabilities & CEC_CAP_PHYS_ADDR))
		return -ENOTTY;
	if (copy_from_user(&phys_addr, parg, sizeof(phys_addr)))
		return -EFAULT;

	err = cec_validate_phys_addr(phys_addr);
	if (err)
		return err;
	mutex_lock(&adap->lock);
	if (cec_is_busy(adap, fh))
		err = -EBUSY;
	else
		__cec_s_phys_addr(adap, phys_addr, block);
	mutex_unlock(&adap->lock);
	return err;
}

static long cec_adap_g_log_addrs(struct cec_adapter *adap,
				 struct cec_log_addrs __user *parg)
{
	struct cec_log_addrs log_addrs;

	mutex_lock(&adap->lock);
	/*
	 * We use memcpy here instead of assignment since there is a
	 * hole at the end of struct cec_log_addrs that an assignment
	 * might ignore. So when we do copy_to_user() we could leak
	 * one byte of memory.
	 */
	memcpy(&log_addrs, &adap->log_addrs, sizeof(log_addrs));
	if (!adap->is_configured)
		memset(log_addrs.log_addr, CEC_LOG_ADDR_INVALID,
		       sizeof(log_addrs.log_addr));
	mutex_unlock(&adap->lock);

	if (copy_to_user(parg, &log_addrs, sizeof(log_addrs)))
		return -EFAULT;
	return 0;
}

static long cec_adap_s_log_addrs(struct cec_adapter *adap, struct cec_fh *fh,
				 bool block, struct cec_log_addrs __user *parg)
{
	struct cec_log_addrs log_addrs;
	long err = -EBUSY;

	if (!(adap->capabilities & CEC_CAP_LOG_ADDRS))
		return -ENOTTY;
	if (copy_from_user(&log_addrs, parg, sizeof(log_addrs)))
		return -EFAULT;
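	/* Mask out all flags except those that userspace may set */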
	log_addrs.flags &= CEC_LOG_ADDRS_FL_ALLOW_UNREG_FALLBACK |
			   CEC_LOG_ADDRS_FL_ALLOW_RC_PASSTHRU |
			   CEC_LOG_ADDRS_FL_CDC_ONLY;
	mutex_lock(&adap->lock);
	if (!adap->is_configuring &&
	    (!log_addrs.num_log_addrs || !adap->is_configured) &&
	    !cec_is_busy(adap, fh)) {
		err = __cec_s_log_addrs(adap, &log_addrs, block);
		if (!err)
			log_addrs = adap->log_addrs;
	}
	mutex_unlock(&adap->lock);
	if (err)
		return err;
	if (copy_to_user(parg, &log_addrs, sizeof(log_addrs)))
		return -EFAULT;
	return 0;
}

static long cec_transmit(struct cec_adapter *adap, struct cec_fh *fh,
			 bool block, struct cec_msg __user *parg)
{
	struct cec_msg msg = {};
	long err = 0;

	if (!(adap->capabilities & CEC_CAP_TRANSMIT))
		return -ENOTTY;
	if (copy_from_user(&msg, parg, sizeof(msg)))
		return -EFAULT;

	mutex_lock(&adap->lock);
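	/*
	 * Transmitting needs logical addresses: EPERM if none were ever
	 * configured, ENONET while claiming them is still in progress.
	 */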
	if (adap->log_addrs.num_log_addrs == 0)
		err = -EPERM;
	else if (adap->is_configuring)
		err = -ENONET;
	else if (cec_is_busy(adap, fh))
		err = -EBUSY;
	else
		err = cec_transmit_msg_fh(adap, &msg, fh, block);
	mutex_unlock(&adap->lock);
	if (err)
		return err;
	if (copy_to_user(parg, &msg, sizeof(msg)))
		return -EFAULT;
	return 0;
}

/* Called by CEC_RECEIVE: wait for a message to arrive */
static int cec_receive_msg(struct cec_fh *fh, struct cec_msg *msg, bool block)
{
	u32 timeout = msg->timeout;
	int res;

	do {
		mutex_lock(&fh->lock);
		/* Are there received messages queued up? */
		if (fh->queued_msgs) {
			/* Yes, return the first one */
			struct cec_msg_entry *entry =
				list_first_entry(&fh->msgs,
						 struct cec_msg_entry, list);

			list_del(&entry->list);
			*msg = entry->msg;
			kfree(entry);
			fh->queued_msgs--;
			mutex_unlock(&fh->lock);
			/* restore original timeout value */
			msg->timeout = timeout;
			return 0;
		}

		/* No, return EAGAIN in non-blocking mode or wait */
		mutex_unlock(&fh->lock);

		/* Return when in non-blocking mode */
		if (!block)
			return -EAGAIN;

		if (msg->timeout) {
			/* The user specified a timeout */
			res = wait_event_interruptible_timeout(fh->wait,
							       fh->queued_msgs,
				msecs_to_jiffies(msg->timeout));
			if (res == 0)
				res = -ETIMEDOUT;
			else if (res > 0)
				res = 0;
		} else {
			/* Wait indefinitely */
			res = wait_event_interruptible(fh->wait,
						       fh->queued_msgs);
		}
		/* Exit on error, otherwise loop to get the new message */
	} while (!res);
	return res;
}

static long cec_receive(struct cec_adapter *adap, struct cec_fh *fh,
			bool block, struct cec_msg __user *parg)
{
	struct cec_msg msg = {};
	long err;

	if (copy_from_user(&msg, parg, sizeof(msg)))
		return -EFAULT;

	err = cec_receive_msg(fh, &msg, block);
	if (err)
		return err;
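	/* The flags field is not used for received messages, so clear it */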
	msg.flags = 0;
	if (copy_to_user(parg, &msg, sizeof(msg)))
		return -EFAULT;
	return 0;
}

static long cec_dqevent(struct cec_adapter *adap, struct cec_fh *fh,
			bool block, struct cec_event __user *parg)
{
	struct cec_event_entry *ev = NULL;
	u64 ts = ~0ULL;
	unsigned int i;
	unsigned int ev_idx;
	long err = 0;

	mutex_lock(&fh->lock);
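	/* Drop the lock while sleeping so new events can still be queued */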
	while (!fh->total_queued_events && block) {
		mutex_unlock(&fh->lock);
		err = wait_event_interruptible(fh->wait,
					       fh->total_queued_events);
		if (err)
			return err;
		mutex_lock(&fh->lock);
	}

	/* Find the oldest event */
	for (i = 0; i < CEC_NUM_EVENTS; i++) {
		struct cec_event_entry *entry =
			list_first_entry_or_null(&fh->events[i],
						 struct cec_event_entry, list);

		if (entry && entry->ev.ts <= ts) {
			ev = entry;
			ev_idx = i;
			ts = ev->ev.ts;
		}
	}

	if (!ev) {
		err = -EAGAIN;
		goto unlock;
	}
	list_del(&ev->list);

	if (copy_to_user(parg, &ev->ev, sizeof(ev->ev)))
		err = -EFAULT;
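	/*
	 * Core event entries are embedded in the filehandle itself; only
	 * the dynamically allocated non-core entries must be freed.
	 */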
	if (ev_idx >= CEC_NUM_CORE_EVENTS)
		kfree(ev);
	fh->queued_events[ev_idx]--;
	fh->total_queued_events--;

unlock:
	mutex_unlock(&fh->lock);
	return err;
}

static long cec_g_mode(struct cec_adapter *adap, struct cec_fh *fh,
		       u32 __user *parg)
{
	u32 mode = fh->mode_initiator | fh->mode_follower;

	if (copy_to_user(parg, &mode, sizeof(mode)))
		return -EFAULT;
	return 0;
}

static long cec_s_mode(struct cec_adapter *adap, struct cec_fh *fh,
		       u32 __user *parg)
{
	u32 mode;
	u8 mode_initiator;
	u8 mode_follower;
	bool send_pin_event = false;
	long err = 0;

	if (copy_from_user(&mode, parg, sizeof(mode)))
		return -EFAULT;
	if (mode & ~(CEC_MODE_INITIATOR_MSK | CEC_MODE_FOLLOWER_MSK)) {
		dprintk(1, "%s: invalid mode bits set\n", __func__);
		return -EINVAL;
	}

	mode_initiator = mode & CEC_MODE_INITIATOR_MSK;
	mode_follower = mode & CEC_MODE_FOLLOWER_MSK;

	if (mode_initiator > CEC_MODE_EXCL_INITIATOR ||
	    mode_follower > CEC_MODE_MONITOR_ALL) {
		dprintk(1, "%s: unknown mode\n", __func__);
		return -EINVAL;
	}

	if (mode_follower == CEC_MODE_MONITOR_ALL &&
	    !(adap->capabilities & CEC_CAP_MONITOR_ALL)) {
		dprintk(1, "%s: MONITOR_ALL not supported\n", __func__);
		return -EINVAL;
	}

	if (mode_follower == CEC_MODE_MONITOR_PIN &&
	    !(adap->capabilities & CEC_CAP_MONITOR_PIN)) {
		dprintk(1, "%s: MONITOR_PIN not supported\n", __func__);
		return -EINVAL;
	}

	/* Follower modes should always be able to send CEC messages */
	if ((mode_initiator == CEC_MODE_NO_INITIATOR ||
	     !(adap->capabilities & CEC_CAP_TRANSMIT)) &&
	    mode_follower >= CEC_MODE_FOLLOWER &&
	    mode_follower <= CEC_MODE_EXCL_FOLLOWER_PASSTHRU) {
		dprintk(1, "%s: cannot transmit\n", __func__);
		return -EINVAL;
	}

	/* Monitor modes require CEC_MODE_NO_INITIATOR */
	if (mode_initiator && mode_follower >= CEC_MODE_MONITOR_PIN) {
		dprintk(1, "%s: monitor modes require NO_INITIATOR\n",
			__func__);
		return -EINVAL;
	}

	/* Monitor modes require CAP_NET_ADMIN */
	if (mode_follower >= CEC_MODE_MONITOR_PIN && !capable(CAP_NET_ADMIN))
		return -EPERM;

	mutex_lock(&adap->lock);
	/*
	 * You can't become exclusive follower if someone else already
	 * has that job.
	 */
	if ((mode_follower == CEC_MODE_EXCL_FOLLOWER ||
	     mode_follower == CEC_MODE_EXCL_FOLLOWER_PASSTHRU) &&
	    adap->cec_follower && adap->cec_follower != fh)
		err = -EBUSY;
	/*
	 * You can't become exclusive initiator if someone else already
	 * has that job.
	 */
	if (mode_initiator == CEC_MODE_EXCL_INITIATOR &&
	    adap->cec_initiator && adap->cec_initiator != fh)
		err = -EBUSY;

	if (!err) {
		bool old_mon_all = fh->mode_follower == CEC_MODE_MONITOR_ALL;
		bool new_mon_all = mode_follower == CEC_MODE_MONITOR_ALL;

		if (old_mon_all != new_mon_all) {
			if (new_mon_all)
				err = cec_monitor_all_cnt_inc(adap);
			else
				cec_monitor_all_cnt_dec(adap);
		}
	}

	if (!err) {
		bool old_mon_pin = fh->mode_follower == CEC_MODE_MONITOR_PIN;
		bool new_mon_pin = mode_follower == CEC_MODE_MONITOR_PIN;

		if (old_mon_pin != new_mon_pin) {
			send_pin_event = new_mon_pin;
			if (new_mon_pin)
				err = cec_monitor_pin_cnt_inc(adap);
			else
				cec_monitor_pin_cnt_dec(adap);
		}
	}

	if (err) {
		mutex_unlock(&adap->lock);
		return err;
	}

	if (fh->mode_follower == CEC_MODE_FOLLOWER)
		adap->follower_cnt--;
	if (mode_follower == CEC_MODE_FOLLOWER)
		adap->follower_cnt++;
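	/* A new pin monitor is first sent the current state of the CEC pin */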
	if (send_pin_event) {
		struct cec_event ev = {
			.flags = CEC_EVENT_FL_INITIAL_STATE,
		};

		ev.event = adap->cec_pin_is_high ? CEC_EVENT_PIN_CEC_HIGH :
						   CEC_EVENT_PIN_CEC_LOW;
		cec_queue_event_fh(fh, &ev, 0);
	}
	if (mode_follower == CEC_MODE_EXCL_FOLLOWER ||
	    mode_follower == CEC_MODE_EXCL_FOLLOWER_PASSTHRU) {
		adap->passthrough =
			mode_follower == CEC_MODE_EXCL_FOLLOWER_PASSTHRU;
		adap->cec_follower = fh;
	} else if (adap->cec_follower == fh) {
		adap->passthrough = false;
		adap->cec_follower = NULL;
	}
	if (mode_initiator == CEC_MODE_EXCL_INITIATOR)
		adap->cec_initiator = fh;
	else if (adap->cec_initiator == fh)
		adap->cec_initiator = NULL;
	fh->mode_initiator = mode_initiator;
	fh->mode_follower = mode_follower;
	mutex_unlock(&adap->lock);
	return 0;
}

static long cec_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct cec_fh *fh = filp->private_data;
	struct cec_adapter *adap = fh->adap;
	bool block = !(filp->f_flags & O_NONBLOCK);
	void __user *parg = (void __user *)arg;

	if (!cec_is_registered(adap))
		return -ENODEV;

	switch (cmd) {
	case CEC_ADAP_G_CAPS:
		return cec_adap_g_caps(adap, parg);

	case CEC_ADAP_G_PHYS_ADDR:
		return cec_adap_g_phys_addr(adap, parg);

	case CEC_ADAP_S_PHYS_ADDR:
		return cec_adap_s_phys_addr(adap, fh, block, parg);

	case CEC_ADAP_G_LOG_ADDRS:
		return cec_adap_g_log_addrs(adap, parg);

	case CEC_ADAP_S_LOG_ADDRS:
		return cec_adap_s_log_addrs(adap, fh, block, parg);

	case CEC_TRANSMIT:
		return cec_transmit(adap, fh, block, parg);

	case CEC_RECEIVE:
		return cec_receive(adap, fh, block, parg);

	case CEC_DQEVENT:
		return cec_dqevent(adap, fh, block, parg);

	case CEC_G_MODE:
		return cec_g_mode(adap, fh, parg);

	case CEC_S_MODE:
		return cec_s_mode(adap, fh, parg);

	default:
		return -ENOTTY;
	}
}

static int cec_open(struct inode *inode, struct file *filp)
{
	struct cec_devnode *devnode =
		container_of(inode->i_cdev, struct cec_devnode, cdev);
	struct cec_adapter *adap = to_cec_adapter(devnode);
	struct cec_fh *fh = kzalloc(sizeof(*fh), GFP_KERNEL);
	/*
	 * Initial events that are automatically sent when the cec device is
	 * opened.
	 */
	struct cec_event ev = {
		.event = CEC_EVENT_STATE_CHANGE,
		.flags = CEC_EVENT_FL_INITIAL_STATE,
	};
	unsigned int i;
	int err;

	if (!fh)
		return -ENOMEM;

	INIT_LIST_HEAD(&fh->msgs);
	INIT_LIST_HEAD(&fh->xfer_list);
	for (i = 0; i < CEC_NUM_EVENTS; i++)
		INIT_LIST_HEAD(&fh->events[i]);
	mutex_init(&fh->lock);
	init_waitqueue_head(&fh->wait);

	fh->mode_initiator = CEC_MODE_INITIATOR;
	fh->adap = adap;

	err = cec_get_device(devnode);
	if (err) {
		kfree(fh);
		return err;
	}

	mutex_lock(&devnode->lock);
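	/*
	 * On the first open of an adapter that needs no HPD and has no
	 * physical address yet, enable it so it can be used (e.g. for
	 * monitoring) while still unconfigured.
	 */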
	if (list_empty(&devnode->fhs) &&
	    !adap->needs_hpd &&
	    adap->phys_addr == CEC_PHYS_ADDR_INVALID) {
		err = adap->ops->adap_enable(adap, true);
		if (err) {
			mutex_unlock(&devnode->lock);
			kfree(fh);
			return err;
		}
	}
	filp->private_data = fh;

	/* Queue up initial state events */
	ev.state_change.phys_addr = adap->phys_addr;
	ev.state_change.log_addr_mask = adap->log_addrs.log_addr_mask;
	cec_queue_event_fh(fh, &ev, 0);
#ifdef CONFIG_CEC_PIN
	if (adap->pin && adap->pin->ops->read_hpd) {
		err = adap->pin->ops->read_hpd(adap);
		if (err >= 0) {
			ev.event = err ? CEC_EVENT_PIN_HPD_HIGH :
					 CEC_EVENT_PIN_HPD_LOW;
			cec_queue_event_fh(fh, &ev, 0);
		}
	}
	if (adap->pin && adap->pin->ops->read_5v) {
		err = adap->pin->ops->read_5v(adap);
		if (err >= 0) {
			ev.event = err ? CEC_EVENT_PIN_5V_HIGH :
					 CEC_EVENT_PIN_5V_LOW;
			cec_queue_event_fh(fh, &ev, 0);
		}
	}
#endif

	list_add(&fh->list, &devnode->fhs);
	mutex_unlock(&devnode->lock);

	return 0;
}

/* Override for the release function */
static int cec_release(struct inode *inode, struct file *filp)
{
	struct cec_devnode *devnode = cec_devnode_data(filp);
	struct cec_adapter *adap = to_cec_adapter(devnode);
	struct cec_fh *fh = filp->private_data;
	unsigned int i;

	mutex_lock(&adap->lock);
	if (adap->cec_initiator == fh)
		adap->cec_initiator = NULL;
	if (adap->cec_follower == fh) {
		adap->cec_follower = NULL;
		adap->passthrough = false;
	}
	if (fh->mode_follower == CEC_MODE_FOLLOWER)
		adap->follower_cnt--;
	if (fh->mode_follower == CEC_MODE_MONITOR_PIN)
		cec_monitor_pin_cnt_dec(adap);
	if (fh->mode_follower == CEC_MODE_MONITOR_ALL)
		cec_monitor_all_cnt_dec(adap);
	mutex_unlock(&adap->lock);

	mutex_lock(&devnode->lock);
	list_del(&fh->list);
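	/*
	 * Mirror of cec_open(): disable the adapter again once the last
	 * filehandle of an unconfigured, no-HPD adapter is closed.
	 */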
	if (cec_is_registered(adap) && list_empty(&devnode->fhs) &&
	    !adap->needs_hpd && adap->phys_addr == CEC_PHYS_ADDR_INVALID) {
		WARN_ON(adap->ops->adap_enable(adap, false));
	}
	mutex_unlock(&devnode->lock);

	/* Unhook pending transmits from this filehandle. */
	mutex_lock(&adap->lock);
	while (!list_empty(&fh->xfer_list)) {
		struct cec_data *data =
			list_first_entry(&fh->xfer_list, struct cec_data, xfer_list);

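		/*
		 * The transmit itself stays queued on the adapter; it just
		 * no longer reports back to this (closing) filehandle.
		 */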
		data->blocking = false;
		data->fh = NULL;
		list_del(&data->xfer_list);
	}
	mutex_unlock(&adap->lock);
	while (!list_empty(&fh->msgs)) {
		struct cec_msg_entry *entry =
			list_first_entry(&fh->msgs, struct cec_msg_entry, list);

		list_del(&entry->list);
		kfree(entry);
	}
	for (i = CEC_NUM_CORE_EVENTS; i < CEC_NUM_EVENTS; i++) {
		while (!list_empty(&fh->events[i])) {
			struct cec_event_entry *entry =
				list_first_entry(&fh->events[i],
						 struct cec_event_entry, list);

			list_del(&entry->list);
			kfree(entry);
		}
	}
	kfree(fh);

	cec_put_device(devnode);
	filp->private_data = NULL;
	return 0;
}

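/*
 * The CEC ioctl argument structs use fixed-size types with the same
 * layout for 32-bit and 64-bit userspace, so the native handler doubles
 * as the compat_ioctl handler.
 */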
const struct file_operations cec_devnode_fops = {
	.owner = THIS_MODULE,
	.open = cec_open,
	.unlocked_ioctl = cec_ioctl,
	.compat_ioctl = cec_ioctl,
	.release = cec_release,
	.poll = cec_poll,
	.llseek = no_llseek,
};