1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * cec-adap.c - HDMI Consumer Electronics Control framework - CEC adapter
4 *
5 * Copyright 2016 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
6 */
7
8#include <linux/errno.h>
9#include <linux/init.h>
10#include <linux/module.h>
11#include <linux/kernel.h>
12#include <linux/kmod.h>
13#include <linux/ktime.h>
14#include <linux/slab.h>
15#include <linux/mm.h>
16#include <linux/string.h>
17#include <linux/types.h>
18
19#include <drm/drm_edid.h>
20
21#include "cec-priv.h"
22
23static void cec_fill_msg_report_features(struct cec_adapter *adap,
24 struct cec_msg *msg,
25 unsigned int la_idx);
26
27/*
28 * 400 ms is the time it takes for one 16 byte message to be
29 * transferred and 5 is the maximum number of retries. Add
30 * another 100 ms as a margin. So if the transmit doesn't
31 * finish before that time something is really wrong and we
32 * have to time out.
33 *
 34 * This is a sign that something is really wrong and a warning
35 * will be issued.
36 */
37#define CEC_XFER_TIMEOUT_MS (5 * 400 + 100)
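/* Worked out: 5 * 400 + 100 = 2100 ms, i.e. the ~2.1 s that the
 * transmit timeout warning in cec_thread_func() below refers to. */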
38
39#define call_op(adap, op, arg...) \
40 (adap->ops->op ? adap->ops->op(adap, ## arg) : 0)
41
42#define call_void_op(adap, op, arg...) \
43 do { \
44 if (adap->ops->op) \
45 adap->ops->op(adap, ## arg); \
46 } while (0)
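/*
 * Illustration (not itself part of the code): a call such as
 *
 *	call_op(adap, adap_monitor_all_enable, true)
 *
 * expands to
 *
 *	(adap->ops->adap_monitor_all_enable ?
 *	 adap->ops->adap_monitor_all_enable(adap, true) : 0)
 *
 * so optional driver callbacks can be invoked without an explicit NULL
 * check at every call site; call_void_op() does the same for callbacks
 * that return void.
 */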
47
48static int cec_log_addr2idx(const struct cec_adapter *adap, u8 log_addr)
49{
50 int i;
51
52 for (i = 0; i < adap->log_addrs.num_log_addrs; i++)
53 if (adap->log_addrs.log_addr[i] == log_addr)
54 return i;
55 return -1;
56}
57
58static unsigned int cec_log_addr2dev(const struct cec_adapter *adap, u8 log_addr)
59{
60 int i = cec_log_addr2idx(adap, log_addr);
61
62 return adap->log_addrs.primary_device_type[i < 0 ? 0 : i];
63}
64
65/*
66 * Queue a new event for this filehandle. If ts == 0, then set it
67 * to the current time.
68 *
69 * We keep a queue of at most max_event events where max_event differs
70 * per event. If the queue becomes full, then drop the oldest event and
71 * keep track of how many events we've dropped.
72 */
73void cec_queue_event_fh(struct cec_fh *fh,
74 const struct cec_event *new_ev, u64 ts)
75{
76 static const u16 max_events[CEC_NUM_EVENTS] = {
77 1, 1, 800, 800, 8, 8, 8, 8
78 };
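	/*
	 * For orientation: the table is indexed by (event number - 1), so
	 * with the uapi event numbering the two core events (state change,
	 * lost msgs) keep at most one queued entry each, the CEC pin
	 * low/high events keep up to 800 entries, and the HPD/5V pin
	 * events keep up to 8 entries each.
	 */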
79 struct cec_event_entry *entry;
80 unsigned int ev_idx = new_ev->event - 1;
81
82 if (WARN_ON(ev_idx >= ARRAY_SIZE(fh->events)))
83 return;
84
85 if (ts == 0)
86 ts = ktime_get_ns();
87
88 mutex_lock(&fh->lock);
89 if (ev_idx < CEC_NUM_CORE_EVENTS)
90 entry = &fh->core_events[ev_idx];
91 else
92 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
93 if (entry) {
94 if (new_ev->event == CEC_EVENT_LOST_MSGS &&
95 fh->queued_events[ev_idx]) {
96 entry->ev.lost_msgs.lost_msgs +=
97 new_ev->lost_msgs.lost_msgs;
98 goto unlock;
99 }
100 entry->ev = *new_ev;
101 entry->ev.ts = ts;
102
103 if (fh->queued_events[ev_idx] < max_events[ev_idx]) {
104 /* Add new msg at the end of the queue */
105 list_add_tail(&entry->list, &fh->events[ev_idx]);
106 fh->queued_events[ev_idx]++;
107 fh->total_queued_events++;
108 goto unlock;
109 }
110
111 if (ev_idx >= CEC_NUM_CORE_EVENTS) {
112 list_add_tail(&entry->list, &fh->events[ev_idx]);
113 /* drop the oldest event */
114 entry = list_first_entry(&fh->events[ev_idx],
115 struct cec_event_entry, list);
116 list_del(&entry->list);
117 kfree(entry);
118 }
119 }
120 /* Mark that events were lost */
121 entry = list_first_entry_or_null(&fh->events[ev_idx],
122 struct cec_event_entry, list);
123 if (entry)
124 entry->ev.flags |= CEC_EVENT_FL_DROPPED_EVENTS;
125
126unlock:
127 mutex_unlock(&fh->lock);
128 wake_up_interruptible(&fh->wait);
129}
130
131/* Queue a new event for all open filehandles. */
132static void cec_queue_event(struct cec_adapter *adap,
133 const struct cec_event *ev)
134{
135 u64 ts = ktime_get_ns();
136 struct cec_fh *fh;
137
138 mutex_lock(&adap->devnode.lock);
139 list_for_each_entry(fh, &adap->devnode.fhs, list)
140 cec_queue_event_fh(fh, ev, ts);
141 mutex_unlock(&adap->devnode.lock);
142}
143
144/* Notify userspace that the CEC pin changed state at the given time. */
145void cec_queue_pin_cec_event(struct cec_adapter *adap, bool is_high,
146 bool dropped_events, ktime_t ts)
147{
148 struct cec_event ev = {
149 .event = is_high ? CEC_EVENT_PIN_CEC_HIGH :
150 CEC_EVENT_PIN_CEC_LOW,
151 .flags = dropped_events ? CEC_EVENT_FL_DROPPED_EVENTS : 0,
152 };
153 struct cec_fh *fh;
154
155 mutex_lock(&adap->devnode.lock);
156 list_for_each_entry(fh, &adap->devnode.fhs, list)
157 if (fh->mode_follower == CEC_MODE_MONITOR_PIN)
158 cec_queue_event_fh(fh, &ev, ktime_to_ns(ts));
159 mutex_unlock(&adap->devnode.lock);
160}
161EXPORT_SYMBOL_GPL(cec_queue_pin_cec_event);
162
163/* Notify userspace that the HPD pin changed state at the given time. */
164void cec_queue_pin_hpd_event(struct cec_adapter *adap, bool is_high, ktime_t ts)
165{
166 struct cec_event ev = {
167 .event = is_high ? CEC_EVENT_PIN_HPD_HIGH :
168 CEC_EVENT_PIN_HPD_LOW,
169 };
170 struct cec_fh *fh;
171
172 mutex_lock(&adap->devnode.lock);
173 list_for_each_entry(fh, &adap->devnode.fhs, list)
174 cec_queue_event_fh(fh, &ev, ktime_to_ns(ts));
175 mutex_unlock(&adap->devnode.lock);
176}
177EXPORT_SYMBOL_GPL(cec_queue_pin_hpd_event);
178
179/* Notify userspace that the 5V pin changed state at the given time. */
180void cec_queue_pin_5v_event(struct cec_adapter *adap, bool is_high, ktime_t ts)
181{
182 struct cec_event ev = {
183 .event = is_high ? CEC_EVENT_PIN_5V_HIGH :
184 CEC_EVENT_PIN_5V_LOW,
185 };
186 struct cec_fh *fh;
187
188 mutex_lock(&adap->devnode.lock);
189 list_for_each_entry(fh, &adap->devnode.fhs, list)
190 cec_queue_event_fh(fh, &ev, ktime_to_ns(ts));
191 mutex_unlock(&adap->devnode.lock);
192}
193EXPORT_SYMBOL_GPL(cec_queue_pin_5v_event);
194
195/*
196 * Queue a new message for this filehandle.
197 *
198 * We keep a queue of at most CEC_MAX_MSG_RX_QUEUE_SZ messages. If the
199 * queue becomes full, then drop the oldest message and keep track
200 * of how many messages we've dropped.
201 */
202static void cec_queue_msg_fh(struct cec_fh *fh, const struct cec_msg *msg)
203{
204 static const struct cec_event ev_lost_msgs = {
205 .event = CEC_EVENT_LOST_MSGS,
206 .flags = 0,
207 {
208 .lost_msgs = { 1 },
209 },
210 };
211 struct cec_msg_entry *entry;
212
213 mutex_lock(&fh->lock);
214 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
215 if (entry) {
216 entry->msg = *msg;
217 /* Add new msg at the end of the queue */
218 list_add_tail(&entry->list, &fh->msgs);
219
220 if (fh->queued_msgs < CEC_MAX_MSG_RX_QUEUE_SZ) {
221 /* All is fine if there is enough room */
222 fh->queued_msgs++;
223 mutex_unlock(&fh->lock);
224 wake_up_interruptible(&fh->wait);
225 return;
226 }
227
228 /*
229 * if the message queue is full, then drop the oldest one and
230 * send a lost message event.
231 */
232 entry = list_first_entry(&fh->msgs, struct cec_msg_entry, list);
233 list_del(&entry->list);
234 kfree(entry);
235 }
236 mutex_unlock(&fh->lock);
237
238 /*
239 * We lost a message, either because kmalloc failed or the queue
240 * was full.
241 */
242 cec_queue_event_fh(fh, &ev_lost_msgs, ktime_get_ns());
243}
244
245/*
246 * Queue the message for those filehandles that are in monitor mode.
247 * If valid_la is true (this message is for us or was sent by us),
248 * then pass it on to any monitoring filehandle. If this message
249 * isn't for us or from us, then only give it to filehandles that
250 * are in MONITOR_ALL mode.
251 *
252 * This can only happen if the CEC_CAP_MONITOR_ALL capability is
253 * set and the CEC adapter was placed in 'monitor all' mode.
254 */
255static void cec_queue_msg_monitor(struct cec_adapter *adap,
256 const struct cec_msg *msg,
257 bool valid_la)
258{
259 struct cec_fh *fh;
260 u32 monitor_mode = valid_la ? CEC_MODE_MONITOR :
261 CEC_MODE_MONITOR_ALL;
262
263 mutex_lock(&adap->devnode.lock);
264 list_for_each_entry(fh, &adap->devnode.fhs, list) {
265 if (fh->mode_follower >= monitor_mode)
266 cec_queue_msg_fh(fh, msg);
267 }
268 mutex_unlock(&adap->devnode.lock);
269}
270
271/*
272 * Queue the message for follower filehandles.
273 */
274static void cec_queue_msg_followers(struct cec_adapter *adap,
275 const struct cec_msg *msg)
276{
277 struct cec_fh *fh;
278
279 mutex_lock(&adap->devnode.lock);
280 list_for_each_entry(fh, &adap->devnode.fhs, list) {
281 if (fh->mode_follower == CEC_MODE_FOLLOWER)
282 cec_queue_msg_fh(fh, msg);
283 }
284 mutex_unlock(&adap->devnode.lock);
285}
286
287/* Notify userspace of an adapter state change. */
288static void cec_post_state_event(struct cec_adapter *adap)
289{
290 struct cec_event ev = {
291 .event = CEC_EVENT_STATE_CHANGE,
292 };
293
294 ev.state_change.phys_addr = adap->phys_addr;
295 ev.state_change.log_addr_mask = adap->log_addrs.log_addr_mask;
296 cec_queue_event(adap, &ev);
297}
298
299/*
300 * A CEC transmit (and a possible wait for reply) completed.
301 * If this was in blocking mode, then complete it, otherwise
302 * queue the message for userspace to dequeue later.
303 *
304 * This function is called with adap->lock held.
305 */
306static void cec_data_completed(struct cec_data *data)
307{
308 /*
309 * Delete this transmit from the filehandle's xfer_list since
310 * we're done with it.
311 *
312 * Note that if the filehandle is closed before this transmit
313 * finished, then the release() function will set data->fh to NULL.
314 * Without that we would be referring to a closed filehandle.
315 */
316 if (data->fh)
317 list_del(&data->xfer_list);
318
319 if (data->blocking) {
320 /*
321 * Someone is blocking so mark the message as completed
322 * and call complete.
323 */
324 data->completed = true;
325 complete(&data->c);
326 } else {
327 /*
328 * No blocking, so just queue the message if needed and
329 * free the memory.
330 */
331 if (data->fh)
332 cec_queue_msg_fh(data->fh, &data->msg);
333 kfree(data);
334 }
335}
336
337/*
338 * A pending CEC transmit needs to be cancelled, either because the CEC
339 * adapter is disabled or the transmit takes an impossibly long time to
340 * finish.
341 *
342 * This function is called with adap->lock held.
343 */
344static void cec_data_cancel(struct cec_data *data, u8 tx_status)
345{
346 /*
347 * It's either the current transmit, or it is a pending
348 * transmit. Take the appropriate action to clear it.
349 */
350 if (data->adap->transmitting == data) {
351 data->adap->transmitting = NULL;
352 } else {
353 list_del_init(&data->list);
354 if (!(data->msg.tx_status & CEC_TX_STATUS_OK))
355 data->adap->transmit_queue_sz--;
356 }
357
358 if (data->msg.tx_status & CEC_TX_STATUS_OK) {
359 data->msg.rx_ts = ktime_get_ns();
360 data->msg.rx_status = CEC_RX_STATUS_ABORTED;
361 } else {
362 data->msg.tx_ts = ktime_get_ns();
363 data->msg.tx_status |= tx_status |
364 CEC_TX_STATUS_MAX_RETRIES;
365 data->msg.tx_error_cnt++;
366 data->attempts = 0;
367 }
368
369 /* Queue transmitted message for monitoring purposes */
370 cec_queue_msg_monitor(data->adap, &data->msg, 1);
371
372 cec_data_completed(data);
373}
374
375/*
376 * Flush all pending transmits and cancel any pending timeout work.
377 *
378 * This function is called with adap->lock held.
379 */
380static void cec_flush(struct cec_adapter *adap)
381{
382 struct cec_data *data, *n;
383
384 /*
385 * If the adapter is disabled, or we're asked to stop,
386 * then cancel any pending transmits.
387 */
388 while (!list_empty(&adap->transmit_queue)) {
389 data = list_first_entry(&adap->transmit_queue,
390 struct cec_data, list);
391 cec_data_cancel(data, CEC_TX_STATUS_ABORTED);
392 }
393 if (adap->transmitting)
394 cec_data_cancel(adap->transmitting, CEC_TX_STATUS_ABORTED);
395
396 /* Cancel the pending timeout work. */
397 list_for_each_entry_safe(data, n, &adap->wait_queue, list) {
398 if (cancel_delayed_work(&data->work))
399 cec_data_cancel(data, CEC_TX_STATUS_OK);
400 /*
401 * If cancel_delayed_work returned false, then
402 * the cec_wait_timeout function is running,
403 * which will call cec_data_completed. So no
404 * need to do anything special in that case.
405 */
406 }
407}
408
409/*
410 * Main CEC state machine
411 *
412 * Wait until the thread should be stopped, or we are not transmitting and
413 * a new transmit message is queued up, in which case we start transmitting
414 * that message. When the adapter finished transmitting the message it will
415 * call cec_transmit_done().
416 *
417 * If the adapter is disabled, then remove all queued messages instead.
418 *
419 * If the current transmit times out, then cancel that transmit.
420 */
421int cec_thread_func(void *_adap)
422{
423 struct cec_adapter *adap = _adap;
424
425 for (;;) {
426 unsigned int signal_free_time;
427 struct cec_data *data;
428 bool timeout = false;
429 u8 attempts;
430
431 if (adap->transmitting) {
432 int err;
433
434 /*
435 * We are transmitting a message, so add a timeout
 436 * to prevent the state machine from getting stuck waiting
 437 * for this message to finish, and add a check to
438 * see if the adapter is disabled in which case the
439 * transmit should be canceled.
440 */
441 err = wait_event_interruptible_timeout(adap->kthread_waitq,
442 (adap->needs_hpd &&
443 (!adap->is_configured && !adap->is_configuring)) ||
444 kthread_should_stop() ||
445 (!adap->transmitting &&
446 !list_empty(&adap->transmit_queue)),
447 msecs_to_jiffies(CEC_XFER_TIMEOUT_MS));
448 timeout = err == 0;
449 } else {
450 /* Otherwise we just wait for something to happen. */
451 wait_event_interruptible(adap->kthread_waitq,
452 kthread_should_stop() ||
453 (!adap->transmitting &&
454 !list_empty(&adap->transmit_queue)));
455 }
456
457 mutex_lock(&adap->lock);
458
459 if ((adap->needs_hpd &&
460 (!adap->is_configured && !adap->is_configuring)) ||
461 kthread_should_stop()) {
462 cec_flush(adap);
463 goto unlock;
464 }
465
466 if (adap->transmitting && timeout) {
467 /*
468 * If we timeout, then log that. Normally this does
469 * not happen and it is an indication of a faulty CEC
470 * adapter driver, or the CEC bus is in some weird
471 * state. On rare occasions it can happen if there is
472 * so much traffic on the bus that the adapter was
473 * unable to transmit for CEC_XFER_TIMEOUT_MS (2.1s).
474 */
475 pr_warn("cec-%s: message %*ph timed out\n", adap->name,
476 adap->transmitting->msg.len,
477 adap->transmitting->msg.msg);
478 adap->tx_timeouts++;
479 /* Just give up on this. */
480 cec_data_cancel(adap->transmitting,
481 CEC_TX_STATUS_TIMEOUT);
482 goto unlock;
483 }
484
485 /*
486 * If we are still transmitting, or there is nothing new to
487 * transmit, then just continue waiting.
488 */
489 if (adap->transmitting || list_empty(&adap->transmit_queue))
490 goto unlock;
491
492 /* Get a new message to transmit */
493 data = list_first_entry(&adap->transmit_queue,
494 struct cec_data, list);
495 list_del_init(&data->list);
496 adap->transmit_queue_sz--;
497
498 /* Make this the current transmitting message */
499 adap->transmitting = data;
500
501 /*
502 * Suggested number of attempts as per the CEC 2.0 spec:
503 * 4 attempts is the default, except for 'secondary poll
504 * messages', i.e. poll messages not sent during the adapter
505 * configuration phase when it allocates logical addresses.
506 */
507 if (data->msg.len == 1 && adap->is_configured)
508 attempts = 2;
509 else
510 attempts = 4;
511
512 /* Set the suggested signal free time */
513 if (data->attempts) {
514 /* should be >= 3 data bit periods for a retry */
515 signal_free_time = CEC_SIGNAL_FREE_TIME_RETRY;
516 } else if (adap->last_initiator !=
517 cec_msg_initiator(&data->msg)) {
518 /* should be >= 5 data bit periods for new initiator */
519 signal_free_time = CEC_SIGNAL_FREE_TIME_NEW_INITIATOR;
520 adap->last_initiator = cec_msg_initiator(&data->msg);
521 } else {
522 /*
523 * should be >= 7 data bit periods for sending another
524 * frame immediately after another.
525 */
526 signal_free_time = CEC_SIGNAL_FREE_TIME_NEXT_XFER;
527 }
528 if (data->attempts == 0)
529 data->attempts = attempts;
530
531 /* Tell the adapter to transmit, cancel on error */
532 if (adap->ops->adap_transmit(adap, data->attempts,
533 signal_free_time, &data->msg))
534 cec_data_cancel(data, CEC_TX_STATUS_ABORTED);
535
536unlock:
537 mutex_unlock(&adap->lock);
538
539 if (kthread_should_stop())
540 break;
541 }
542 return 0;
543}
544
545/*
546 * Called by the CEC adapter if a transmit finished.
547 */
548void cec_transmit_done_ts(struct cec_adapter *adap, u8 status,
549 u8 arb_lost_cnt, u8 nack_cnt, u8 low_drive_cnt,
550 u8 error_cnt, ktime_t ts)
551{
552 struct cec_data *data;
553 struct cec_msg *msg;
554 unsigned int attempts_made = arb_lost_cnt + nack_cnt +
555 low_drive_cnt + error_cnt;
556
557 dprintk(2, "%s: status 0x%02x\n", __func__, status);
558 if (attempts_made < 1)
559 attempts_made = 1;
560
561 mutex_lock(&adap->lock);
562 data = adap->transmitting;
563 if (!data) {
564 /*
565 * This can happen if a transmit was issued and the cable is
566 * unplugged while the transmit is ongoing. Ignore this
567 * transmit in that case.
568 */
569 dprintk(1, "%s was called without an ongoing transmit!\n",
570 __func__);
571 goto unlock;
572 }
573
574 msg = &data->msg;
575
576 /* Drivers must fill in the status! */
577 WARN_ON(status == 0);
578 msg->tx_ts = ktime_to_ns(ts);
579 msg->tx_status |= status;
580 msg->tx_arb_lost_cnt += arb_lost_cnt;
581 msg->tx_nack_cnt += nack_cnt;
582 msg->tx_low_drive_cnt += low_drive_cnt;
583 msg->tx_error_cnt += error_cnt;
584
585 /* Mark that we're done with this transmit */
586 adap->transmitting = NULL;
587
588 /*
589 * If there are still retry attempts left and there was an error and
590 * the hardware didn't signal that it retried itself (by setting
591 * CEC_TX_STATUS_MAX_RETRIES), then we will retry ourselves.
592 */
593 if (data->attempts > attempts_made &&
594 !(status & (CEC_TX_STATUS_MAX_RETRIES | CEC_TX_STATUS_OK))) {
595 /* Retry this message */
596 data->attempts -= attempts_made;
597 if (msg->timeout)
598 dprintk(2, "retransmit: %*ph (attempts: %d, wait for 0x%02x)\n",
599 msg->len, msg->msg, data->attempts, msg->reply);
600 else
601 dprintk(2, "retransmit: %*ph (attempts: %d)\n",
602 msg->len, msg->msg, data->attempts);
603 /* Add the message in front of the transmit queue */
604 list_add(&data->list, &adap->transmit_queue);
605 adap->transmit_queue_sz++;
606 goto wake_thread;
607 }
608
609 data->attempts = 0;
610
611 /* Always set CEC_TX_STATUS_MAX_RETRIES on error */
612 if (!(status & CEC_TX_STATUS_OK))
613 msg->tx_status |= CEC_TX_STATUS_MAX_RETRIES;
614
615 /* Queue transmitted message for monitoring purposes */
616 cec_queue_msg_monitor(adap, msg, 1);
617
618 if ((status & CEC_TX_STATUS_OK) && adap->is_configured &&
619 msg->timeout) {
620 /*
621 * Queue the message into the wait queue if we want to wait
622 * for a reply.
623 */
624 list_add_tail(&data->list, &adap->wait_queue);
625 schedule_delayed_work(&data->work,
626 msecs_to_jiffies(msg->timeout));
627 } else {
628 /* Otherwise we're done */
629 cec_data_completed(data);
630 }
631
632wake_thread:
633 /*
634 * Wake up the main thread to see if another message is ready
635 * for transmitting or to retry the current message.
636 */
637 wake_up_interruptible(&adap->kthread_waitq);
638unlock:
639 mutex_unlock(&adap->lock);
640}
641EXPORT_SYMBOL_GPL(cec_transmit_done_ts);
642
643void cec_transmit_attempt_done_ts(struct cec_adapter *adap,
644 u8 status, ktime_t ts)
645{
646 switch (status & ~CEC_TX_STATUS_MAX_RETRIES) {
647 case CEC_TX_STATUS_OK:
648 cec_transmit_done_ts(adap, status, 0, 0, 0, 0, ts);
649 return;
650 case CEC_TX_STATUS_ARB_LOST:
651 cec_transmit_done_ts(adap, status, 1, 0, 0, 0, ts);
652 return;
653 case CEC_TX_STATUS_NACK:
654 cec_transmit_done_ts(adap, status, 0, 1, 0, 0, ts);
655 return;
656 case CEC_TX_STATUS_LOW_DRIVE:
657 cec_transmit_done_ts(adap, status, 0, 0, 1, 0, ts);
658 return;
659 case CEC_TX_STATUS_ERROR:
660 cec_transmit_done_ts(adap, status, 0, 0, 0, 1, ts);
661 return;
662 default:
663 /* Should never happen */
664 WARN(1, "cec-%s: invalid status 0x%02x\n", adap->name, status);
665 return;
666 }
667}
668EXPORT_SYMBOL_GPL(cec_transmit_attempt_done_ts);
669
670/*
671 * Called when waiting for a reply times out.
672 */
673static void cec_wait_timeout(struct work_struct *work)
674{
675 struct cec_data *data = container_of(work, struct cec_data, work.work);
676 struct cec_adapter *adap = data->adap;
677
678 mutex_lock(&adap->lock);
679 /*
680 * Sanity check in case the timeout and the arrival of the message
681 * happened at the same time.
682 */
683 if (list_empty(&data->list))
684 goto unlock;
685
686 /* Mark the message as timed out */
687 list_del_init(&data->list);
688 data->msg.rx_ts = ktime_get_ns();
689 data->msg.rx_status = CEC_RX_STATUS_TIMEOUT;
690 cec_data_completed(data);
691unlock:
692 mutex_unlock(&adap->lock);
693}
694
695/*
696 * Transmit a message. The fh argument may be NULL if the transmit is not
697 * associated with a specific filehandle.
698 *
699 * This function is called with adap->lock held.
700 */
701int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg,
702 struct cec_fh *fh, bool block)
703{
704 struct cec_data *data;
705
706 msg->rx_ts = 0;
707 msg->tx_ts = 0;
708 msg->rx_status = 0;
709 msg->tx_status = 0;
710 msg->tx_arb_lost_cnt = 0;
711 msg->tx_nack_cnt = 0;
712 msg->tx_low_drive_cnt = 0;
713 msg->tx_error_cnt = 0;
714 msg->sequence = 0;
715
716 if (msg->reply && msg->timeout == 0) {
717 /* Make sure the timeout isn't 0. */
718 msg->timeout = 1000;
719 }
720 if (msg->timeout)
721 msg->flags &= CEC_MSG_FL_REPLY_TO_FOLLOWERS;
722 else
723 msg->flags = 0;
724
725 if (msg->len > 1 && msg->msg[1] == CEC_MSG_CDC_MESSAGE) {
726 msg->msg[2] = adap->phys_addr >> 8;
727 msg->msg[3] = adap->phys_addr & 0xff;
728 }
729
730 /* Sanity checks */
731 if (msg->len == 0 || msg->len > CEC_MAX_MSG_SIZE) {
732 dprintk(1, "%s: invalid length %d\n", __func__, msg->len);
733 return -EINVAL;
734 }
735
736 memset(msg->msg + msg->len, 0, sizeof(msg->msg) - msg->len);
737
738 if (msg->timeout)
739 dprintk(2, "%s: %*ph (wait for 0x%02x%s)\n",
740 __func__, msg->len, msg->msg, msg->reply,
741 !block ? ", nb" : "");
742 else
743 dprintk(2, "%s: %*ph%s\n",
744 __func__, msg->len, msg->msg, !block ? " (nb)" : "");
745
746 if (msg->timeout && msg->len == 1) {
747 dprintk(1, "%s: can't reply to poll msg\n", __func__);
748 return -EINVAL;
749 }
750 if (msg->len == 1) {
751 if (cec_msg_destination(msg) == 0xf) {
752 dprintk(1, "%s: invalid poll message\n", __func__);
753 return -EINVAL;
754 }
755 if (cec_has_log_addr(adap, cec_msg_destination(msg))) {
756 /*
757 * If the destination is a logical address our adapter
758 * has already claimed, then just NACK this.
 759 * What the hardware does with a POLL to itself
 760 * varies (some report OK), so it is just as easy
 761 * to handle it here and keep the behavior
 762 * consistent.
763 */
764 msg->tx_ts = ktime_get_ns();
765 msg->tx_status = CEC_TX_STATUS_NACK |
766 CEC_TX_STATUS_MAX_RETRIES;
767 msg->tx_nack_cnt = 1;
768 msg->sequence = ++adap->sequence;
769 if (!msg->sequence)
770 msg->sequence = ++adap->sequence;
771 return 0;
772 }
773 }
774 if (msg->len > 1 && !cec_msg_is_broadcast(msg) &&
775 cec_has_log_addr(adap, cec_msg_destination(msg))) {
776 dprintk(1, "%s: destination is the adapter itself\n", __func__);
777 return -EINVAL;
778 }
779 if (msg->len > 1 && adap->is_configured &&
780 !cec_has_log_addr(adap, cec_msg_initiator(msg))) {
781 dprintk(1, "%s: initiator has unknown logical address %d\n",
782 __func__, cec_msg_initiator(msg));
783 return -EINVAL;
784 }
785 if (!adap->is_configured && !adap->is_configuring) {
786 if (adap->needs_hpd || msg->msg[0] != 0xf0) {
787 dprintk(1, "%s: adapter is unconfigured\n", __func__);
788 return -ENONET;
789 }
790 if (msg->reply) {
791 dprintk(1, "%s: invalid msg->reply\n", __func__);
792 return -EINVAL;
793 }
794 }
795
796 if (adap->transmit_queue_sz >= CEC_MAX_MSG_TX_QUEUE_SZ) {
797 dprintk(1, "%s: transmit queue full\n", __func__);
798 return -EBUSY;
799 }
800
801 data = kzalloc(sizeof(*data), GFP_KERNEL);
802 if (!data)
803 return -ENOMEM;
804
805 msg->sequence = ++adap->sequence;
806 if (!msg->sequence)
807 msg->sequence = ++adap->sequence;
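	/*
	 * Note: a sequence of 0 effectively means 'no sequence', so skip 0
	 * when the counter wraps around (same trick as in the poll-to-self
	 * path above).
	 */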
808
809 data->msg = *msg;
810 data->fh = fh;
811 data->adap = adap;
812 data->blocking = block;
813
814 init_completion(&data->c);
815 INIT_DELAYED_WORK(&data->work, cec_wait_timeout);
816
817 if (fh)
818 list_add_tail(&data->xfer_list, &fh->xfer_list);
819
820 list_add_tail(&data->list, &adap->transmit_queue);
821 adap->transmit_queue_sz++;
822 if (!adap->transmitting)
823 wake_up_interruptible(&adap->kthread_waitq);
824
825 /* All done if we don't need to block waiting for completion */
826 if (!block)
827 return 0;
828
829 /*
830 * Release the lock and wait, retake the lock afterwards.
831 */
832 mutex_unlock(&adap->lock);
833 wait_for_completion_killable(&data->c);
834 if (!data->completed)
835 cancel_delayed_work_sync(&data->work);
836 mutex_lock(&adap->lock);
837
838 /* Cancel the transmit if it was interrupted */
839 if (!data->completed)
840 cec_data_cancel(data, CEC_TX_STATUS_ABORTED);
841
842 /* The transmit completed (possibly with an error) */
843 *msg = data->msg;
844 kfree(data);
845 return 0;
846}
847
848/* Helper function to be used by drivers and this framework. */
849int cec_transmit_msg(struct cec_adapter *adap, struct cec_msg *msg,
850 bool block)
851{
852 int ret;
853
854 mutex_lock(&adap->lock);
855 ret = cec_transmit_msg_fh(adap, msg, NULL, block);
856 mutex_unlock(&adap->lock);
857 return ret;
858}
859EXPORT_SYMBOL_GPL(cec_transmit_msg);
860
861/*
862 * I don't like forward references but without this the low-level
863 * cec_received_msg() function would come after a bunch of high-level
864 * CEC protocol handling functions. That was very confusing.
865 */
866static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg,
867 bool is_reply);
868
869#define DIRECTED 0x80
870#define BCAST1_4 0x40
871#define BCAST2_0 0x20 /* broadcast only allowed for >= 2.0 */
872#define BCAST (BCAST1_4 | BCAST2_0)
873#define BOTH (BCAST | DIRECTED)
874
875/*
876 * Specify minimum length and whether the message is directed, broadcast
877 * or both. Messages that do not match the criteria are ignored as per
878 * the CEC specification.
879 */
880static const u8 cec_msg_size[256] = {
881 [CEC_MSG_ACTIVE_SOURCE] = 4 | BCAST,
882 [CEC_MSG_IMAGE_VIEW_ON] = 2 | DIRECTED,
883 [CEC_MSG_TEXT_VIEW_ON] = 2 | DIRECTED,
884 [CEC_MSG_INACTIVE_SOURCE] = 4 | DIRECTED,
885 [CEC_MSG_REQUEST_ACTIVE_SOURCE] = 2 | BCAST,
886 [CEC_MSG_ROUTING_CHANGE] = 6 | BCAST,
887 [CEC_MSG_ROUTING_INFORMATION] = 4 | BCAST,
888 [CEC_MSG_SET_STREAM_PATH] = 4 | BCAST,
889 [CEC_MSG_STANDBY] = 2 | BOTH,
890 [CEC_MSG_RECORD_OFF] = 2 | DIRECTED,
891 [CEC_MSG_RECORD_ON] = 3 | DIRECTED,
892 [CEC_MSG_RECORD_STATUS] = 3 | DIRECTED,
893 [CEC_MSG_RECORD_TV_SCREEN] = 2 | DIRECTED,
894 [CEC_MSG_CLEAR_ANALOGUE_TIMER] = 13 | DIRECTED,
895 [CEC_MSG_CLEAR_DIGITAL_TIMER] = 16 | DIRECTED,
896 [CEC_MSG_CLEAR_EXT_TIMER] = 13 | DIRECTED,
897 [CEC_MSG_SET_ANALOGUE_TIMER] = 13 | DIRECTED,
898 [CEC_MSG_SET_DIGITAL_TIMER] = 16 | DIRECTED,
899 [CEC_MSG_SET_EXT_TIMER] = 13 | DIRECTED,
900 [CEC_MSG_SET_TIMER_PROGRAM_TITLE] = 2 | DIRECTED,
901 [CEC_MSG_TIMER_CLEARED_STATUS] = 3 | DIRECTED,
902 [CEC_MSG_TIMER_STATUS] = 3 | DIRECTED,
903 [CEC_MSG_CEC_VERSION] = 3 | DIRECTED,
904 [CEC_MSG_GET_CEC_VERSION] = 2 | DIRECTED,
905 [CEC_MSG_GIVE_PHYSICAL_ADDR] = 2 | DIRECTED,
906 [CEC_MSG_GET_MENU_LANGUAGE] = 2 | DIRECTED,
907 [CEC_MSG_REPORT_PHYSICAL_ADDR] = 5 | BCAST,
908 [CEC_MSG_SET_MENU_LANGUAGE] = 5 | BCAST,
909 [CEC_MSG_REPORT_FEATURES] = 6 | BCAST,
910 [CEC_MSG_GIVE_FEATURES] = 2 | DIRECTED,
911 [CEC_MSG_DECK_CONTROL] = 3 | DIRECTED,
912 [CEC_MSG_DECK_STATUS] = 3 | DIRECTED,
913 [CEC_MSG_GIVE_DECK_STATUS] = 3 | DIRECTED,
914 [CEC_MSG_PLAY] = 3 | DIRECTED,
915 [CEC_MSG_GIVE_TUNER_DEVICE_STATUS] = 3 | DIRECTED,
916 [CEC_MSG_SELECT_ANALOGUE_SERVICE] = 6 | DIRECTED,
917 [CEC_MSG_SELECT_DIGITAL_SERVICE] = 9 | DIRECTED,
918 [CEC_MSG_TUNER_DEVICE_STATUS] = 7 | DIRECTED,
919 [CEC_MSG_TUNER_STEP_DECREMENT] = 2 | DIRECTED,
920 [CEC_MSG_TUNER_STEP_INCREMENT] = 2 | DIRECTED,
921 [CEC_MSG_DEVICE_VENDOR_ID] = 5 | BCAST,
922 [CEC_MSG_GIVE_DEVICE_VENDOR_ID] = 2 | DIRECTED,
923 [CEC_MSG_VENDOR_COMMAND] = 2 | DIRECTED,
924 [CEC_MSG_VENDOR_COMMAND_WITH_ID] = 5 | BOTH,
925 [CEC_MSG_VENDOR_REMOTE_BUTTON_DOWN] = 2 | BOTH,
926 [CEC_MSG_VENDOR_REMOTE_BUTTON_UP] = 2 | BOTH,
927 [CEC_MSG_SET_OSD_STRING] = 3 | DIRECTED,
928 [CEC_MSG_GIVE_OSD_NAME] = 2 | DIRECTED,
929 [CEC_MSG_SET_OSD_NAME] = 2 | DIRECTED,
930 [CEC_MSG_MENU_REQUEST] = 3 | DIRECTED,
931 [CEC_MSG_MENU_STATUS] = 3 | DIRECTED,
932 [CEC_MSG_USER_CONTROL_PRESSED] = 3 | DIRECTED,
933 [CEC_MSG_USER_CONTROL_RELEASED] = 2 | DIRECTED,
934 [CEC_MSG_GIVE_DEVICE_POWER_STATUS] = 2 | DIRECTED,
935 [CEC_MSG_REPORT_POWER_STATUS] = 3 | DIRECTED | BCAST2_0,
936 [CEC_MSG_FEATURE_ABORT] = 4 | DIRECTED,
937 [CEC_MSG_ABORT] = 2 | DIRECTED,
938 [CEC_MSG_GIVE_AUDIO_STATUS] = 2 | DIRECTED,
939 [CEC_MSG_GIVE_SYSTEM_AUDIO_MODE_STATUS] = 2 | DIRECTED,
940 [CEC_MSG_REPORT_AUDIO_STATUS] = 3 | DIRECTED,
941 [CEC_MSG_REPORT_SHORT_AUDIO_DESCRIPTOR] = 2 | DIRECTED,
942 [CEC_MSG_REQUEST_SHORT_AUDIO_DESCRIPTOR] = 2 | DIRECTED,
943 [CEC_MSG_SET_SYSTEM_AUDIO_MODE] = 3 | BOTH,
944 [CEC_MSG_SYSTEM_AUDIO_MODE_REQUEST] = 2 | DIRECTED,
945 [CEC_MSG_SYSTEM_AUDIO_MODE_STATUS] = 3 | DIRECTED,
946 [CEC_MSG_SET_AUDIO_RATE] = 3 | DIRECTED,
947 [CEC_MSG_INITIATE_ARC] = 2 | DIRECTED,
948 [CEC_MSG_REPORT_ARC_INITIATED] = 2 | DIRECTED,
949 [CEC_MSG_REPORT_ARC_TERMINATED] = 2 | DIRECTED,
950 [CEC_MSG_REQUEST_ARC_INITIATION] = 2 | DIRECTED,
951 [CEC_MSG_REQUEST_ARC_TERMINATION] = 2 | DIRECTED,
952 [CEC_MSG_TERMINATE_ARC] = 2 | DIRECTED,
953 [CEC_MSG_REQUEST_CURRENT_LATENCY] = 4 | BCAST,
954 [CEC_MSG_REPORT_CURRENT_LATENCY] = 6 | BCAST,
955 [CEC_MSG_CDC_MESSAGE] = 2 | BCAST,
956};
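/*
 * Reading the table: an entry such as "[CEC_MSG_ACTIVE_SOURCE] = 4 | BCAST"
 * encodes the minimum total message length (4 bytes) in the low five bits
 * and the allowed addressing in the top bits; cec_received_msg_ts() below
 * extracts these with "& 0x1f" and "& BOTH" respectively.
 */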
957
958/* Called by the CEC adapter if a message is received */
959void cec_received_msg_ts(struct cec_adapter *adap,
960 struct cec_msg *msg, ktime_t ts)
961{
962 struct cec_data *data;
963 u8 msg_init = cec_msg_initiator(msg);
964 u8 msg_dest = cec_msg_destination(msg);
965 u8 cmd = msg->msg[1];
966 bool is_reply = false;
967 bool valid_la = true;
968 u8 min_len = 0;
969
970 if (WARN_ON(!msg->len || msg->len > CEC_MAX_MSG_SIZE))
971 return;
972
973 /*
974 * Some CEC adapters will receive the messages that they transmitted.
975 * This test filters out those messages by checking if we are the
976 * initiator, and just returning in that case.
977 *
978 * Note that this won't work if this is an Unregistered device.
979 *
 980 * It is bad practice for the hardware to receive the message that
 981 * it transmitted; luckily most CEC adapters behave correctly in
 982 * this respect.
983 */
984 if (msg_init != CEC_LOG_ADDR_UNREGISTERED &&
985 cec_has_log_addr(adap, msg_init))
986 return;
987
988 msg->rx_ts = ktime_to_ns(ts);
989 msg->rx_status = CEC_RX_STATUS_OK;
990 msg->sequence = msg->reply = msg->timeout = 0;
991 msg->tx_status = 0;
992 msg->tx_ts = 0;
993 msg->tx_arb_lost_cnt = 0;
994 msg->tx_nack_cnt = 0;
995 msg->tx_low_drive_cnt = 0;
996 msg->tx_error_cnt = 0;
997 msg->flags = 0;
998 memset(msg->msg + msg->len, 0, sizeof(msg->msg) - msg->len);
999
1000 mutex_lock(&adap->lock);
1001 dprintk(2, "%s: %*ph\n", __func__, msg->len, msg->msg);
1002
1003 adap->last_initiator = 0xff;
1004
1005 /* Check if this message was for us (directed or broadcast). */
1006 if (!cec_msg_is_broadcast(msg))
1007 valid_la = cec_has_log_addr(adap, msg_dest);
1008
1009 /*
1010 * Check if the length is not too short or if the message is a
1011 * broadcast message where a directed message was expected or
1012 * vice versa. If so, then the message has to be ignored (according
1013 * to section CEC 7.3 and CEC 12.2).
1014 */
1015 if (valid_la && msg->len > 1 && cec_msg_size[cmd]) {
1016 u8 dir_fl = cec_msg_size[cmd] & BOTH;
1017
1018 min_len = cec_msg_size[cmd] & 0x1f;
1019 if (msg->len < min_len)
1020 valid_la = false;
1021 else if (!cec_msg_is_broadcast(msg) && !(dir_fl & DIRECTED))
1022 valid_la = false;
1023 else if (cec_msg_is_broadcast(msg) && !(dir_fl & BCAST1_4))
1024 valid_la = false;
1025 else if (cec_msg_is_broadcast(msg) &&
1026 adap->log_addrs.cec_version >= CEC_OP_CEC_VERSION_2_0 &&
1027 !(dir_fl & BCAST2_0))
1028 valid_la = false;
1029 }
1030 if (valid_la && min_len) {
1031 /* These messages have special length requirements */
1032 switch (cmd) {
1033 case CEC_MSG_TIMER_STATUS:
1034 if (msg->msg[2] & 0x10) {
1035 switch (msg->msg[2] & 0xf) {
1036 case CEC_OP_PROG_INFO_NOT_ENOUGH_SPACE:
1037 case CEC_OP_PROG_INFO_MIGHT_NOT_BE_ENOUGH_SPACE:
1038 if (msg->len < 5)
1039 valid_la = false;
1040 break;
1041 }
1042 } else if ((msg->msg[2] & 0xf) == CEC_OP_PROG_ERROR_DUPLICATE) {
1043 if (msg->len < 5)
1044 valid_la = false;
1045 }
1046 break;
1047 case CEC_MSG_RECORD_ON:
1048 switch (msg->msg[2]) {
1049 case CEC_OP_RECORD_SRC_OWN:
1050 break;
1051 case CEC_OP_RECORD_SRC_DIGITAL:
1052 if (msg->len < 10)
1053 valid_la = false;
1054 break;
1055 case CEC_OP_RECORD_SRC_ANALOG:
1056 if (msg->len < 7)
1057 valid_la = false;
1058 break;
1059 case CEC_OP_RECORD_SRC_EXT_PLUG:
1060 if (msg->len < 4)
1061 valid_la = false;
1062 break;
1063 case CEC_OP_RECORD_SRC_EXT_PHYS_ADDR:
1064 if (msg->len < 5)
1065 valid_la = false;
1066 break;
1067 }
1068 break;
1069 }
1070 }
1071
1072 /* It's a valid message and not a poll or CDC message */
1073 if (valid_la && msg->len > 1 && cmd != CEC_MSG_CDC_MESSAGE) {
1074 bool abort = cmd == CEC_MSG_FEATURE_ABORT;
1075
1076 /* The aborted command is in msg[2] */
1077 if (abort)
1078 cmd = msg->msg[2];
1079
1080 /*
1081 * Walk over all transmitted messages that are waiting for a
1082 * reply.
1083 */
1084 list_for_each_entry(data, &adap->wait_queue, list) {
1085 struct cec_msg *dst = &data->msg;
1086
1087 /*
1088 * The *only* CEC message that has two possible replies
1089 * is CEC_MSG_INITIATE_ARC.
1090 * In this case allow either of the two replies.
1091 */
1092 if (!abort && dst->msg[1] == CEC_MSG_INITIATE_ARC &&
1093 (cmd == CEC_MSG_REPORT_ARC_INITIATED ||
1094 cmd == CEC_MSG_REPORT_ARC_TERMINATED) &&
1095 (dst->reply == CEC_MSG_REPORT_ARC_INITIATED ||
1096 dst->reply == CEC_MSG_REPORT_ARC_TERMINATED))
1097 dst->reply = cmd;
1098
1099 /* Does the command match? */
1100 if ((abort && cmd != dst->msg[1]) ||
1101 (!abort && cmd != dst->reply))
1102 continue;
1103
1104 /* Does the addressing match? */
1105 if (msg_init != cec_msg_destination(dst) &&
1106 !cec_msg_is_broadcast(dst))
1107 continue;
1108
1109 /* We got a reply */
1110 memcpy(dst->msg, msg->msg, msg->len);
1111 dst->len = msg->len;
1112 dst->rx_ts = msg->rx_ts;
1113 dst->rx_status = msg->rx_status;
1114 if (abort)
1115 dst->rx_status |= CEC_RX_STATUS_FEATURE_ABORT;
1116 msg->flags = dst->flags;
1117 /* Remove it from the wait_queue */
1118 list_del_init(&data->list);
1119
1120 /* Cancel the pending timeout work */
1121 if (!cancel_delayed_work(&data->work)) {
1122 mutex_unlock(&adap->lock);
1123 flush_scheduled_work();
1124 mutex_lock(&adap->lock);
1125 }
1126 /*
1127 * Mark this as a reply, provided someone is still
1128 * waiting for the answer.
1129 */
1130 if (data->fh)
1131 is_reply = true;
1132 cec_data_completed(data);
1133 break;
1134 }
1135 }
1136 mutex_unlock(&adap->lock);
1137
1138 /* Pass the message on to any monitoring filehandles */
1139 cec_queue_msg_monitor(adap, msg, valid_la);
1140
1141 /* We're done if it is not for us or a poll message */
1142 if (!valid_la || msg->len <= 1)
1143 return;
1144
1145 if (adap->log_addrs.log_addr_mask == 0)
1146 return;
1147
1148 /*
1149 * Process the message on the protocol level. If is_reply is true,
1150 * then cec_receive_notify() won't pass on the reply to the listener(s)
1151 * since that was already done by cec_data_completed() above.
1152 */
1153 cec_receive_notify(adap, msg, is_reply);
1154}
1155EXPORT_SYMBOL_GPL(cec_received_msg_ts);
1156
1157/* Logical Address Handling */
1158
1159/*
1160 * Attempt to claim a specific logical address.
1161 *
1162 * This function is called with adap->lock held.
1163 */
1164static int cec_config_log_addr(struct cec_adapter *adap,
1165 unsigned int idx,
1166 unsigned int log_addr)
1167{
1168 struct cec_log_addrs *las = &adap->log_addrs;
1169 struct cec_msg msg = { };
1170 const unsigned int max_retries = 2;
1171 unsigned int i;
1172 int err;
1173
1174 if (cec_has_log_addr(adap, log_addr))
1175 return 0;
1176
1177 /* Send poll message */
1178 msg.len = 1;
1179 msg.msg[0] = (log_addr << 4) | log_addr;
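	/*
	 * e.g. polling logical address 4 (Playback Device 1) produces the
	 * single byte 0x44: initiator and destination are both 4, so the
	 * poll is only ACKed if another device already owns that address.
	 */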
1180
1181 for (i = 0; i < max_retries; i++) {
1182 err = cec_transmit_msg_fh(adap, &msg, NULL, true);
1183
1184 /*
 1185 * The physical address was reset and the adapter was
 1186 * unconfigured while we were polling, so bail out.
1187 */
1188 if (!adap->is_configuring)
1189 return -EINTR;
1190
1191 if (err)
1192 return err;
1193
1194 /*
1195 * The message was aborted due to a disconnect or
1196 * unconfigure, just bail out.
1197 */
1198 if (msg.tx_status & CEC_TX_STATUS_ABORTED)
1199 return -EINTR;
1200 if (msg.tx_status & CEC_TX_STATUS_OK)
1201 return 0;
1202 if (msg.tx_status & CEC_TX_STATUS_NACK)
1203 break;
1204 /*
1205 * Retry up to max_retries times if the message was neither
1206 * OKed or NACKed. This can happen due to e.g. a Lost
1207 * Arbitration condition.
1208 */
1209 }
1210
1211 /*
1212 * If we are unable to get an OK or a NACK after max_retries attempts
1213 * (and note that each attempt already consists of four polls), then
 1214 * we assume that something is really weird and that it is not a
1215 * good idea to try and claim this logical address.
1216 */
1217 if (i == max_retries)
1218 return 0;
1219
1220 /*
1221 * Message not acknowledged, so this logical
1222 * address is free to use.
1223 */
1224 err = adap->ops->adap_log_addr(adap, log_addr);
1225 if (err)
1226 return err;
1227
1228 las->log_addr[idx] = log_addr;
1229 las->log_addr_mask |= 1 << log_addr;
1230 adap->phys_addrs[log_addr] = adap->phys_addr;
1231 return 1;
1232}
1233
1234/*
1235 * Unconfigure the adapter: clear all logical addresses and send
1236 * the state changed event.
1237 *
1238 * This function is called with adap->lock held.
1239 */
1240static void cec_adap_unconfigure(struct cec_adapter *adap)
1241{
1242 if (!adap->needs_hpd ||
1243 adap->phys_addr != CEC_PHYS_ADDR_INVALID)
1244 WARN_ON(adap->ops->adap_log_addr(adap, CEC_LOG_ADDR_INVALID));
1245 adap->log_addrs.log_addr_mask = 0;
1246 adap->is_configuring = false;
1247 adap->is_configured = false;
1248 memset(adap->phys_addrs, 0xff, sizeof(adap->phys_addrs));
1249 cec_flush(adap);
1250 wake_up_interruptible(&adap->kthread_waitq);
1251 cec_post_state_event(adap);
1252}
1253
1254/*
1255 * Attempt to claim the required logical addresses.
1256 */
1257static int cec_config_thread_func(void *arg)
1258{
1259 /* The various LAs for each type of device */
1260 static const u8 tv_log_addrs[] = {
1261 CEC_LOG_ADDR_TV, CEC_LOG_ADDR_SPECIFIC,
1262 CEC_LOG_ADDR_INVALID
1263 };
1264 static const u8 record_log_addrs[] = {
1265 CEC_LOG_ADDR_RECORD_1, CEC_LOG_ADDR_RECORD_2,
1266 CEC_LOG_ADDR_RECORD_3,
1267 CEC_LOG_ADDR_BACKUP_1, CEC_LOG_ADDR_BACKUP_2,
1268 CEC_LOG_ADDR_INVALID
1269 };
1270 static const u8 tuner_log_addrs[] = {
1271 CEC_LOG_ADDR_TUNER_1, CEC_LOG_ADDR_TUNER_2,
1272 CEC_LOG_ADDR_TUNER_3, CEC_LOG_ADDR_TUNER_4,
1273 CEC_LOG_ADDR_BACKUP_1, CEC_LOG_ADDR_BACKUP_2,
1274 CEC_LOG_ADDR_INVALID
1275 };
1276 static const u8 playback_log_addrs[] = {
1277 CEC_LOG_ADDR_PLAYBACK_1, CEC_LOG_ADDR_PLAYBACK_2,
1278 CEC_LOG_ADDR_PLAYBACK_3,
1279 CEC_LOG_ADDR_BACKUP_1, CEC_LOG_ADDR_BACKUP_2,
1280 CEC_LOG_ADDR_INVALID
1281 };
1282 static const u8 audiosystem_log_addrs[] = {
1283 CEC_LOG_ADDR_AUDIOSYSTEM,
1284 CEC_LOG_ADDR_INVALID
1285 };
1286 static const u8 specific_use_log_addrs[] = {
1287 CEC_LOG_ADDR_SPECIFIC,
1288 CEC_LOG_ADDR_BACKUP_1, CEC_LOG_ADDR_BACKUP_2,
1289 CEC_LOG_ADDR_INVALID
1290 };
1291 static const u8 *type2addrs[6] = {
1292 [CEC_LOG_ADDR_TYPE_TV] = tv_log_addrs,
1293 [CEC_LOG_ADDR_TYPE_RECORD] = record_log_addrs,
1294 [CEC_LOG_ADDR_TYPE_TUNER] = tuner_log_addrs,
1295 [CEC_LOG_ADDR_TYPE_PLAYBACK] = playback_log_addrs,
1296 [CEC_LOG_ADDR_TYPE_AUDIOSYSTEM] = audiosystem_log_addrs,
1297 [CEC_LOG_ADDR_TYPE_SPECIFIC] = specific_use_log_addrs,
1298 };
1299 static const u16 type2mask[] = {
1300 [CEC_LOG_ADDR_TYPE_TV] = CEC_LOG_ADDR_MASK_TV,
1301 [CEC_LOG_ADDR_TYPE_RECORD] = CEC_LOG_ADDR_MASK_RECORD,
1302 [CEC_LOG_ADDR_TYPE_TUNER] = CEC_LOG_ADDR_MASK_TUNER,
1303 [CEC_LOG_ADDR_TYPE_PLAYBACK] = CEC_LOG_ADDR_MASK_PLAYBACK,
1304 [CEC_LOG_ADDR_TYPE_AUDIOSYSTEM] = CEC_LOG_ADDR_MASK_AUDIOSYSTEM,
1305 [CEC_LOG_ADDR_TYPE_SPECIFIC] = CEC_LOG_ADDR_MASK_SPECIFIC,
1306 };
1307 struct cec_adapter *adap = arg;
1308 struct cec_log_addrs *las = &adap->log_addrs;
1309 int err;
1310 int i, j;
1311
1312 mutex_lock(&adap->lock);
1313 dprintk(1, "physical address: %x.%x.%x.%x, claim %d logical addresses\n",
1314 cec_phys_addr_exp(adap->phys_addr), las->num_log_addrs);
1315 las->log_addr_mask = 0;
1316
1317 if (las->log_addr_type[0] == CEC_LOG_ADDR_TYPE_UNREGISTERED)
1318 goto configured;
1319
1320 for (i = 0; i < las->num_log_addrs; i++) {
1321 unsigned int type = las->log_addr_type[i];
1322 const u8 *la_list;
1323 u8 last_la;
1324
1325 /*
1326 * The TV functionality can only map to physical address 0.
1327 * For any other address, try the Specific functionality
1328 * instead as per the spec.
1329 */
1330 if (adap->phys_addr && type == CEC_LOG_ADDR_TYPE_TV)
1331 type = CEC_LOG_ADDR_TYPE_SPECIFIC;
1332
1333 la_list = type2addrs[type];
1334 last_la = las->log_addr[i];
1335 las->log_addr[i] = CEC_LOG_ADDR_INVALID;
1336 if (last_la == CEC_LOG_ADDR_INVALID ||
1337 last_la == CEC_LOG_ADDR_UNREGISTERED ||
1338 !((1 << last_la) & type2mask[type]))
1339 last_la = la_list[0];
1340
1341 err = cec_config_log_addr(adap, i, last_la);
1342 if (err > 0) /* Reused last LA */
1343 continue;
1344
1345 if (err < 0)
1346 goto unconfigure;
1347
1348 for (j = 0; la_list[j] != CEC_LOG_ADDR_INVALID; j++) {
1349 /* Tried this one already, skip it */
1350 if (la_list[j] == last_la)
1351 continue;
1352 /* The backup addresses are CEC 2.0 specific */
1353 if ((la_list[j] == CEC_LOG_ADDR_BACKUP_1 ||
1354 la_list[j] == CEC_LOG_ADDR_BACKUP_2) &&
1355 las->cec_version < CEC_OP_CEC_VERSION_2_0)
1356 continue;
1357
1358 err = cec_config_log_addr(adap, i, la_list[j]);
1359 if (err == 0) /* LA is in use */
1360 continue;
1361 if (err < 0)
1362 goto unconfigure;
1363 /* Done, claimed an LA */
1364 break;
1365 }
1366
1367 if (la_list[j] == CEC_LOG_ADDR_INVALID)
1368 dprintk(1, "could not claim LA %d\n", i);
1369 }
1370
1371 if (adap->log_addrs.log_addr_mask == 0 &&
1372 !(las->flags & CEC_LOG_ADDRS_FL_ALLOW_UNREG_FALLBACK))
1373 goto unconfigure;
1374
1375configured:
1376 if (adap->log_addrs.log_addr_mask == 0) {
1377 /* Fall back to unregistered */
1378 las->log_addr[0] = CEC_LOG_ADDR_UNREGISTERED;
1379 las->log_addr_mask = 1 << las->log_addr[0];
1380 for (i = 1; i < las->num_log_addrs; i++)
1381 las->log_addr[i] = CEC_LOG_ADDR_INVALID;
1382 }
1383 for (i = las->num_log_addrs; i < CEC_MAX_LOG_ADDRS; i++)
1384 las->log_addr[i] = CEC_LOG_ADDR_INVALID;
1385 adap->is_configured = true;
1386 adap->is_configuring = false;
1387 cec_post_state_event(adap);
1388
1389 /*
1390 * Now post the Report Features and Report Physical Address broadcast
1391 * messages. Note that these are non-blocking transmits, meaning that
1392 * they are just queued up and once adap->lock is unlocked the main
1393 * thread will kick in and start transmitting these.
1394 *
1395 * If after this function is done (but before one or more of these
1396 * messages are actually transmitted) the CEC adapter is unconfigured,
1397 * then any remaining messages will be dropped by the main thread.
1398 */
1399 for (i = 0; i < las->num_log_addrs; i++) {
1400 struct cec_msg msg = {};
1401
1402 if (las->log_addr[i] == CEC_LOG_ADDR_INVALID ||
1403 (las->flags & CEC_LOG_ADDRS_FL_CDC_ONLY))
1404 continue;
1405
1406 msg.msg[0] = (las->log_addr[i] << 4) | 0x0f;
1407
1408 /* Report Features must come first according to CEC 2.0 */
1409 if (las->log_addr[i] != CEC_LOG_ADDR_UNREGISTERED &&
1410 adap->log_addrs.cec_version >= CEC_OP_CEC_VERSION_2_0) {
1411 cec_fill_msg_report_features(adap, &msg, i);
1412 cec_transmit_msg_fh(adap, &msg, NULL, false);
1413 }
1414
1415 /* Report Physical Address */
1416 cec_msg_report_physical_addr(&msg, adap->phys_addr,
1417 las->primary_device_type[i]);
1418 dprintk(1, "config: la %d pa %x.%x.%x.%x\n",
1419 las->log_addr[i],
1420 cec_phys_addr_exp(adap->phys_addr));
1421 cec_transmit_msg_fh(adap, &msg, NULL, false);
1422 }
1423 adap->kthread_config = NULL;
1424 complete(&adap->config_completion);
1425 mutex_unlock(&adap->lock);
1426 return 0;
1427
1428unconfigure:
1429 for (i = 0; i < las->num_log_addrs; i++)
1430 las->log_addr[i] = CEC_LOG_ADDR_INVALID;
1431 cec_adap_unconfigure(adap);
1432 adap->kthread_config = NULL;
1433 mutex_unlock(&adap->lock);
1434 complete(&adap->config_completion);
1435 return 0;
1436}
1437
1438/*
1439 * Called from either __cec_s_phys_addr or __cec_s_log_addrs to claim the
1440 * logical addresses.
1441 *
1442 * This function is called with adap->lock held.
1443 */
1444static void cec_claim_log_addrs(struct cec_adapter *adap, bool block)
1445{
1446 if (WARN_ON(adap->is_configuring || adap->is_configured))
1447 return;
1448
1449 init_completion(&adap->config_completion);
1450
1451 /* Ready to kick off the thread */
1452 adap->is_configuring = true;
1453 adap->kthread_config = kthread_run(cec_config_thread_func, adap,
1454 "ceccfg-%s", adap->name);
1455 if (IS_ERR(adap->kthread_config)) {
1456 adap->kthread_config = NULL;
1457 } else if (block) {
1458 mutex_unlock(&adap->lock);
1459 wait_for_completion(&adap->config_completion);
1460 mutex_lock(&adap->lock);
1461 }
1462}
1463
1464/* Set a new physical address and send an event notifying userspace of this.
1465 *
1466 * This function is called with adap->lock held.
1467 */
1468void __cec_s_phys_addr(struct cec_adapter *adap, u16 phys_addr, bool block)
1469{
1470 if (phys_addr == adap->phys_addr)
1471 return;
1472 if (phys_addr != CEC_PHYS_ADDR_INVALID && adap->devnode.unregistered)
1473 return;
1474
1475 dprintk(1, "new physical address %x.%x.%x.%x\n",
1476 cec_phys_addr_exp(phys_addr));
1477 if (phys_addr == CEC_PHYS_ADDR_INVALID ||
1478 adap->phys_addr != CEC_PHYS_ADDR_INVALID) {
1479 adap->phys_addr = CEC_PHYS_ADDR_INVALID;
1480 cec_post_state_event(adap);
1481 cec_adap_unconfigure(adap);
1482 /* Disabling monitor all mode should always succeed */
1483 if (adap->monitor_all_cnt)
1484 WARN_ON(call_op(adap, adap_monitor_all_enable, false));
1485 mutex_lock(&adap->devnode.lock);
1486 if (adap->needs_hpd || list_empty(&adap->devnode.fhs))
1487 WARN_ON(adap->ops->adap_enable(adap, false));
1488 mutex_unlock(&adap->devnode.lock);
1489 if (phys_addr == CEC_PHYS_ADDR_INVALID)
1490 return;
1491 }
1492
1493 mutex_lock(&adap->devnode.lock);
1494 adap->last_initiator = 0xff;
1495
1496 if ((adap->needs_hpd || list_empty(&adap->devnode.fhs)) &&
1497 adap->ops->adap_enable(adap, true)) {
1498 mutex_unlock(&adap->devnode.lock);
1499 return;
1500 }
1501
1502 if (adap->monitor_all_cnt &&
1503 call_op(adap, adap_monitor_all_enable, true)) {
1504 if (adap->needs_hpd || list_empty(&adap->devnode.fhs))
1505 WARN_ON(adap->ops->adap_enable(adap, false));
1506 mutex_unlock(&adap->devnode.lock);
1507 return;
1508 }
1509 mutex_unlock(&adap->devnode.lock);
1510
1511 adap->phys_addr = phys_addr;
1512 cec_post_state_event(adap);
1513 if (adap->log_addrs.num_log_addrs)
1514 cec_claim_log_addrs(adap, block);
1515}
1516
1517void cec_s_phys_addr(struct cec_adapter *adap, u16 phys_addr, bool block)
1518{
1519 if (IS_ERR_OR_NULL(adap))
1520 return;
1521
1522 mutex_lock(&adap->lock);
1523 __cec_s_phys_addr(adap, phys_addr, block);
1524 mutex_unlock(&adap->lock);
1525}
1526EXPORT_SYMBOL_GPL(cec_s_phys_addr);
1527
1528void cec_s_phys_addr_from_edid(struct cec_adapter *adap,
1529 const struct edid *edid)
1530{
1531 u16 pa = CEC_PHYS_ADDR_INVALID;
1532
1533 if (edid && edid->extensions)
1534 pa = cec_get_edid_phys_addr((const u8 *)edid,
1535 EDID_LENGTH * (edid->extensions + 1), NULL);
1536 cec_s_phys_addr(adap, pa, false);
1537}
1538EXPORT_SYMBOL_GPL(cec_s_phys_addr_from_edid);
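/*
 * Illustrative use from an HDMI connector driver (variable names are
 * generic, not from a specific driver):
 *
 *	cec_s_phys_addr_from_edid(adap, edid);	on hotplug, after reading the EDID
 *	cec_s_phys_addr_from_edid(adap, NULL);	on unplug, to invalidate the PA
 *
 * A NULL edid, or an EDID without extension blocks, results in
 * CEC_PHYS_ADDR_INVALID being set.
 */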
1539
1540/*
1541 * Called from either the ioctl or a driver to set the logical addresses.
1542 *
1543 * This function is called with adap->lock held.
1544 */
1545int __cec_s_log_addrs(struct cec_adapter *adap,
1546 struct cec_log_addrs *log_addrs, bool block)
1547{
1548 u16 type_mask = 0;
1549 int i;
1550
1551 if (adap->devnode.unregistered)
1552 return -ENODEV;
1553
1554 if (!log_addrs || log_addrs->num_log_addrs == 0) {
1555 cec_adap_unconfigure(adap);
1556 adap->log_addrs.num_log_addrs = 0;
1557 for (i = 0; i < CEC_MAX_LOG_ADDRS; i++)
1558 adap->log_addrs.log_addr[i] = CEC_LOG_ADDR_INVALID;
1559 adap->log_addrs.osd_name[0] = '\0';
1560 adap->log_addrs.vendor_id = CEC_VENDOR_ID_NONE;
1561 adap->log_addrs.cec_version = CEC_OP_CEC_VERSION_2_0;
1562 return 0;
1563 }
1564
1565 if (log_addrs->flags & CEC_LOG_ADDRS_FL_CDC_ONLY) {
1566 /*
1567 * Sanitize log_addrs fields if a CDC-Only device is
1568 * requested.
1569 */
1570 log_addrs->num_log_addrs = 1;
1571 log_addrs->osd_name[0] = '\0';
1572 log_addrs->vendor_id = CEC_VENDOR_ID_NONE;
1573 log_addrs->log_addr_type[0] = CEC_LOG_ADDR_TYPE_UNREGISTERED;
1574 /*
1575 * This is just an internal convention since a CDC-Only device
1576 * doesn't have to be a switch. But switches already use
1577 * unregistered, so it makes some kind of sense to pick this
1578 * as the primary device. Since a CDC-Only device never sends
1579 * any 'normal' CEC messages this primary device type is never
1580 * sent over the CEC bus.
1581 */
1582 log_addrs->primary_device_type[0] = CEC_OP_PRIM_DEVTYPE_SWITCH;
1583 log_addrs->all_device_types[0] = 0;
1584 log_addrs->features[0][0] = 0;
1585 log_addrs->features[0][1] = 0;
1586 }
1587
1588 /* Ensure the osd name is 0-terminated */
1589 log_addrs->osd_name[sizeof(log_addrs->osd_name) - 1] = '\0';
1590
1591 /* Sanity checks */
1592 if (log_addrs->num_log_addrs > adap->available_log_addrs) {
1593 dprintk(1, "num_log_addrs > %d\n", adap->available_log_addrs);
1594 return -EINVAL;
1595 }
1596
1597 /*
1598 * Vendor ID is a 24 bit number, so check if the value is
1599 * within the correct range.
1600 */
1601 if (log_addrs->vendor_id != CEC_VENDOR_ID_NONE &&
1602 (log_addrs->vendor_id & 0xff000000) != 0) {
1603 dprintk(1, "invalid vendor ID\n");
1604 return -EINVAL;
1605 }
1606
1607 if (log_addrs->cec_version != CEC_OP_CEC_VERSION_1_4 &&
1608 log_addrs->cec_version != CEC_OP_CEC_VERSION_2_0) {
1609 dprintk(1, "invalid CEC version\n");
1610 return -EINVAL;
1611 }
1612
1613 if (log_addrs->num_log_addrs > 1)
1614 for (i = 0; i < log_addrs->num_log_addrs; i++)
1615 if (log_addrs->log_addr_type[i] ==
1616 CEC_LOG_ADDR_TYPE_UNREGISTERED) {
1617 dprintk(1, "num_log_addrs > 1 can't be combined with unregistered LA\n");
1618 return -EINVAL;
1619 }
1620
1621 for (i = 0; i < log_addrs->num_log_addrs; i++) {
1622 const u8 feature_sz = ARRAY_SIZE(log_addrs->features[0]);
1623 u8 *features = log_addrs->features[i];
1624 bool op_is_dev_features = false;
1625 unsigned j;
1626
1627 log_addrs->log_addr[i] = CEC_LOG_ADDR_INVALID;
1628 if (type_mask & (1 << log_addrs->log_addr_type[i])) {
1629 dprintk(1, "duplicate logical address type\n");
1630 return -EINVAL;
1631 }
1632 type_mask |= 1 << log_addrs->log_addr_type[i];
1633 if ((type_mask & (1 << CEC_LOG_ADDR_TYPE_RECORD)) &&
1634 (type_mask & (1 << CEC_LOG_ADDR_TYPE_PLAYBACK))) {
1635 /* Record already contains the playback functionality */
1636 dprintk(1, "invalid record + playback combination\n");
1637 return -EINVAL;
1638 }
1639 if (log_addrs->primary_device_type[i] >
1640 CEC_OP_PRIM_DEVTYPE_PROCESSOR) {
1641 dprintk(1, "unknown primary device type\n");
1642 return -EINVAL;
1643 }
1644 if (log_addrs->primary_device_type[i] == 2) {
1645 dprintk(1, "invalid primary device type\n");
1646 return -EINVAL;
1647 }
1648 if (log_addrs->log_addr_type[i] > CEC_LOG_ADDR_TYPE_UNREGISTERED) {
1649 dprintk(1, "unknown logical address type\n");
1650 return -EINVAL;
1651 }
1652 for (j = 0; j < feature_sz; j++) {
1653 if ((features[j] & 0x80) == 0) {
1654 if (op_is_dev_features)
1655 break;
1656 op_is_dev_features = true;
1657 }
1658 }
1659 if (!op_is_dev_features || j == feature_sz) {
1660 dprintk(1, "malformed features\n");
1661 return -EINVAL;
1662 }
1663 /* Zero unused part of the feature array */
1664 memset(features + j + 1, 0, feature_sz - j - 1);
1665 }
1666
1667 if (log_addrs->cec_version >= CEC_OP_CEC_VERSION_2_0) {
1668 if (log_addrs->num_log_addrs > 2) {
1669 dprintk(1, "CEC 2.0 allows no more than 2 logical addresses\n");
1670 return -EINVAL;
1671 }
1672 if (log_addrs->num_log_addrs == 2) {
1673 if (!(type_mask & ((1 << CEC_LOG_ADDR_TYPE_AUDIOSYSTEM) |
1674 (1 << CEC_LOG_ADDR_TYPE_TV)))) {
1675 dprintk(1, "two LAs is only allowed for audiosystem and TV\n");
1676 return -EINVAL;
1677 }
1678 if (!(type_mask & ((1 << CEC_LOG_ADDR_TYPE_PLAYBACK) |
1679 (1 << CEC_LOG_ADDR_TYPE_RECORD)))) {
1680 dprintk(1, "an audiosystem/TV can only be combined with record or playback\n");
1681 return -EINVAL;
1682 }
1683 }
1684 }
1685
1686 /* Zero unused LAs */
1687 for (i = log_addrs->num_log_addrs; i < CEC_MAX_LOG_ADDRS; i++) {
1688 log_addrs->primary_device_type[i] = 0;
1689 log_addrs->log_addr_type[i] = 0;
1690 log_addrs->all_device_types[i] = 0;
1691 memset(log_addrs->features[i], 0,
1692 sizeof(log_addrs->features[i]));
1693 }
1694
1695 log_addrs->log_addr_mask = adap->log_addrs.log_addr_mask;
1696 adap->log_addrs = *log_addrs;
1697 if (adap->phys_addr != CEC_PHYS_ADDR_INVALID)
1698 cec_claim_log_addrs(adap, block);
1699 return 0;
1700}
1701
1702int cec_s_log_addrs(struct cec_adapter *adap,
1703 struct cec_log_addrs *log_addrs, bool block)
1704{
1705 int err;
1706
1707 mutex_lock(&adap->lock);
1708 err = __cec_s_log_addrs(adap, log_addrs, block);
1709 mutex_unlock(&adap->lock);
1710 return err;
1711}
1712EXPORT_SYMBOL_GPL(cec_s_log_addrs);
1713
1714/* High-level core CEC message handling */
1715
1716/* Fill in the Report Features message */
1717static void cec_fill_msg_report_features(struct cec_adapter *adap,
1718 struct cec_msg *msg,
1719 unsigned int la_idx)
1720{
1721 const struct cec_log_addrs *las = &adap->log_addrs;
1722 const u8 *features = las->features[la_idx];
1723 bool op_is_dev_features = false;
1724 unsigned int idx;
1725
1726 /* Report Features */
1727 msg->msg[0] = (las->log_addr[la_idx] << 4) | 0x0f;
1728 msg->len = 4;
1729 msg->msg[1] = CEC_MSG_REPORT_FEATURES;
1730 msg->msg[2] = adap->log_addrs.cec_version;
1731 msg->msg[3] = las->all_device_types[la_idx];
1732
1733 /* Write RC Profiles first, then Device Features */
1734 for (idx = 0; idx < ARRAY_SIZE(las->features[0]); idx++) {
1735 msg->msg[msg->len++] = features[idx];
1736 if ((features[idx] & CEC_OP_FEAT_EXT) == 0) {
1737 if (op_is_dev_features)
1738 break;
1739 op_is_dev_features = true;
1740 }
1741 }
1742}
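/*
 * The message built above lays out roughly as follows: msg[0] carries the
 * source LA in the high nibble and the broadcast destination 0xf in the low
 * nibble, msg[1] is the Report Features opcode, msg[2] the CEC version,
 * msg[3] the all-device-types byte, followed by the RC profile octet(s) and
 * then the device features octet(s); a set CEC_OP_FEAT_EXT bit in an octet
 * means another octet of the same kind follows.
 */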
1743
1744/* Transmit the Feature Abort message */
1745static int cec_feature_abort_reason(struct cec_adapter *adap,
1746 struct cec_msg *msg, u8 reason)
1747{
1748 struct cec_msg tx_msg = { };
1749
1750 /*
1751 * Don't reply with CEC_MSG_FEATURE_ABORT to a CEC_MSG_FEATURE_ABORT
1752 * message!
1753 */
1754 if (msg->msg[1] == CEC_MSG_FEATURE_ABORT)
1755 return 0;
 1756 /* Don't send Feature Abort replies to messages from 'Unregistered' */
1757 if (cec_msg_initiator(msg) == CEC_LOG_ADDR_UNREGISTERED)
1758 return 0;
1759 cec_msg_set_reply_to(&tx_msg, msg);
1760 cec_msg_feature_abort(&tx_msg, msg->msg[1], reason);
1761 return cec_transmit_msg(adap, &tx_msg, false);
1762}
1763
1764static int cec_feature_abort(struct cec_adapter *adap, struct cec_msg *msg)
1765{
1766 return cec_feature_abort_reason(adap, msg,
1767 CEC_OP_ABORT_UNRECOGNIZED_OP);
1768}
1769
1770static int cec_feature_refused(struct cec_adapter *adap, struct cec_msg *msg)
1771{
1772 return cec_feature_abort_reason(adap, msg,
1773 CEC_OP_ABORT_REFUSED);
1774}
1775
1776/*
1777 * Called when a CEC message is received. This function will do any
1778 * necessary core processing. The is_reply bool is true if this message
1779 * is a reply to an earlier transmit.
1780 *
1781 * The message is either a broadcast message or a valid directed message.
1782 */
1783static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg,
1784 bool is_reply)
1785{
1786 bool is_broadcast = cec_msg_is_broadcast(msg);
1787 u8 dest_laddr = cec_msg_destination(msg);
1788 u8 init_laddr = cec_msg_initiator(msg);
1789 u8 devtype = cec_log_addr2dev(adap, dest_laddr);
1790 int la_idx = cec_log_addr2idx(adap, dest_laddr);
1791 bool from_unregistered = init_laddr == 0xf;
1792 struct cec_msg tx_cec_msg = { };
1793
1794 dprintk(2, "%s: %*ph\n", __func__, msg->len, msg->msg);
1795
1796 /* If this is a CDC-Only device, then ignore any non-CDC messages */
1797 if (cec_is_cdc_only(&adap->log_addrs) &&
1798 msg->msg[1] != CEC_MSG_CDC_MESSAGE)
1799 return 0;
1800
1801 if (adap->ops->received) {
1802 /* Allow drivers to process the message first */
1803 if (adap->ops->received(adap, msg) != -ENOMSG)
1804 return 0;
1805 }
1806
1807 /*
1808 * REPORT_PHYSICAL_ADDR, CEC_MSG_USER_CONTROL_PRESSED and
1809 * CEC_MSG_USER_CONTROL_RELEASED messages always have to be
1810 * handled by the CEC core, even if the passthrough mode is on.
1811 * The others are not processed by the core (only forwarded) if passthrough mode is on.
1812 */
1813 switch (msg->msg[1]) {
1814 case CEC_MSG_GET_CEC_VERSION:
1815 case CEC_MSG_ABORT:
1816 case CEC_MSG_GIVE_DEVICE_POWER_STATUS:
1817 case CEC_MSG_GIVE_OSD_NAME:
1818 /*
1819 * These messages reply with a directed message, so ignore if
1820 * the initiator is Unregistered.
1821 */
1822 if (!adap->passthrough && from_unregistered)
1823 return 0;
1824 /* Fall through */
1825 case CEC_MSG_GIVE_DEVICE_VENDOR_ID:
1826 case CEC_MSG_GIVE_FEATURES:
1827 case CEC_MSG_GIVE_PHYSICAL_ADDR:
1828 /*
1829 * Skip processing these messages if the passthrough mode
1830 * is on.
1831 */
1832 if (adap->passthrough)
1833 goto skip_processing;
1834 /* Ignore if addressing is wrong */
1835 if (is_broadcast)
1836 return 0;
1837 break;
1838
1839 case CEC_MSG_USER_CONTROL_PRESSED:
1840 case CEC_MSG_USER_CONTROL_RELEASED:
1841 /* Wrong addressing mode: don't process */
1842 if (is_broadcast || from_unregistered)
1843 goto skip_processing;
1844 break;
1845
1846 case CEC_MSG_REPORT_PHYSICAL_ADDR:
1847 /*
1848 * This message is always processed, regardless of the
1849 * passthrough setting.
1850 *
1851 * Exception: don't process if wrong addressing mode.
1852 */
1853 if (!is_broadcast)
1854 goto skip_processing;
1855 break;
1856
1857 default:
1858 break;
1859 }
1860
1861 cec_msg_set_reply_to(&tx_cec_msg, msg);
1862
1863 switch (msg->msg[1]) {
1864 /* The following messages are processed but still passed through */
1865 case CEC_MSG_REPORT_PHYSICAL_ADDR: {
1866 u16 pa = (msg->msg[2] << 8) | msg->msg[3];
1867
1868 if (!from_unregistered)
1869 adap->phys_addrs[init_laddr] = pa;
1870 dprintk(1, "reported physical address %x.%x.%x.%x for logical address %d\n",
1871 cec_phys_addr_exp(pa), init_laddr);
1872 break;
1873 }
1874
1875 case CEC_MSG_USER_CONTROL_PRESSED:
1876 if (!(adap->capabilities & CEC_CAP_RC) ||
1877 !(adap->log_addrs.flags & CEC_LOG_ADDRS_FL_ALLOW_RC_PASSTHRU))
1878 break;
1879
1880#ifdef CONFIG_MEDIA_CEC_RC
1881 switch (msg->msg[2]) {
1882 /*
1883		 * The Play function: this message can have a variable length
1884		 * depending on the specific play function that is used.
1885 */
1886 case 0x60:
1887 if (msg->len == 2)
1888 rc_keydown(adap->rc, RC_PROTO_CEC,
1889 msg->msg[2], 0);
1890 else
1891 rc_keydown(adap->rc, RC_PROTO_CEC,
1892 msg->msg[2] << 8 | msg->msg[3], 0);
1893 break;
1894 /*
1895		 * Other function messages that are not handled:
1896		 * currently the RC framework does not allow supplying an
1897		 * additional parameter with a keypress, and these "keys"
1898		 * carry extra information such as a channel number or an
1899		 * input number.
1900		 * For the time being these messages are not processed by the
1901		 * framework and are simply forwarded to userspace.
1902 */
1903 case 0x56: case 0x57:
1904 case 0x67: case 0x68: case 0x69: case 0x6a:
1905 break;
1906 default:
1907 rc_keydown(adap->rc, RC_PROTO_CEC, msg->msg[2], 0);
1908 break;
1909 }
1910#endif
1911 break;
1912
1913 case CEC_MSG_USER_CONTROL_RELEASED:
1914 if (!(adap->capabilities & CEC_CAP_RC) ||
1915 !(adap->log_addrs.flags & CEC_LOG_ADDRS_FL_ALLOW_RC_PASSTHRU))
1916 break;
1917#ifdef CONFIG_MEDIA_CEC_RC
1918 rc_keyup(adap->rc);
1919#endif
1920 break;
1921
1922 /*
1923 * The remaining messages are only processed if the passthrough mode
1924 * is off.
1925 */
1926 case CEC_MSG_GET_CEC_VERSION:
1927 cec_msg_cec_version(&tx_cec_msg, adap->log_addrs.cec_version);
1928 return cec_transmit_msg(adap, &tx_cec_msg, false);
1929
1930 case CEC_MSG_GIVE_PHYSICAL_ADDR:
1931 /* Do nothing for CEC switches using addr 15 */
1932 if (devtype == CEC_OP_PRIM_DEVTYPE_SWITCH && dest_laddr == 15)
1933 return 0;
1934 cec_msg_report_physical_addr(&tx_cec_msg, adap->phys_addr, devtype);
1935 return cec_transmit_msg(adap, &tx_cec_msg, false);
1936
1937 case CEC_MSG_GIVE_DEVICE_VENDOR_ID:
1938 if (adap->log_addrs.vendor_id == CEC_VENDOR_ID_NONE)
1939 return cec_feature_abort(adap, msg);
1940 cec_msg_device_vendor_id(&tx_cec_msg, adap->log_addrs.vendor_id);
1941 return cec_transmit_msg(adap, &tx_cec_msg, false);
1942
1943 case CEC_MSG_ABORT:
1944 /* Do nothing for CEC switches */
1945 if (devtype == CEC_OP_PRIM_DEVTYPE_SWITCH)
1946 return 0;
1947 return cec_feature_refused(adap, msg);
1948
1949 case CEC_MSG_GIVE_OSD_NAME: {
1950 if (adap->log_addrs.osd_name[0] == 0)
1951 return cec_feature_abort(adap, msg);
1952 cec_msg_set_osd_name(&tx_cec_msg, adap->log_addrs.osd_name);
1953 return cec_transmit_msg(adap, &tx_cec_msg, false);
1954 }
1955
1956 case CEC_MSG_GIVE_FEATURES:
1957 if (adap->log_addrs.cec_version < CEC_OP_CEC_VERSION_2_0)
1958 return cec_feature_abort(adap, msg);
1959 cec_fill_msg_report_features(adap, &tx_cec_msg, la_idx);
1960 return cec_transmit_msg(adap, &tx_cec_msg, false);
1961
1962 default:
1963 /*
1964		 * Unprocessed messages get a Feature Abort reply if userspace
1965		 * isn't doing any processing of them either.
1966 */
1967 if (!is_broadcast && !is_reply && !adap->follower_cnt &&
1968 !adap->cec_follower && msg->msg[1] != CEC_MSG_FEATURE_ABORT)
1969 return cec_feature_abort(adap, msg);
1970 break;
1971 }
1972
1973skip_processing:
1974 /* If this was a reply, then we're done, unless otherwise specified */
1975 if (is_reply && !(msg->flags & CEC_MSG_FL_REPLY_TO_FOLLOWERS))
1976 return 0;
1977
1978 /*
1979 * Send to the exclusive follower if there is one, otherwise send
1980 * to all followers.
1981 */
1982 if (adap->cec_follower)
1983 cec_queue_msg_fh(adap->cec_follower, msg);
1984 else
1985 cec_queue_msg_followers(adap, msg);
1986 return 0;
1987}
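/*
 * Example exchange (illustrative only): suppose the TV (logical address
 * 0) sends <Give Physical Address> to this adapter's playback device at
 * logical address 4, and the adapter's physical address is 1.0.0.0.
 * The received message is:
 *
 *	0x04 0x83		(CEC_MSG_GIVE_PHYSICAL_ADDR)
 *
 * and, unless passthrough mode is on, the core answers with a broadcast
 * <Report Physical Address>:
 *
 *	0x4f 0x84 0x10 0x00 0x04
 *
 * i.e. physical address 1.0.0.0 and primary device type Playback.
 */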
1988
1989/*
1990 * Helper functions to keep track of the 'monitor all' use count.
1991 *
1992 * These functions are called with adap->lock held.
1993 */
1994int cec_monitor_all_cnt_inc(struct cec_adapter *adap)
1995{
1996 int ret = 0;
1997
1998 if (adap->monitor_all_cnt == 0)
1999 ret = call_op(adap, adap_monitor_all_enable, 1);
2000 if (ret == 0)
2001 adap->monitor_all_cnt++;
2002 return ret;
2003}
2004
2005void cec_monitor_all_cnt_dec(struct cec_adapter *adap)
2006{
2007 adap->monitor_all_cnt--;
2008 if (adap->monitor_all_cnt == 0)
2009 WARN_ON(call_op(adap, adap_monitor_all_enable, 0));
2010}
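/*
 * Usage sketch (not taken from a specific caller): the inc/dec calls are
 * paired per filehandle that enters and leaves monitor-all mode, always
 * with adap->lock held, e.g.:
 *
 *	mutex_lock(&adap->lock);
 *	err = cec_monitor_all_cnt_inc(adap);
 *	mutex_unlock(&adap->lock);
 *	if (err)
 *		return err;
 *	...
 *	mutex_lock(&adap->lock);
 *	cec_monitor_all_cnt_dec(adap);
 *	mutex_unlock(&adap->lock);
 *
 * Only the first increment and the last decrement actually call the
 * driver's adap_monitor_all_enable() callback.
 */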
2011
2012/*
2013 * Helper functions to keep track of the 'monitor pin' use count.
2014 *
2015 * These functions are called with adap->lock held.
2016 */
2017int cec_monitor_pin_cnt_inc(struct cec_adapter *adap)
2018{
2019 int ret = 0;
2020
2021 if (adap->monitor_pin_cnt == 0)
2022 ret = call_op(adap, adap_monitor_pin_enable, 1);
2023 if (ret == 0)
2024 adap->monitor_pin_cnt++;
2025 return ret;
2026}
2027
2028void cec_monitor_pin_cnt_dec(struct cec_adapter *adap)
2029{
2030 adap->monitor_pin_cnt--;
2031 if (adap->monitor_pin_cnt == 0)
2032 WARN_ON(call_op(adap, adap_monitor_pin_enable, 0));
2033}
2034
2035#ifdef CONFIG_DEBUG_FS
2036/*
2037 * Log the current state of the CEC adapter.
2038 * Very useful for debugging.
2039 */
2040int cec_adap_status(struct seq_file *file, void *priv)
2041{
2042 struct cec_adapter *adap = dev_get_drvdata(file->private);
2043 struct cec_data *data;
2044
2045 mutex_lock(&adap->lock);
2046 seq_printf(file, "configured: %d\n", adap->is_configured);
2047 seq_printf(file, "configuring: %d\n", adap->is_configuring);
2048 seq_printf(file, "phys_addr: %x.%x.%x.%x\n",
2049 cec_phys_addr_exp(adap->phys_addr));
2050 seq_printf(file, "number of LAs: %d\n", adap->log_addrs.num_log_addrs);
2051 seq_printf(file, "LA mask: 0x%04x\n", adap->log_addrs.log_addr_mask);
2052 if (adap->cec_follower)
2053 seq_printf(file, "has CEC follower%s\n",
2054 adap->passthrough ? " (in passthrough mode)" : "");
2055 if (adap->cec_initiator)
2056 seq_puts(file, "has CEC initiator\n");
2057 if (adap->monitor_all_cnt)
2058 seq_printf(file, "file handles in Monitor All mode: %u\n",
2059 adap->monitor_all_cnt);
2060 if (adap->tx_timeouts) {
2061 seq_printf(file, "transmit timeouts: %u\n",
2062 adap->tx_timeouts);
2063 adap->tx_timeouts = 0;
2064 }
2065 data = adap->transmitting;
2066 if (data)
2067 seq_printf(file, "transmitting message: %*ph (reply: %02x, timeout: %ums)\n",
2068 data->msg.len, data->msg.msg, data->msg.reply,
2069 data->msg.timeout);
2070 seq_printf(file, "pending transmits: %u\n", adap->transmit_queue_sz);
2071 list_for_each_entry(data, &adap->transmit_queue, list) {
2072 seq_printf(file, "queued tx message: %*ph (reply: %02x, timeout: %ums)\n",
2073 data->msg.len, data->msg.msg, data->msg.reply,
2074 data->msg.timeout);
2075 }
2076 list_for_each_entry(data, &adap->wait_queue, list) {
2077 seq_printf(file, "message waiting for reply: %*ph (reply: %02x, timeout: %ums)\n",
2078 data->msg.len, data->msg.msg, data->msg.reply,
2079 data->msg.timeout);
2080 }
2081
2082 call_void_op(adap, adap_status, file);
2083 mutex_unlock(&adap->lock);
2084 return 0;
2085}
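/*
 * The output above is exposed through debugfs; with debugfs mounted in
 * the usual place the status of the first adapter can typically be read
 * with (path shown as an assumption, the last directory component
 * depends on the adapter's device name):
 *
 *	cat /sys/kernel/debug/cec/cec0/status
 */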
2086#endif