// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - switch/port utility functions
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2018, Intel Corporation
 */

#include <linux/delay.h>
#include <linux/idr.h>
#include <linux/nvmem-provider.h>
#include <linux/pm_runtime.h>
#include <linux/sched/signal.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include "tb.h"

/* Switch NVM support */

#define NVM_DEVID		0x05
#define NVM_VERSION		0x08
#define NVM_CSS			0x10
#define NVM_FLASH_SIZE		0x45

#define NVM_MIN_SIZE		SZ_32K
#define NVM_MAX_SIZE		SZ_512K

static DEFINE_IDA(nvm_ida);

struct nvm_auth_status {
	struct list_head list;
	uuid_t uuid;
	u32 status;
};

/*
 * Hold NVM authentication failure status per switch. This information
 * needs to stay around even when the switch gets power cycled so we
 * keep it separately.
 */
static LIST_HEAD(nvm_auth_status_cache);
static DEFINE_MUTEX(nvm_auth_status_lock);

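/* Caller must hold nvm_auth_status_lock */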
static struct nvm_auth_status *__nvm_get_auth_status(const struct tb_switch *sw)
{
	struct nvm_auth_status *st;

	list_for_each_entry(st, &nvm_auth_status_cache, list) {
		if (uuid_equal(&st->uuid, sw->uuid))
			return st;
	}

	return NULL;
}

static void nvm_get_auth_status(const struct tb_switch *sw, u32 *status)
{
	struct nvm_auth_status *st;

	mutex_lock(&nvm_auth_status_lock);
	st = __nvm_get_auth_status(sw);
	mutex_unlock(&nvm_auth_status_lock);

	*status = st ? st->status : 0;
}

static void nvm_set_auth_status(const struct tb_switch *sw, u32 status)
{
	struct nvm_auth_status *st;

	if (WARN_ON(!sw->uuid))
		return;

	mutex_lock(&nvm_auth_status_lock);
	st = __nvm_get_auth_status(sw);

	if (!st) {
		st = kzalloc(sizeof(*st), GFP_KERNEL);
		if (!st)
			goto unlock;

		memcpy(&st->uuid, sw->uuid, sizeof(st->uuid));
		INIT_LIST_HEAD(&st->list);
		list_add_tail(&st->list, &nvm_auth_status_cache);
	}

	st->status = status;
unlock:
	mutex_unlock(&nvm_auth_status_lock);
}

static void nvm_clear_auth_status(const struct tb_switch *sw)
{
	struct nvm_auth_status *st;

	mutex_lock(&nvm_auth_status_lock);
	st = __nvm_get_auth_status(sw);
	if (st) {
		list_del(&st->list);
		kfree(st);
	}
	mutex_unlock(&nvm_auth_status_lock);
}

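/*
 * Validate the cached NVM image and write it to the non-active flash
 * region. The FARB pointer and digital section must fit inside the
 * image and, outside of safe mode, the image device ID must match the
 * switch. On generation < 3 hardware the CSS headers are written to a
 * dedicated address first.
 */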
static int nvm_validate_and_write(struct tb_switch *sw)
{
	unsigned int image_size, hdr_size;
	const u8 *buf = sw->nvm->buf;
	u16 ds_size;
	int ret;

	if (!buf)
		return -EINVAL;

	image_size = sw->nvm->buf_data_size;
	if (image_size < NVM_MIN_SIZE || image_size > NVM_MAX_SIZE)
		return -EINVAL;

	/*
	 * FARB pointer must point inside the image and must at least
	 * contain parts of the digital section we will be reading here.
	 */
	hdr_size = (*(u32 *)buf) & 0xffffff;
	if (hdr_size + NVM_DEVID + 2 >= image_size)
		return -EINVAL;

	/* Digital section start should be aligned to 4k page */
	if (!IS_ALIGNED(hdr_size, SZ_4K))
		return -EINVAL;

	/*
	 * Read digital section size and check that it also fits inside
	 * the image.
	 */
	ds_size = *(u16 *)(buf + hdr_size);
	if (ds_size >= image_size)
		return -EINVAL;

	if (!sw->safe_mode) {
		u16 device_id;

		/*
		 * Make sure the device ID in the image matches the one
		 * we read from the switch config space.
		 */
		device_id = *(u16 *)(buf + hdr_size + NVM_DEVID);
		if (device_id != sw->config.device_id)
			return -EINVAL;

		if (sw->generation < 3) {
			/* Write CSS headers first */
			ret = dma_port_flash_write(sw->dma_port,
				DMA_PORT_CSS_ADDRESS, buf + NVM_CSS,
				DMA_PORT_CSS_MAX_SIZE);
			if (ret)
				return ret;
		}

		/* Skip headers in the image */
		buf += hdr_size;
		image_size -= hdr_size;
	}

	return dma_port_flash_write(sw->dma_port, 0, buf, image_size);
}

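/*
 * Trigger NVM authentication on the host router. The controller is
 * expected to disappear while a successful upgrade takes effect, so a
 * timeout from the auth command is treated as success.
 */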
static int nvm_authenticate_host(struct tb_switch *sw)
{
	int ret = 0;

	/*
	 * Root switch NVM upgrade requires that we disconnect the
	 * existing paths first (in case it is not in safe mode
	 * already).
	 */
	if (!sw->safe_mode) {
		u32 status;

		ret = tb_domain_disconnect_all_paths(sw->tb);
		if (ret)
			return ret;
		/*
		 * The host controller goes away pretty soon after this
		 * if everything goes well, so getting a timeout is
		 * expected.
		 */
		ret = dma_port_flash_update_auth(sw->dma_port);
		if (!ret || ret == -ETIMEDOUT)
			return 0;

		/*
		 * Any error from the update auth operation requires
		 * power cycling of the host router.
		 */
		tb_sw_warn(sw, "failed to authenticate NVM, power cycling\n");
		if (dma_port_flash_update_auth_status(sw->dma_port, &status) > 0)
			nvm_set_auth_status(sw, status);
	}

	/*
	 * From safe mode we can get out by just power cycling the
	 * switch.
	 */
	dma_port_power_cycle(sw->dma_port);
	return ret;
}

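/*
 * Trigger NVM authentication on a device router and poll for the
 * result. The device is power cycled afterwards so that the new image
 * is taken into use.
 */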
static int nvm_authenticate_device(struct tb_switch *sw)
{
	int ret, retries = 10;

	ret = dma_port_flash_update_auth(sw->dma_port);
	switch (ret) {
	case 0:
	case -ETIMEDOUT:
	case -EACCES:
	case -EINVAL:
		/* Power cycle is required */
		break;
	default:
		return ret;
	}

	/*
	 * Poll here for the authentication status. It takes some time
	 * for the device to respond (we get timeout for a while). Once
	 * we get a response the device needs to be power cycled in
	 * order for the new NVM to be taken into use.
	 */
	do {
		u32 status;

		ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
		if (ret < 0 && ret != -ETIMEDOUT)
			return ret;
		if (ret > 0) {
			if (status) {
				tb_sw_warn(sw, "failed to authenticate NVM\n");
				nvm_set_auth_status(sw, status);
			}

			tb_sw_info(sw, "power cycling the switch now\n");
			dma_port_power_cycle(sw->dma_port);
			return 0;
		}

		msleep(500);
	} while (--retries);

	return -ETIMEDOUT;
}

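/* nvmem read callback for the active NVM, backed by the DMA port */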
static int tb_switch_nvm_read(void *priv, unsigned int offset, void *val,
			      size_t bytes)
{
	struct tb_switch *sw = priv;
	int ret;

	pm_runtime_get_sync(&sw->dev);

	if (!mutex_trylock(&sw->tb->lock)) {
		ret = restart_syscall();
		goto out;
	}

	ret = dma_port_flash_read(sw->dma_port, offset, val, bytes);
	mutex_unlock(&sw->tb->lock);

out:
	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);

	return ret;
}

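/* The non-active NVM is write-only; reads are rejected outright */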
static int tb_switch_nvm_no_read(void *priv, unsigned int offset, void *val,
				 size_t bytes)
{
	return -EPERM;
}

static int tb_switch_nvm_write(void *priv, unsigned int offset, void *val,
			       size_t bytes)
{
	struct tb_switch *sw = priv;
	int ret = 0;

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	/*
	 * Since writing the NVM image might require some special steps,
	 * for example when CSS headers are written, we cache the image
	 * locally here and handle the special cases when the user asks
	 * us to authenticate the image.
	 */
	if (!sw->nvm->buf) {
		sw->nvm->buf = vmalloc(NVM_MAX_SIZE);
		if (!sw->nvm->buf) {
			ret = -ENOMEM;
			goto unlock;
		}
	}

	sw->nvm->buf_data_size = offset + bytes;
	memcpy(sw->nvm->buf + offset, val, bytes);

unlock:
	mutex_unlock(&sw->tb->lock);

	return ret;
}

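/*
 * Register an nvmem device for either the read-only active NVM or the
 * root-only writable non-active NVM of the switch.
 */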
static struct nvmem_device *register_nvmem(struct tb_switch *sw, int id,
					   size_t size, bool active)
{
	struct nvmem_config config;

	memset(&config, 0, sizeof(config));

	if (active) {
		config.name = "nvm_active";
		config.reg_read = tb_switch_nvm_read;
		config.read_only = true;
	} else {
		config.name = "nvm_non_active";
		config.reg_read = tb_switch_nvm_no_read;
		config.reg_write = tb_switch_nvm_write;
		config.root_only = true;
	}

	config.id = id;
	config.stride = 4;
	config.word_size = 4;
	config.size = size;
	config.dev = &sw->dev;
	config.owner = THIS_MODULE;
	config.priv = sw;

	return nvmem_register(&config);
}

static int tb_switch_nvm_add(struct tb_switch *sw)
{
	struct nvmem_device *nvm_dev;
	struct tb_switch_nvm *nvm;
	u32 val;
	int ret;

	if (!sw->dma_port)
		return 0;

	nvm = kzalloc(sizeof(*nvm), GFP_KERNEL);
	if (!nvm)
		return -ENOMEM;

	nvm->id = ida_simple_get(&nvm_ida, 0, 0, GFP_KERNEL);

	/*
	 * If the switch is in safe-mode the only accessible portion of
	 * the NVM is the non-active one where userspace is expected to
	 * write new functional NVM.
	 */
	if (!sw->safe_mode) {
		u32 nvm_size, hdr_size;

		ret = dma_port_flash_read(sw->dma_port, NVM_FLASH_SIZE, &val,
					  sizeof(val));
		if (ret)
			goto err_ida;

		hdr_size = sw->generation < 3 ? SZ_8K : SZ_16K;
		nvm_size = (SZ_1M << (val & 7)) / 8;
		nvm_size = (nvm_size - hdr_size) / 2;

		ret = dma_port_flash_read(sw->dma_port, NVM_VERSION, &val,
					  sizeof(val));
		if (ret)
			goto err_ida;

		nvm->major = val >> 16;
		nvm->minor = val >> 8;

		nvm_dev = register_nvmem(sw, nvm->id, nvm_size, true);
		if (IS_ERR(nvm_dev)) {
			ret = PTR_ERR(nvm_dev);
			goto err_ida;
		}
		nvm->active = nvm_dev;
	}

	if (!sw->no_nvm_upgrade) {
		nvm_dev = register_nvmem(sw, nvm->id, NVM_MAX_SIZE, false);
		if (IS_ERR(nvm_dev)) {
			ret = PTR_ERR(nvm_dev);
			goto err_nvm_active;
		}
		nvm->non_active = nvm_dev;
	}

	sw->nvm = nvm;
	return 0;

err_nvm_active:
	if (nvm->active)
		nvmem_unregister(nvm->active);
err_ida:
	ida_simple_remove(&nvm_ida, nvm->id);
	kfree(nvm);

	return ret;
}

static void tb_switch_nvm_remove(struct tb_switch *sw)
{
	struct tb_switch_nvm *nvm;

	nvm = sw->nvm;
	sw->nvm = NULL;

	if (!nvm)
		return;

	/* Remove authentication status in case the switch is unplugged */
	if (!nvm->authenticating)
		nvm_clear_auth_status(sw);

	if (nvm->non_active)
		nvmem_unregister(nvm->non_active);
	if (nvm->active)
		nvmem_unregister(nvm->active);
	ida_simple_remove(&nvm_ida, nvm->id);
	vfree(nvm->buf);
	kfree(nvm);
}

/* port utility functions */

static const char *tb_port_type(struct tb_regs_port_header *port)
{
	switch (port->type >> 16) {
	case 0:
		switch ((u8) port->type) {
		case 0:
			return "Inactive";
		case 1:
			return "Port";
		case 2:
			return "NHI";
		default:
			return "unknown";
		}
	case 0x2:
		return "Ethernet";
	case 0x8:
		return "SATA";
	case 0xe:
		return "DP/HDMI";
	case 0x10:
		return "PCIe";
	case 0x20:
		return "USB";
	default:
		return "unknown";
	}
}

static void tb_dump_port(struct tb *tb, struct tb_regs_port_header *port)
{
	tb_dbg(tb,
	       " Port %d: %x:%x (Revision: %d, TB Version: %d, Type: %s (%#x))\n",
	       port->port_number, port->vendor_id, port->device_id,
	       port->revision, port->thunderbolt_version, tb_port_type(port),
	       port->type);
	tb_dbg(tb, " Max hop id (in/out): %d/%d\n",
	       port->max_in_hop_id, port->max_out_hop_id);
	tb_dbg(tb, " Max counters: %d\n", port->max_counters);
	tb_dbg(tb, " NFC Credits: %#x\n", port->nfc_credits);
}

/**
 * tb_port_state() - get connectedness state of a port
 *
 * The port must have a TB_CAP_PHY (i.e. it should be a real port).
 *
 * Return: Returns an enum tb_port_state on success or an error code on failure.
 */
static int tb_port_state(struct tb_port *port)
{
	struct tb_cap_phy phy;
	int res;
	if (port->cap_phy == 0) {
		tb_port_WARN(port, "does not have a PHY\n");
		return -EINVAL;
	}
	res = tb_port_read(port, &phy, TB_CFG_PORT, port->cap_phy, 2);
	if (res)
		return res;
	return phy.state;
}

/**
 * tb_wait_for_port() - wait for a port to become ready
 *
 * Wait up to 1 second for a port to reach state TB_PORT_UP. If
 * wait_if_unplugged is set then we also wait if the port is in state
 * TB_PORT_UNPLUGGED (it takes a while for the device to be registered after
 * switch resume). Otherwise we only wait if a device is registered but the link
 * has not yet been established.
 *
 * Return: Returns an error code on failure. Returns 0 if the port is not
 * connected or failed to reach state TB_PORT_UP within one second. Returns 1
 * if the port is connected and in state TB_PORT_UP.
 */
int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged)
{
	int retries = 10;
	int state;
	if (!port->cap_phy) {
		tb_port_WARN(port, "does not have PHY\n");
		return -EINVAL;
	}
	if (tb_is_upstream_port(port)) {
		tb_port_WARN(port, "is the upstream port\n");
		return -EINVAL;
	}

	while (retries--) {
		state = tb_port_state(port);
		if (state < 0)
			return state;
		if (state == TB_PORT_DISABLED) {
			tb_port_dbg(port, "is disabled (state: 0)\n");
			return 0;
		}
		if (state == TB_PORT_UNPLUGGED) {
			if (wait_if_unplugged) {
				/* used during resume */
				tb_port_dbg(port,
					    "is unplugged (state: 7), retrying...\n");
				msleep(100);
				continue;
			}
			tb_port_dbg(port, "is unplugged (state: 7)\n");
			return 0;
		}
		if (state == TB_PORT_UP) {
			tb_port_dbg(port, "is connected, link is up (state: 2)\n");
			return 1;
		}

		/*
		 * After plug-in the state is TB_PORT_CONNECTING. Give it some
		 * time.
		 */
		tb_port_dbg(port,
			    "is connected, link is not up (state: %d), retrying...\n",
			    state);
		msleep(100);
	}
	tb_port_warn(port,
		     "failed to reach state TB_PORT_UP. Ignoring port...\n");
	return 0;
}

/**
 * tb_port_add_nfc_credits() - add/remove non flow controlled credits to port
 *
 * Change the number of NFC credits allocated to @port by @credits. To remove
 * NFC credits pass a negative amount of credits.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_port_add_nfc_credits(struct tb_port *port, int credits)
{
	u32 nfc_credits;

	if (credits == 0 || port->sw->is_unplugged)
		return 0;

	nfc_credits = port->config.nfc_credits & TB_PORT_NFC_CREDITS_MASK;
	nfc_credits += credits;

	tb_port_dbg(port, "adding %d NFC credits to %lu",
		    credits, port->config.nfc_credits & TB_PORT_NFC_CREDITS_MASK);

	port->config.nfc_credits &= ~TB_PORT_NFC_CREDITS_MASK;
	port->config.nfc_credits |= nfc_credits;

	return tb_port_write(port, &port->config.nfc_credits,
			     TB_CFG_PORT, 4, 1);
}

/**
 * tb_port_set_initial_credits() - Set initial port link credits allocated
 * @port: Port to set the initial credits
 * @credits: Number of credits to allocate
 *
 * Set initial credits value to be used for ingress shared buffering.
 */
int tb_port_set_initial_credits(struct tb_port *port, u32 credits)
{
	u32 data;
	int ret;

	ret = tb_port_read(port, &data, TB_CFG_PORT, 5, 1);
	if (ret)
		return ret;

	data &= ~TB_PORT_LCA_MASK;
	data |= (credits << TB_PORT_LCA_SHIFT) & TB_PORT_LCA_MASK;

	return tb_port_write(port, &data, TB_CFG_PORT, 5, 1);
}

/**
 * tb_port_clear_counter() - clear a counter in TB_CFG_COUNTER
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_port_clear_counter(struct tb_port *port, int counter)
{
	u32 zero[3] = { 0, 0, 0 };
	tb_port_dbg(port, "clearing counter %d\n", counter);
	return tb_port_write(port, zero, TB_CFG_COUNTERS, 3 * counter, 3);
}

/**
 * tb_init_port() - initialize a port
 *
 * This is a helper method for tb_switch_alloc. Does not check or initialize
 * any downstream switches.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
static int tb_init_port(struct tb_port *port)
{
	int res;
	int cap;

	res = tb_port_read(port, &port->config, TB_CFG_PORT, 0, 8);
	if (res) {
		if (res == -ENODEV) {
			tb_dbg(port->sw->tb, " Port %d: not implemented\n",
			       port->port);
			return 0;
		}
		return res;
	}

	/* Port 0 is the switch itself and has no PHY. */
	if (port->config.type == TB_TYPE_PORT && port->port != 0) {
		cap = tb_port_find_cap(port, TB_PORT_CAP_PHY);

		if (cap > 0)
			port->cap_phy = cap;
		else
			tb_port_WARN(port, "non switch port without a PHY\n");
	} else if (port->port != 0) {
		cap = tb_port_find_cap(port, TB_PORT_CAP_ADAP);
		if (cap > 0)
			port->cap_adap = cap;
	}

	tb_dump_port(port->sw->tb, &port->config);

	/* Control port does not need HopID allocation */
	if (port->port) {
		ida_init(&port->in_hopids);
		ida_init(&port->out_hopids);
	}

	return 0;
}

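/* Allocate a HopID from the port's input or output HopID space */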
static int tb_port_alloc_hopid(struct tb_port *port, bool in, int min_hopid,
			       int max_hopid)
{
	int port_max_hopid;
	struct ida *ida;

	if (in) {
		port_max_hopid = port->config.max_in_hop_id;
		ida = &port->in_hopids;
	} else {
		port_max_hopid = port->config.max_out_hop_id;
		ida = &port->out_hopids;
	}

	/* HopIDs 0-7 are reserved */
	if (min_hopid < TB_PATH_MIN_HOPID)
		min_hopid = TB_PATH_MIN_HOPID;

	if (max_hopid < 0 || max_hopid > port_max_hopid)
		max_hopid = port_max_hopid;

	return ida_simple_get(ida, min_hopid, max_hopid + 1, GFP_KERNEL);
}

/**
 * tb_port_alloc_in_hopid() - Allocate input HopID from port
 * @port: Port to allocate HopID for
 * @min_hopid: Minimum acceptable input HopID
 * @max_hopid: Maximum acceptable input HopID
 *
 * Return: HopID between @min_hopid and @max_hopid or negative errno in
 * case of error.
 */
int tb_port_alloc_in_hopid(struct tb_port *port, int min_hopid, int max_hopid)
{
	return tb_port_alloc_hopid(port, true, min_hopid, max_hopid);
}

/**
 * tb_port_alloc_out_hopid() - Allocate output HopID from port
 * @port: Port to allocate HopID for
 * @min_hopid: Minimum acceptable output HopID
 * @max_hopid: Maximum acceptable output HopID
 *
 * Return: HopID between @min_hopid and @max_hopid or negative errno in
 * case of error.
 */
int tb_port_alloc_out_hopid(struct tb_port *port, int min_hopid, int max_hopid)
{
	return tb_port_alloc_hopid(port, false, min_hopid, max_hopid);
}

/**
 * tb_port_release_in_hopid() - Release allocated input HopID from port
 * @port: Port whose HopID to release
 * @hopid: HopID to release
 */
void tb_port_release_in_hopid(struct tb_port *port, int hopid)
{
	ida_simple_remove(&port->in_hopids, hopid);
}

/**
 * tb_port_release_out_hopid() - Release allocated output HopID from port
 * @port: Port whose HopID to release
 * @hopid: HopID to release
 */
void tb_port_release_out_hopid(struct tb_port *port, int hopid)
{
	ida_simple_remove(&port->out_hopids, hopid);
}

/**
 * tb_next_port_on_path() - Return next port for given port on a path
 * @start: Start port of the walk
 * @end: End port of the walk
 * @prev: Previous port (%NULL if this is the first)
 *
 * This function can be used to walk from one port to another if they
 * are connected through zero or more switches. If @prev is a dual
 * link port, the function follows that link and returns the other end
 * of that same link.
 *
 * If the @end port has been reached, return %NULL.
 *
 * Domain tb->lock must be held when this function is called.
 */
struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end,
				     struct tb_port *prev)
{
	struct tb_port *next;

	if (!prev)
		return start;

	if (prev->sw == end->sw) {
		if (prev == end)
			return NULL;
		return end;
	}

	if (start->sw->config.depth < end->sw->config.depth) {
		if (prev->remote &&
		    prev->remote->sw->config.depth > prev->sw->config.depth)
			next = prev->remote;
		else
			next = tb_port_at(tb_route(end->sw), prev->sw);
	} else {
		if (tb_is_upstream_port(prev)) {
			next = prev->remote;
		} else {
			next = tb_upstream_port(prev->sw);
			/*
			 * Keep the same link if prev and next are both
			 * dual link ports.
			 */
			if (next->dual_link_port &&
			    next->link_nr != prev->link_nr) {
				next = next->dual_link_port;
			}
		}
	}

	return next;
}

/**
 * tb_port_is_enabled() - Is the adapter port enabled
 * @port: Port to check
 */
bool tb_port_is_enabled(struct tb_port *port)
{
	switch (port->config.type) {
	case TB_TYPE_PCIE_UP:
	case TB_TYPE_PCIE_DOWN:
		return tb_pci_port_is_enabled(port);

	case TB_TYPE_DP_HDMI_IN:
	case TB_TYPE_DP_HDMI_OUT:
		return tb_dp_port_is_enabled(port);

	default:
		return false;
	}
}

/**
 * tb_pci_port_is_enabled() - Is the PCIe adapter port enabled
 * @port: PCIe port to check
 */
bool tb_pci_port_is_enabled(struct tb_port *port)
{
	u32 data;

	if (tb_port_read(port, &data, TB_CFG_PORT, port->cap_adap, 1))
		return false;

	return !!(data & TB_PCI_EN);
}

/**
 * tb_pci_port_enable() - Enable PCIe adapter port
 * @port: PCIe port to enable
 * @enable: Enable/disable the PCIe adapter
 */
int tb_pci_port_enable(struct tb_port *port, bool enable)
{
	u32 word = enable ? TB_PCI_EN : 0x0;
	if (!port->cap_adap)
		return -ENXIO;
	return tb_port_write(port, &word, TB_CFG_PORT, port->cap_adap, 1);
}

/**
 * tb_dp_port_hpd_is_active() - Is HPD already active
 * @port: DP out port to check
 *
 * Checks if the DP OUT adapter port has the HDP bit already set.
 */
int tb_dp_port_hpd_is_active(struct tb_port *port)
{
	u32 data;
	int ret;

	ret = tb_port_read(port, &data, TB_CFG_PORT, port->cap_adap + 2, 1);
	if (ret)
		return ret;

	return !!(data & TB_DP_HDP);
}

/**
 * tb_dp_port_hpd_clear() - Clear HPD from DP IN port
 * @port: Port to clear HPD
 *
 * If the DP IN port has the HDP bit set, this function can be used to
 * clear it.
 */
int tb_dp_port_hpd_clear(struct tb_port *port)
{
	u32 data;
	int ret;

	ret = tb_port_read(port, &data, TB_CFG_PORT, port->cap_adap + 3, 1);
	if (ret)
		return ret;

	data |= TB_DP_HPDC;
	return tb_port_write(port, &data, TB_CFG_PORT, port->cap_adap + 3, 1);
}

/**
 * tb_dp_port_set_hops() - Set video/aux Hop IDs for DP port
 * @port: DP IN/OUT port to set hops
 * @video: Video Hop ID
 * @aux_tx: AUX TX Hop ID
 * @aux_rx: AUX RX Hop ID
 *
 * Programs specified Hop IDs for DP IN/OUT port.
 */
int tb_dp_port_set_hops(struct tb_port *port, unsigned int video,
			unsigned int aux_tx, unsigned int aux_rx)
{
	u32 data[2];
	int ret;

	ret = tb_port_read(port, data, TB_CFG_PORT, port->cap_adap,
			   ARRAY_SIZE(data));
	if (ret)
		return ret;

	data[0] &= ~TB_DP_VIDEO_HOPID_MASK;
	data[1] &= ~(TB_DP_AUX_RX_HOPID_MASK | TB_DP_AUX_TX_HOPID_MASK);

	data[0] |= (video << TB_DP_VIDEO_HOPID_SHIFT) & TB_DP_VIDEO_HOPID_MASK;
	data[1] |= aux_tx & TB_DP_AUX_TX_HOPID_MASK;
	data[1] |= (aux_rx << TB_DP_AUX_RX_HOPID_SHIFT) & TB_DP_AUX_RX_HOPID_MASK;

	return tb_port_write(port, data, TB_CFG_PORT, port->cap_adap,
			     ARRAY_SIZE(data));
}

/**
 * tb_dp_port_is_enabled() - Is DP adapter port enabled
 * @port: DP adapter port to check
 */
bool tb_dp_port_is_enabled(struct tb_port *port)
{
	u32 data[2];

	if (tb_port_read(port, data, TB_CFG_PORT, port->cap_adap,
			 ARRAY_SIZE(data)))
		return false;

	return !!(data[0] & (TB_DP_VIDEO_EN | TB_DP_AUX_EN));
}

/**
 * tb_dp_port_enable() - Enables/disables DP paths of a port
 * @port: DP IN/OUT port
 * @enable: Enable/disable DP path
 *
 * Once Hop IDs are programmed DP paths can be enabled or disabled by
 * calling this function.
 */
int tb_dp_port_enable(struct tb_port *port, bool enable)
{
	u32 data[2];
	int ret;

	ret = tb_port_read(port, data, TB_CFG_PORT, port->cap_adap,
			   ARRAY_SIZE(data));
	if (ret)
		return ret;

	if (enable)
		data[0] |= TB_DP_VIDEO_EN | TB_DP_AUX_EN;
	else
		data[0] &= ~(TB_DP_VIDEO_EN | TB_DP_AUX_EN);

	return tb_port_write(port, data, TB_CFG_PORT, port->cap_adap,
			     ARRAY_SIZE(data));
}

/* switch utility functions */

static void tb_dump_switch(struct tb *tb, struct tb_regs_switch_header *sw)
{
	tb_dbg(tb, " Switch: %x:%x (Revision: %d, TB Version: %d)\n",
	       sw->vendor_id, sw->device_id, sw->revision,
	       sw->thunderbolt_version);
	tb_dbg(tb, " Max Port Number: %d\n", sw->max_port_number);
	tb_dbg(tb, " Config:\n");
	tb_dbg(tb,
	       " Upstream Port Number: %d Depth: %d Route String: %#llx Enabled: %d, PlugEventsDelay: %dms\n",
	       sw->upstream_port_number, sw->depth,
	       (((u64) sw->route_hi) << 32) | sw->route_lo,
	       sw->enabled, sw->plug_events_delay);
	tb_dbg(tb, " unknown1: %#x unknown4: %#x\n",
	       sw->__unknown1, sw->__unknown4);
}

/**
 * tb_switch_reset() - reconfigure route, enable and send TB_CFG_PKG_RESET
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_switch_reset(struct tb *tb, u64 route)
{
	struct tb_cfg_result res;
	struct tb_regs_switch_header header = {
		header.route_hi = route >> 32,
		header.route_lo = route,
		header.enabled = true,
	};
	tb_dbg(tb, "resetting switch at %llx\n", route);
	res.err = tb_cfg_write(tb->ctl, ((u32 *) &header) + 2, route,
			       0, 2, 2, 2);
	if (res.err)
		return res.err;
	res = tb_cfg_reset(tb->ctl, route, TB_CFG_DEFAULT_TIMEOUT);
	if (res.err > 0)
		return -EIO;
	return res.err;
}

/**
 * tb_plug_events_active() - enable/disable plug events on a switch
 *
 * Also configures a sane plug_events_delay of 255ms.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
static int tb_plug_events_active(struct tb_switch *sw, bool active)
{
	u32 data;
	int res;

	if (!sw->config.enabled)
		return 0;

	sw->config.plug_events_delay = 0xff;
	res = tb_sw_write(sw, ((u32 *) &sw->config) + 4, TB_CFG_SWITCH, 4, 1);
	if (res)
		return res;

	res = tb_sw_read(sw, &data, TB_CFG_SWITCH, sw->cap_plug_events + 1, 1);
	if (res)
		return res;

	if (active) {
		data = data & 0xFFFFFF83;
		switch (sw->config.device_id) {
		case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
		case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
		case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
			break;
		default:
			data |= 4;
		}
	} else {
		data = data | 0x7c;
	}
	return tb_sw_write(sw, &data, TB_CFG_SWITCH,
			   sw->cap_plug_events + 1, 1);
}

static ssize_t authorized_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%u\n", sw->authorized);
}

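/*
 * Handle userspace-initiated authorization: 1 approves the switch
 * (using the stored key if one is present), 2 issues a key challenge.
 */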
static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val)
{
	int ret = -EINVAL;

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	if (sw->authorized)
		goto unlock;

	switch (val) {
	/* Approve switch */
	case 1:
		if (sw->key)
			ret = tb_domain_approve_switch_key(sw->tb, sw);
		else
			ret = tb_domain_approve_switch(sw->tb, sw);
		break;

	/* Challenge switch */
	case 2:
		if (sw->key)
			ret = tb_domain_challenge_switch_key(sw->tb, sw);
		break;

	default:
		break;
	}

	if (!ret) {
		sw->authorized = val;
		/* Notify status change to the userspace */
		kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE);
	}

unlock:
	mutex_unlock(&sw->tb->lock);
	return ret;
}

static ssize_t authorized_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct tb_switch *sw = tb_to_switch(dev);
	unsigned int val;
	ssize_t ret;

	ret = kstrtouint(buf, 0, &val);
	if (ret)
		return ret;
	if (val > 2)
		return -EINVAL;

	pm_runtime_get_sync(&sw->dev);
	ret = tb_switch_set_authorized(sw, val);
	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);

	return ret ? ret : count;
}
static DEVICE_ATTR_RW(authorized);

static ssize_t boot_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%u\n", sw->boot);
}
static DEVICE_ATTR_RO(boot);

static ssize_t device_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%#x\n", sw->device);
}
static DEVICE_ATTR_RO(device);

static ssize_t
device_name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%s\n", sw->device_name ? sw->device_name : "");
}
static DEVICE_ATTR_RO(device_name);

static ssize_t key_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);
	ssize_t ret;

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	if (sw->key)
		ret = sprintf(buf, "%*phN\n", TB_SWITCH_KEY_SIZE, sw->key);
	else
		ret = sprintf(buf, "\n");

	mutex_unlock(&sw->tb->lock);
	return ret;
}

static ssize_t key_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	struct tb_switch *sw = tb_to_switch(dev);
	u8 key[TB_SWITCH_KEY_SIZE];
	ssize_t ret = count;
	bool clear = false;

	if (!strcmp(buf, "\n"))
		clear = true;
	else if (hex2bin(key, buf, sizeof(key)))
		return -EINVAL;

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	if (sw->authorized) {
		ret = -EBUSY;
	} else {
		kfree(sw->key);
		if (clear) {
			sw->key = NULL;
		} else {
			sw->key = kmemdup(key, sizeof(key), GFP_KERNEL);
			if (!sw->key)
				ret = -ENOMEM;
		}
	}

	mutex_unlock(&sw->tb->lock);
	return ret;
}
static DEVICE_ATTR(key, 0600, key_show, key_store);

static void nvm_authenticate_start(struct tb_switch *sw)
{
	struct pci_dev *root_port;

	/*
	 * During host router NVM upgrade we should not allow root port to
	 * go into D3cold because some root ports cannot trigger PME
	 * itself. To be on the safe side keep the root port in D0 during
	 * the whole upgrade process.
	 */
	root_port = pci_find_pcie_root_port(sw->tb->nhi->pdev);
	if (root_port)
		pm_runtime_get_noresume(&root_port->dev);
}

static void nvm_authenticate_complete(struct tb_switch *sw)
{
	struct pci_dev *root_port;

	root_port = pci_find_pcie_root_port(sw->tb->nhi->pdev);
	if (root_port)
		pm_runtime_put(&root_port->dev);
}

static ssize_t nvm_authenticate_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);
	u32 status;

	nvm_get_auth_status(sw, &status);
	return sprintf(buf, "%#x\n", status);
}

static ssize_t nvm_authenticate_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct tb_switch *sw = tb_to_switch(dev);
	bool val;
	int ret;

	pm_runtime_get_sync(&sw->dev);

	if (!mutex_trylock(&sw->tb->lock)) {
		ret = restart_syscall();
		goto exit_rpm;
	}

	/* If NVMem devices are not yet added */
	if (!sw->nvm) {
		ret = -EAGAIN;
		goto exit_unlock;
	}

	ret = kstrtobool(buf, &val);
	if (ret)
		goto exit_unlock;

	/* Always clear the authentication status */
	nvm_clear_auth_status(sw);

	if (val) {
		if (!sw->nvm->buf) {
			ret = -EINVAL;
			goto exit_unlock;
		}

		ret = nvm_validate_and_write(sw);
		if (ret)
			goto exit_unlock;

		sw->nvm->authenticating = true;

		if (!tb_route(sw)) {
			/*
			 * Keep root port from suspending as long as the
			 * NVM upgrade process is running.
			 */
			nvm_authenticate_start(sw);
			ret = nvm_authenticate_host(sw);
		} else {
			ret = nvm_authenticate_device(sw);
		}
	}

exit_unlock:
	mutex_unlock(&sw->tb->lock);
exit_rpm:
	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);

	if (ret)
		return ret;
	return count;
}
static DEVICE_ATTR_RW(nvm_authenticate);

static ssize_t nvm_version_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);
	int ret;

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	if (sw->safe_mode)
		ret = -ENODATA;
	else if (!sw->nvm)
		ret = -EAGAIN;
	else
		ret = sprintf(buf, "%x.%x\n", sw->nvm->major, sw->nvm->minor);

	mutex_unlock(&sw->tb->lock);

	return ret;
}
static DEVICE_ATTR_RO(nvm_version);

static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%#x\n", sw->vendor);
}
static DEVICE_ATTR_RO(vendor);

static ssize_t
vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%s\n", sw->vendor_name ? sw->vendor_name : "");
}
static DEVICE_ATTR_RO(vendor_name);

static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%pUb\n", sw->uuid);
}
static DEVICE_ATTR_RO(unique_id);

static struct attribute *switch_attrs[] = {
	&dev_attr_authorized.attr,
	&dev_attr_boot.attr,
	&dev_attr_device.attr,
	&dev_attr_device_name.attr,
	&dev_attr_key.attr,
	&dev_attr_nvm_authenticate.attr,
	&dev_attr_nvm_version.attr,
	&dev_attr_vendor.attr,
	&dev_attr_vendor_name.attr,
	&dev_attr_unique_id.attr,
	NULL,
};

static umode_t switch_attr_is_visible(struct kobject *kobj,
				      struct attribute *attr, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct tb_switch *sw = tb_to_switch(dev);

	if (attr == &dev_attr_device.attr) {
		if (!sw->device)
			return 0;
	} else if (attr == &dev_attr_device_name.attr) {
		if (!sw->device_name)
			return 0;
	} else if (attr == &dev_attr_vendor.attr) {
		if (!sw->vendor)
			return 0;
	} else if (attr == &dev_attr_vendor_name.attr) {
		if (!sw->vendor_name)
			return 0;
	} else if (attr == &dev_attr_key.attr) {
		if (tb_route(sw) &&
		    sw->tb->security_level == TB_SECURITY_SECURE &&
		    sw->security_level == TB_SECURITY_SECURE)
			return attr->mode;
		return 0;
	} else if (attr == &dev_attr_nvm_authenticate.attr) {
		if (sw->dma_port && !sw->no_nvm_upgrade)
			return attr->mode;
		return 0;
	} else if (attr == &dev_attr_nvm_version.attr) {
		if (sw->dma_port)
			return attr->mode;
		return 0;
	} else if (attr == &dev_attr_boot.attr) {
		if (tb_route(sw))
			return attr->mode;
		return 0;
	}

	return sw->safe_mode ? 0 : attr->mode;
}

static struct attribute_group switch_group = {
	.is_visible = switch_attr_is_visible,
	.attrs = switch_attrs,
};

static const struct attribute_group *switch_groups[] = {
	&switch_group,
	NULL,
};

static void tb_switch_release(struct device *dev)
{
	struct tb_switch *sw = tb_to_switch(dev);
	int i;

	dma_port_free(sw->dma_port);

	for (i = 1; i <= sw->config.max_port_number; i++) {
		if (!sw->ports[i].disabled) {
			ida_destroy(&sw->ports[i].in_hopids);
			ida_destroy(&sw->ports[i].out_hopids);
		}
	}

	kfree(sw->uuid);
	kfree(sw->device_name);
	kfree(sw->vendor_name);
	kfree(sw->ports);
	kfree(sw->drom);
	kfree(sw->key);
	kfree(sw);
}

/*
 * Currently we only need to provide the callbacks. Everything else is
 * handled in the connection manager.
 */
static int __maybe_unused tb_switch_runtime_suspend(struct device *dev)
{
	struct tb_switch *sw = tb_to_switch(dev);
	const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;

	if (cm_ops->runtime_suspend_switch)
		return cm_ops->runtime_suspend_switch(sw);

	return 0;
}

static int __maybe_unused tb_switch_runtime_resume(struct device *dev)
{
	struct tb_switch *sw = tb_to_switch(dev);
	const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;

	if (cm_ops->runtime_resume_switch)
		return cm_ops->runtime_resume_switch(sw);
	return 0;
}

static const struct dev_pm_ops tb_switch_pm_ops = {
	SET_RUNTIME_PM_OPS(tb_switch_runtime_suspend, tb_switch_runtime_resume,
			   NULL)
};

struct device_type tb_switch_type = {
	.name = "thunderbolt_device",
	.release = tb_switch_release,
	.pm = &tb_switch_pm_ops,
};

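/* Map the switch device ID to its Thunderbolt hardware generation */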
static int tb_switch_get_generation(struct tb_switch *sw)
{
	switch (sw->config.device_id) {
	case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
	case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
	case PCI_DEVICE_ID_INTEL_LIGHT_PEAK:
	case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C:
	case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
	case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
	case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_4C_BRIDGE:
		return 1;

	case PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE:
		return 2;

	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ICL_NHI0:
	case PCI_DEVICE_ID_INTEL_ICL_NHI1:
		return 3;

	default:
		/*
		 * For unknown switches assume generation to be 1 to be
		 * on the safe side.
		 */
		tb_sw_warn(sw, "unsupported switch device id %#x\n",
			   sw->config.device_id);
		return 1;
	}
}

/**
 * tb_switch_alloc() - allocate a switch
 * @tb: Pointer to the owning domain
 * @parent: Parent device for this switch
 * @route: Route string for this switch
 *
 * Allocates and initializes a switch. Will not upload configuration to
 * the switch. For that you need to call tb_switch_configure()
 * separately. The returned switch should be released by calling
 * tb_switch_put().
 *
 * Return: Pointer to the allocated switch or ERR_PTR() in case of
 * failure.
 */
struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
				  u64 route)
{
	struct tb_switch *sw;
	int upstream_port;
	int i, ret, depth;

	/* Make sure we do not exceed maximum topology limit */
	depth = tb_route_length(route);
	if (depth > TB_SWITCH_MAX_DEPTH)
		return ERR_PTR(-EADDRNOTAVAIL);

	upstream_port = tb_cfg_get_upstream_port(tb->ctl, route);
	if (upstream_port < 0)
		return ERR_PTR(upstream_port);

	sw = kzalloc(sizeof(*sw), GFP_KERNEL);
	if (!sw)
		return ERR_PTR(-ENOMEM);

	sw->tb = tb;
	ret = tb_cfg_read(tb->ctl, &sw->config, route, 0, TB_CFG_SWITCH, 0, 5);
	if (ret)
		goto err_free_sw_ports;

	tb_dbg(tb, "current switch config:\n");
	tb_dump_switch(tb, &sw->config);

	/* configure switch */
	sw->config.upstream_port_number = upstream_port;
	sw->config.depth = depth;
	sw->config.route_hi = upper_32_bits(route);
	sw->config.route_lo = lower_32_bits(route);
	sw->config.enabled = 0;

	/* initialize ports */
	sw->ports = kcalloc(sw->config.max_port_number + 1, sizeof(*sw->ports),
			    GFP_KERNEL);
	if (!sw->ports) {
		ret = -ENOMEM;
		goto err_free_sw_ports;
	}

	for (i = 0; i <= sw->config.max_port_number; i++) {
		/* minimum setup for tb_find_cap and tb_drom_read to work */
		sw->ports[i].sw = sw;
		sw->ports[i].port = i;
	}

	sw->generation = tb_switch_get_generation(sw);

	ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_PLUG_EVENTS);
	if (ret < 0) {
		tb_sw_warn(sw, "cannot find TB_VSE_CAP_PLUG_EVENTS aborting\n");
		goto err_free_sw_ports;
	}
	sw->cap_plug_events = ret;

	ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_LINK_CONTROLLER);
	if (ret > 0)
		sw->cap_lc = ret;

	/* Root switch is always authorized */
	if (!route)
		sw->authorized = true;

	device_initialize(&sw->dev);
	sw->dev.parent = parent;
	sw->dev.bus = &tb_bus_type;
	sw->dev.type = &tb_switch_type;
	sw->dev.groups = switch_groups;
	dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));

	return sw;

err_free_sw_ports:
	kfree(sw->ports);
	kfree(sw);

	return ERR_PTR(ret);
}

/**
 * tb_switch_alloc_safe_mode() - allocate a switch that is in safe mode
 * @tb: Pointer to the owning domain
 * @parent: Parent device for this switch
 * @route: Route string for this switch
 *
 * This creates a switch in safe mode. This means the switch pretty much
 * lacks all capabilities except DMA configuration port before it is
 * flashed with a valid NVM firmware.
 *
 * The returned switch must be released by calling tb_switch_put().
 *
 * Return: Pointer to the allocated switch or ERR_PTR() in case of failure
 */
struct tb_switch *
tb_switch_alloc_safe_mode(struct tb *tb, struct device *parent, u64 route)
{
	struct tb_switch *sw;

	sw = kzalloc(sizeof(*sw), GFP_KERNEL);
	if (!sw)
		return ERR_PTR(-ENOMEM);

	sw->tb = tb;
	sw->config.depth = tb_route_length(route);
	sw->config.route_hi = upper_32_bits(route);
	sw->config.route_lo = lower_32_bits(route);
	sw->safe_mode = true;

	device_initialize(&sw->dev);
	sw->dev.parent = parent;
	sw->dev.bus = &tb_bus_type;
	sw->dev.type = &tb_switch_type;
	sw->dev.groups = switch_groups;
	dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));

	return sw;
}

/**
 * tb_switch_configure() - Uploads configuration to the switch
 * @sw: Switch to configure
 *
 * Call this function before the switch is added to the system. It will
 * upload configuration to the switch and make it available for the
 * connection manager to use.
 *
 * Return: %0 in case of success and negative errno in case of failure
 */
int tb_switch_configure(struct tb_switch *sw)
{
	struct tb *tb = sw->tb;
	u64 route;
	int ret;

	route = tb_route(sw);
	tb_dbg(tb, "initializing Switch at %#llx (depth: %d, up port: %d)\n",
	       route, tb_route_length(route), sw->config.upstream_port_number);

	if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL)
		tb_sw_warn(sw, "unknown switch vendor id %#x\n",
			   sw->config.vendor_id);

	sw->config.enabled = 1;

	/* upload configuration */
	ret = tb_sw_write(sw, 1 + (u32 *)&sw->config, TB_CFG_SWITCH, 1, 3);
	if (ret)
		return ret;

	ret = tb_lc_configure_link(sw);
	if (ret)
		return ret;

	return tb_plug_events_active(sw, true);
}

static int tb_switch_set_uuid(struct tb_switch *sw)
{
	u32 uuid[4];
	int ret;

	if (sw->uuid)
		return 0;

	/*
	 * The newer controllers include fused UUID as part of link
	 * controller specific registers
	 */
	ret = tb_lc_read_uuid(sw, uuid);
	if (ret) {
		/*
		 * ICM generates UUID based on UID and fills the upper
		 * two words with ones. This is not strictly following
		 * UUID format but we want to be compatible with it so
		 * we do the same here.
		 */
		uuid[0] = sw->uid & 0xffffffff;
		uuid[1] = (sw->uid >> 32) & 0xffffffff;
		uuid[2] = 0xffffffff;
		uuid[3] = 0xffffffff;
	}

	sw->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL);
	if (!sw->uuid)
		return -ENOMEM;
	return 0;
}

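/*
 * Set up the DMA configuration port mailbox used for NVM access and
 * handle the result of any flash authentication that was still pending
 * across a power cycle.
 */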
static int tb_switch_add_dma_port(struct tb_switch *sw)
{
	u32 status;
	int ret;

	switch (sw->generation) {
	case 2:
		/* Only root switch can be upgraded */
		if (tb_route(sw))
			return 0;

		/* fallthrough */
	case 3:
		ret = tb_switch_set_uuid(sw);
		if (ret)
			return ret;
		break;

	default:
		/*
		 * DMA port is the only thing available when the switch
		 * is in safe mode.
		 */
		if (!sw->safe_mode)
			return 0;
		break;
	}

	/* Root switch DMA port requires running firmware */
	if (!tb_route(sw) && sw->config.enabled)
		return 0;

	sw->dma_port = dma_port_alloc(sw);
	if (!sw->dma_port)
		return 0;

	if (sw->no_nvm_upgrade)
		return 0;

	/*
	 * If there is status already set then authentication failed
	 * when the dma_port_flash_update_auth() returned. Power cycling
	 * is not needed (it was done already) so the only thing we do
	 * here is to unblock runtime PM of the root port.
	 */
	nvm_get_auth_status(sw, &status);
	if (status) {
		if (!tb_route(sw))
			nvm_authenticate_complete(sw);
		return 0;
	}

	/*
	 * Check status of the previous flash authentication. If there
	 * is one we need to power cycle the switch in any case to make
	 * it functional again.
	 */
	ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
	if (ret <= 0)
		return ret;

	/* Now we can allow root port to suspend again */
	if (!tb_route(sw))
		nvm_authenticate_complete(sw);

	if (status) {
		tb_sw_info(sw, "switch flash authentication failed\n");
		nvm_set_auth_status(sw, status);
	}

	tb_sw_info(sw, "power cycling the switch now\n");
	dma_port_power_cycle(sw->dma_port);

	/*
	 * We return an error here, which causes the switch add to fail.
	 * The switch should appear back after the power cycle is
	 * complete.
	 */
	return -ESHUTDOWN;
}

1791/**
1792 * tb_switch_add() - Add a switch to the domain
1793 * @sw: Switch to add
1794 *
1795 * This is the last step in adding switch to the domain. It will read
1796 * identification information from DROM and initializes ports so that
1797 * they can be used to connect other switches. The switch will be
1798 * exposed to the userspace when this function successfully returns. To
1799 * remove and release the switch, call tb_switch_remove().
1800 *
1801 * Return: %0 in case of success and negative errno in case of failure
1802 */
1803int tb_switch_add(struct tb_switch *sw)
1804{
1805 int i, ret;
1806
1807 /*
1808 * Initialize DMA control port now before we read DROM. Recent
1809 * host controllers have more complete DROM on NVM that includes
1810 * vendor and model identification strings which we then expose
1811 * to the userspace. NVM can be accessed through DMA
1812 * configuration based mailbox.
1813 */
1814 ret = tb_switch_add_dma_port(sw);
1815 if (ret)
1816 return ret;
1817
1818 if (!sw->safe_mode) {
1819 /* read drom */
1820 ret = tb_drom_read(sw);
1821 if (ret) {
1822 tb_sw_warn(sw, "tb_eeprom_read_rom failed\n");
1823 return ret;
1824 }
David Brazdil0f672f62019-12-10 10:32:29 +00001825 tb_sw_dbg(sw, "uid: %#llx\n", sw->uid);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001826
David Brazdil0f672f62019-12-10 10:32:29 +00001827 ret = tb_switch_set_uuid(sw);
1828 if (ret)
1829 return ret;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001830
1831 for (i = 0; i <= sw->config.max_port_number; i++) {
1832 if (sw->ports[i].disabled) {
David Brazdil0f672f62019-12-10 10:32:29 +00001833 tb_port_dbg(&sw->ports[i], "disabled by eeprom\n");
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001834 continue;
1835 }
1836 ret = tb_init_port(&sw->ports[i]);
1837 if (ret)
1838 return ret;
1839 }
1840 }
1841
1842 ret = device_add(&sw->dev);
1843 if (ret)
1844 return ret;
1845
David Brazdil0f672f62019-12-10 10:32:29 +00001846 if (tb_route(sw)) {
1847 dev_info(&sw->dev, "new device found, vendor=%#x device=%#x\n",
1848 sw->vendor, sw->device);
1849 if (sw->vendor_name && sw->device_name)
1850 dev_info(&sw->dev, "%s %s\n", sw->vendor_name,
1851 sw->device_name);
1852 }
1853
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001854 ret = tb_switch_nvm_add(sw);
1855 if (ret) {
1856 device_del(&sw->dev);
1857 return ret;
1858 }
1859
1860 pm_runtime_set_active(&sw->dev);
1861 if (sw->rpm) {
1862 pm_runtime_set_autosuspend_delay(&sw->dev, TB_AUTOSUSPEND_DELAY);
1863 pm_runtime_use_autosuspend(&sw->dev);
1864 pm_runtime_mark_last_busy(&sw->dev);
1865 pm_runtime_enable(&sw->dev);
1866 pm_request_autosuspend(&sw->dev);
1867 }
1868
1869 return 0;
1870}
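
/*
 * Example (an illustrative sketch, not taken verbatim from a caller,
 * assuming the tb_switch_alloc()/tb_switch_put() helpers declared in
 * tb.h): tb_switch_add() is expected to be paired with allocation and
 * a put on failure, roughly like this:
 *
 *	sw = tb_switch_alloc(tb, parent, route);
 *	if (IS_ERR(sw))
 *		return PTR_ERR(sw);
 *
 *	if (tb_switch_add(sw)) {
 *		tb_switch_put(sw);
 *		return -EIO;
 *	}
 */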

/**
 * tb_switch_remove() - Remove and release a switch
 * @sw: Switch to remove
 *
 * This will remove the switch from the domain and release it after the
 * last reference to it is dropped. If there are switches connected
 * below this switch, they will be removed as well.
 */
void tb_switch_remove(struct tb_switch *sw)
{
	int i;

	if (sw->rpm) {
		pm_runtime_get_sync(&sw->dev);
		pm_runtime_disable(&sw->dev);
	}

	/* port 0 is the switch itself and never has a remote */
	for (i = 1; i <= sw->config.max_port_number; i++) {
		if (tb_port_has_remote(&sw->ports[i])) {
			tb_switch_remove(sw->ports[i].remote->sw);
			sw->ports[i].remote = NULL;
		} else if (sw->ports[i].xdomain) {
			tb_xdomain_remove(sw->ports[i].xdomain);
			sw->ports[i].xdomain = NULL;
		}
	}

	if (!sw->is_unplugged)
		tb_plug_events_active(sw, false);
	tb_lc_unconfigure_link(sw);

	tb_switch_nvm_remove(sw);

	if (tb_route(sw))
		dev_info(&sw->dev, "device disconnected\n");
	device_unregister(&sw->dev);
}

/**
 * tb_sw_set_unplugged() - set is_unplugged on switch and downstream switches
 * @sw: Switch whose is_unplugged flag to set
 */
void tb_sw_set_unplugged(struct tb_switch *sw)
{
	int i;

	if (sw == sw->tb->root_switch) {
		tb_sw_WARN(sw, "cannot unplug root switch\n");
		return;
	}
	if (sw->is_unplugged) {
		tb_sw_WARN(sw, "is_unplugged already set\n");
		return;
	}
	sw->is_unplugged = true;
	for (i = 0; i <= sw->config.max_port_number; i++) {
		if (tb_port_has_remote(&sw->ports[i]))
			tb_sw_set_unplugged(sw->ports[i].remote->sw);
		else if (sw->ports[i].xdomain)
			sw->ports[i].xdomain->is_unplugged = true;
	}
}

int tb_switch_resume(struct tb_switch *sw)
{
	int i, err;

	tb_sw_dbg(sw, "resuming switch\n");

	/*
	 * Check the UID of the connected switches, except for the root
	 * switch which we assume cannot be removed.
	 */
	if (tb_route(sw)) {
		u64 uid;

		/*
		 * Check first that we can still read the switch config
		 * space. It may be that there is now another domain
		 * connected.
		 */
		err = tb_cfg_get_upstream_port(sw->tb->ctl, tb_route(sw));
		if (err < 0) {
			tb_sw_info(sw, "switch not present anymore\n");
			return err;
		}

		err = tb_drom_read_uid_only(sw, &uid);
		if (err) {
			tb_sw_warn(sw, "uid read failed\n");
			return err;
		}
		if (sw->uid != uid) {
			tb_sw_info(sw,
				"changed while suspended (uid %#llx -> %#llx)\n",
				sw->uid, uid);
			return -ENODEV;
		}
	}

	/* Upload configuration: write back dwords 1..3 of the cached config */
	err = tb_sw_write(sw, 1 + (u32 *) &sw->config, TB_CFG_SWITCH, 1, 3);
	if (err)
		return err;

	err = tb_lc_configure_link(sw);
	if (err)
		return err;

	err = tb_plug_events_active(sw, true);
	if (err)
		return err;

	/* check for surviving downstream switches */
	for (i = 1; i <= sw->config.max_port_number; i++) {
		struct tb_port *port = &sw->ports[i];

		if (!tb_port_has_remote(port) && !port->xdomain)
			continue;

		if (tb_wait_for_port(port, true) <= 0) {
			tb_port_warn(port,
				     "lost during suspend, disconnecting\n");
			if (tb_port_has_remote(port))
				tb_sw_set_unplugged(port->remote->sw);
			else if (port->xdomain)
				port->xdomain->is_unplugged = true;
		} else if (tb_port_has_remote(port)) {
			if (tb_switch_resume(port->remote->sw)) {
				tb_port_warn(port,
					     "lost during suspend, disconnecting\n");
				tb_sw_set_unplugged(port->remote->sw);
			}
		}
	}
	return 0;
}
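
/*
 * Note (summary only): a failed downstream resume does not fail this
 * function. The affected subtree is merely flagged through
 * tb_sw_set_unplugged(), presumably so that the caller can tear it
 * down afterwards.
 */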

void tb_switch_suspend(struct tb_switch *sw)
{
	int i, err;

	err = tb_plug_events_active(sw, false);
	if (err)
		return;

	for (i = 1; i <= sw->config.max_port_number; i++) {
		if (tb_port_has_remote(&sw->ports[i]))
			tb_switch_suspend(sw->ports[i].remote->sw);
	}

	tb_lc_set_sleep(sw);
}

/* Lookup key for tb_switch_match(); used by the tb_switch_find_*() helpers */
struct tb_sw_lookup {
	struct tb *tb;
	u8 link;
	u8 depth;
	const uuid_t *uuid;
	u64 route;
};

static int tb_switch_match(struct device *dev, const void *data)
{
	struct tb_switch *sw = tb_to_switch(dev);
	const struct tb_sw_lookup *lookup = data;

	if (!sw)
		return 0;
	if (sw->tb != lookup->tb)
		return 0;

	if (lookup->uuid)
		return !memcmp(sw->uuid, lookup->uuid, sizeof(*lookup->uuid));

	if (lookup->route) {
		return sw->config.route_lo == lower_32_bits(lookup->route) &&
		       sw->config.route_hi == upper_32_bits(lookup->route);
	}

	/* Root switch is matched only by depth */
	if (!lookup->depth)
		return !sw->depth;

	return sw->link == lookup->link && sw->depth == lookup->depth;
}
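
/*
 * Illustrative sketch (mirrors the find helpers below): a lookup key
 * is zero-initialized, one selector is filled in and the key is passed
 * to bus_find_device() with tb_switch_match() as the matcher. UUID
 * takes precedence over route, which takes precedence over link/depth:
 *
 *	struct tb_sw_lookup lookup = { .tb = tb, .uuid = uuid };
 *	struct device *dev;
 *
 *	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
 */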

/**
 * tb_switch_find_by_link_depth() - Find switch by link and depth
 * @tb: Domain the switch belongs to
 * @link: Link number the switch is connected to
 * @depth: Depth of the switch in the link
 *
 * The returned switch has its reference count increased so the caller
 * needs to call tb_switch_put() when done with the switch.
 */
struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link, u8 depth)
{
	struct tb_sw_lookup lookup;
	struct device *dev;

	memset(&lookup, 0, sizeof(lookup));
	lookup.tb = tb;
	lookup.link = link;
	lookup.depth = depth;

	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
	if (dev)
		return tb_to_switch(dev);

	return NULL;
}

/**
 * tb_switch_find_by_uuid() - Find switch by UUID
 * @tb: Domain the switch belongs to
 * @uuid: UUID to look for
 *
 * The returned switch has its reference count increased so the caller
 * needs to call tb_switch_put() when done with the switch.
 */
struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid)
{
	struct tb_sw_lookup lookup;
	struct device *dev;

	memset(&lookup, 0, sizeof(lookup));
	lookup.tb = tb;
	lookup.uuid = uuid;

	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
	if (dev)
		return tb_to_switch(dev);

	return NULL;
}

/**
 * tb_switch_find_by_route() - Find switch by route string
 * @tb: Domain the switch belongs to
 * @route: Route string to look for
 *
 * The returned switch has its reference count increased so the caller
 * needs to call tb_switch_put() when done with the switch.
 */
struct tb_switch *tb_switch_find_by_route(struct tb *tb, u64 route)
{
	struct tb_sw_lookup lookup;
	struct device *dev;

	if (!route)
		return tb_switch_get(tb->root_switch);

	memset(&lookup, 0, sizeof(lookup));
	lookup.tb = tb;
	lookup.route = route;

	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
	if (dev)
		return tb_to_switch(dev);

	return NULL;
}
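
/*
 * Example (sketch): all three find helpers above return the switch
 * with a reference held, so a typical caller drops it when done:
 *
 *	sw = tb_switch_find_by_route(tb, route);
 *	if (sw) {
 *		...
 *		tb_switch_put(sw);
 *	}
 */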

void tb_switch_exit(void)
{
	ida_destroy(&nvm_ida);
}