// SPDX-License-Identifier: MIT
/*
 * AMD Trusted Execution Environment (TEE) interface
 *
 * Author: Rijo Thomas <Rijo-john.Thomas@amd.com>
 * Author: Devaraj Rangasamy <Devaraj.Rangasamy@amd.com>
 *
 * Copyright 2019 Advanced Micro Devices, Inc.
 */

#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/gfp.h>
#include <linux/psp-sev.h>
#include <linux/psp-tee.h>

#include "psp-dev.h"
#include "tee-dev.h"

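/*
 * Set once a PSP command times out (see tee_wait_cmd_poll() and
 * tee_wait_cmd_completion()). Once set, no further TEE commands are
 * submitted to the PSP.
 */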
static bool psp_dead;

static int tee_alloc_ring(struct psp_tee_device *tee, int ring_size)
{
	struct ring_buf_manager *rb_mgr = &tee->rb_mgr;
	void *start_addr;

	if (!ring_size)
		return -EINVAL;

	/* We need actual physical address instead of DMA address, since
	 * Trusted OS running on AMD Secure Processor will map this region
	 */
	start_addr = (void *)__get_free_pages(GFP_KERNEL, get_order(ring_size));
	if (!start_addr)
		return -ENOMEM;

	memset(start_addr, 0x0, ring_size);
	rb_mgr->ring_start = start_addr;
	rb_mgr->ring_size = ring_size;
	rb_mgr->ring_pa = __psp_pa(start_addr);
	mutex_init(&rb_mgr->mutex);

	return 0;
}

static void tee_free_ring(struct psp_tee_device *tee)
{
	struct ring_buf_manager *rb_mgr = &tee->rb_mgr;

	if (!rb_mgr->ring_start)
		return;

	free_pages((unsigned long)rb_mgr->ring_start,
		   get_order(rb_mgr->ring_size));

	rb_mgr->ring_start = NULL;
	rb_mgr->ring_size = 0;
	rb_mgr->ring_pa = 0;
	mutex_destroy(&rb_mgr->mutex);
}

static int tee_wait_cmd_poll(struct psp_tee_device *tee, unsigned int timeout,
			     unsigned int *reg)
{
	/* ~10ms sleep per loop => nloop = timeout * 100 */
	int nloop = timeout * 100;

	while (--nloop) {
		*reg = ioread32(tee->io_regs + tee->vdata->cmdresp_reg);
		if (*reg & PSP_CMDRESP_RESP)
			return 0;

		usleep_range(10000, 10100);
	}

	dev_err(tee->dev, "tee: command timed out, disabling PSP\n");
	psp_dead = true;

	return -ETIMEDOUT;
}

static
struct tee_init_ring_cmd *tee_alloc_cmd_buffer(struct psp_tee_device *tee)
{
	struct tee_init_ring_cmd *cmd;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (!cmd)
		return NULL;

	cmd->hi_addr = upper_32_bits(tee->rb_mgr.ring_pa);
	cmd->low_addr = lower_32_bits(tee->rb_mgr.ring_pa);
	cmd->size = tee->rb_mgr.ring_size;

	dev_dbg(tee->dev, "tee: ring address: high = 0x%x low = 0x%x size = %u\n",
		cmd->hi_addr, cmd->low_addr, cmd->size);

	return cmd;
}

static inline void tee_free_cmd_buffer(struct tee_init_ring_cmd *cmd)
{
	kfree(cmd);
}
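
/*
 * Ring initialization handshake: the physical address of a tee_init_ring_cmd
 * buffer (ring base address and size) is written to the CPU-PSP command
 * buffer registers, then TEE_RING_INIT_CMD is written to the command/response
 * register. Completion is signalled by the PSP setting PSP_CMDRESP_RESP in
 * that same register; PSP_CMDRESP_ERR_MASK carries any error code.
 */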

static int tee_init_ring(struct psp_tee_device *tee)
{
	int ring_size = MAX_RING_BUFFER_ENTRIES * sizeof(struct tee_ring_cmd);
	struct tee_init_ring_cmd *cmd;
	phys_addr_t cmd_buffer;
	unsigned int reg;
	int ret;

	BUILD_BUG_ON(sizeof(struct tee_ring_cmd) != 1024);

	ret = tee_alloc_ring(tee, ring_size);
	if (ret) {
		dev_err(tee->dev, "tee: ring allocation failed %d\n", ret);
		return ret;
	}

	tee->rb_mgr.wptr = 0;

	cmd = tee_alloc_cmd_buffer(tee);
	if (!cmd) {
		tee_free_ring(tee);
		return -ENOMEM;
	}

	cmd_buffer = __psp_pa((void *)cmd);

	/* Send command buffer details to Trusted OS by writing to
	 * CPU-PSP message registers
	 */

	iowrite32(lower_32_bits(cmd_buffer),
		  tee->io_regs + tee->vdata->cmdbuff_addr_lo_reg);
	iowrite32(upper_32_bits(cmd_buffer),
		  tee->io_regs + tee->vdata->cmdbuff_addr_hi_reg);
	iowrite32(TEE_RING_INIT_CMD,
		  tee->io_regs + tee->vdata->cmdresp_reg);

	ret = tee_wait_cmd_poll(tee, TEE_DEFAULT_TIMEOUT, &reg);
	if (ret) {
		dev_err(tee->dev, "tee: ring init command timed out\n");
		tee_free_ring(tee);
		goto free_buf;
	}

	if (reg & PSP_CMDRESP_ERR_MASK) {
		dev_err(tee->dev, "tee: ring init command failed (%#010x)\n",
			reg & PSP_CMDRESP_ERR_MASK);
		tee_free_ring(tee);
		ret = -EIO;
	}

free_buf:
	tee_free_cmd_buffer(cmd);

	return ret;
}

static void tee_destroy_ring(struct psp_tee_device *tee)
{
	unsigned int reg;
	int ret;

	if (!tee->rb_mgr.ring_start)
		return;

	if (psp_dead)
		goto free_ring;

	iowrite32(TEE_RING_DESTROY_CMD,
		  tee->io_regs + tee->vdata->cmdresp_reg);

	ret = tee_wait_cmd_poll(tee, TEE_DEFAULT_TIMEOUT, &reg);
	if (ret) {
		dev_err(tee->dev, "tee: ring destroy command timed out\n");
	} else if (reg & PSP_CMDRESP_ERR_MASK) {
		dev_err(tee->dev, "tee: ring destroy command failed (%#010x)\n",
			reg & PSP_CMDRESP_ERR_MASK);
	}

free_ring:
	tee_free_ring(tee);
}

int tee_dev_init(struct psp_device *psp)
{
	struct device *dev = psp->dev;
	struct psp_tee_device *tee;
	int ret;

	ret = -ENOMEM;
	tee = devm_kzalloc(dev, sizeof(*tee), GFP_KERNEL);
	if (!tee)
		goto e_err;

	psp->tee_data = tee;

	tee->dev = dev;
	tee->psp = psp;

	tee->io_regs = psp->io_regs;

	tee->vdata = (struct tee_vdata *)psp->vdata->tee;
	if (!tee->vdata) {
		ret = -ENODEV;
		dev_err(dev, "tee: missing driver data\n");
		goto e_err;
	}

	ret = tee_init_ring(tee);
	if (ret) {
		dev_err(dev, "tee: failed to init ring buffer\n");
		goto e_err;
	}

	dev_notice(dev, "tee enabled\n");

	return 0;

e_err:
	psp->tee_data = NULL;

	dev_notice(dev, "tee initialization failed\n");

	return ret;
}

void tee_dev_destroy(struct psp_device *psp)
{
	struct psp_tee_device *tee = psp->tee_data;

	if (!tee)
		return;

	tee_destroy_ring(tee);
}
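
/*
 * Command submission uses the shared ring buffer: the driver writes the
 * command into the slot at the local write pointer, marks it
 * CMD_WAITING_FOR_RESPONSE and advances the write pointer register to
 * notify the Trusted OS. The Trusted OS places its response in the same
 * slot; the driver then marks the slot CMD_RESPONSE_COPIED (or
 * CMD_RESPONSE_TIMEDOUT on timeout) so it can be reused.
 */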

static int tee_submit_cmd(struct psp_tee_device *tee, enum tee_cmd_id cmd_id,
			  void *buf, size_t len, struct tee_ring_cmd **resp)
{
	struct tee_ring_cmd *cmd;
	int nloop = 1000, ret = 0;
	u32 rptr;

	*resp = NULL;

	mutex_lock(&tee->rb_mgr.mutex);

	/* Loop until empty entry found in ring buffer */
	do {
		/* Get pointer to ring buffer command entry */
		cmd = (struct tee_ring_cmd *)
			(tee->rb_mgr.ring_start + tee->rb_mgr.wptr);

		rptr = ioread32(tee->io_regs + tee->vdata->ring_rptr_reg);

		/* Check if ring buffer is full or command entry is waiting
		 * for response from TEE
		 */
		if (!(tee->rb_mgr.wptr + sizeof(struct tee_ring_cmd) == rptr ||
		      cmd->flag == CMD_WAITING_FOR_RESPONSE))
			break;

		dev_dbg(tee->dev, "tee: ring buffer full. rptr = %u wptr = %u\n",
			rptr, tee->rb_mgr.wptr);

		/* Wait if ring buffer is full or TEE is processing data */
		mutex_unlock(&tee->rb_mgr.mutex);
		schedule_timeout_interruptible(msecs_to_jiffies(10));
		mutex_lock(&tee->rb_mgr.mutex);

	} while (--nloop);

	if (!nloop &&
	    (tee->rb_mgr.wptr + sizeof(struct tee_ring_cmd) == rptr ||
	     cmd->flag == CMD_WAITING_FOR_RESPONSE)) {
		dev_err(tee->dev, "tee: ring buffer full. rptr = %u wptr = %u response flag %u\n",
			rptr, tee->rb_mgr.wptr, cmd->flag);
		ret = -EBUSY;
		goto unlock;
	}

	/* Do not submit command if PSP got disabled while processing any
	 * command in another thread
	 */
	if (psp_dead) {
		ret = -EBUSY;
		goto unlock;
	}

	/* Write command data into ring buffer */
	cmd->cmd_id = cmd_id;
	cmd->cmd_state = TEE_CMD_STATE_INIT;
	memset(&cmd->buf[0], 0, sizeof(cmd->buf));
	memcpy(&cmd->buf[0], buf, len);

	/* Indicate driver is waiting for response */
	cmd->flag = CMD_WAITING_FOR_RESPONSE;

	/* Update local copy of write pointer */
	tee->rb_mgr.wptr += sizeof(struct tee_ring_cmd);
	if (tee->rb_mgr.wptr >= tee->rb_mgr.ring_size)
		tee->rb_mgr.wptr = 0;

	/* Trigger interrupt to Trusted OS */
	iowrite32(tee->rb_mgr.wptr, tee->io_regs + tee->vdata->ring_wptr_reg);

	/* The response is provided by Trusted OS in same
	 * location as submitted data entry within ring buffer.
	 */
	*resp = cmd;

unlock:
	mutex_unlock(&tee->rb_mgr.mutex);

	return ret;
}

static int tee_wait_cmd_completion(struct psp_tee_device *tee,
				   struct tee_ring_cmd *resp,
				   unsigned int timeout)
{
	/* ~5ms sleep per loop => nloop = timeout * 200 */
	int nloop = timeout * 200;

	while (--nloop) {
		if (resp->cmd_state == TEE_CMD_STATE_COMPLETED)
			return 0;

		usleep_range(5000, 5100);
	}

	dev_err(tee->dev, "tee: command 0x%x timed out, disabling PSP\n",
		resp->cmd_id);

	psp_dead = true;

	return -ETIMEDOUT;
}

int psp_tee_process_cmd(enum tee_cmd_id cmd_id, void *buf, size_t len,
			u32 *status)
{
	struct psp_device *psp = psp_get_master_device();
	struct psp_tee_device *tee;
	struct tee_ring_cmd *resp;
	int ret;

	if (!buf || !status || !len || len > sizeof(resp->buf))
		return -EINVAL;

	*status = 0;

	if (!psp || !psp->tee_data)
		return -ENODEV;

	if (psp_dead)
		return -EBUSY;

	tee = psp->tee_data;

	ret = tee_submit_cmd(tee, cmd_id, buf, len, &resp);
	if (ret)
		return ret;

	ret = tee_wait_cmd_completion(tee, resp, TEE_DEFAULT_TIMEOUT);
	if (ret) {
		resp->flag = CMD_RESPONSE_TIMEDOUT;
		return ret;
	}

	memcpy(buf, &resp->buf[0], len);
	*status = resp->status;

	resp->flag = CMD_RESPONSE_COPIED;

	return 0;
}
EXPORT_SYMBOL(psp_tee_process_cmd);
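
/*
 * Example (illustrative only, not part of this driver): a TEE client such as
 * the amdtee driver is expected to call psp_tee_process_cmd() roughly like
 * this, where the command id and payload layout below are hypothetical and
 * defined by the client:
 *
 *	struct example_cmd cmd = {};	// hypothetical payload structure
 *	u32 status;
 *	int ret;
 *
 *	ret = psp_tee_process_cmd(EXAMPLE_CMD_ID, &cmd, sizeof(cmd), &status);
 *	if (ret)
 *		return ret;		// submission failed or timed out
 *	if (status)
 *		return -EIO;		// TEE returned a non-zero status
 */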

int psp_check_tee_status(void)
{
	struct psp_device *psp = psp_get_master_device();

	if (!psp || !psp->tee_data)
		return -ENODEV;

	return 0;
}
EXPORT_SYMBOL(psp_check_tee_status);
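
/*
 * Example (illustrative only): a client module would typically call
 * psp_check_tee_status() from its init path to confirm the PSP TEE
 * initialized successfully before issuing any commands:
 *
 *	if (psp_check_tee_status())
 *		return -ENODEV;		// no TEE available, bail out
 */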