// SPDX-License-Identifier: GPL-2.0
//
// Register map access API - SPI AVMM support
//
// Copyright (C) 2018-2020 Intel Corporation. All rights reserved.

#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/spi/spi.h>

/*
 * This driver implements the regmap operations for a generic SPI
 * master to access the registers of an SPI slave chip which has an
 * Avalon bus in it.
 *
 * The "SPI slave to Avalon Master Bridge" (spi-avmm) IP should be integrated
 * in the SPI slave chip. The IP acts as a bridge that converts encoded streams
 * of bytes from the host into internal register reads/writes on the Avalon
 * bus. In order to issue register access requests to the slave chip, the host
 * should send formatted bytes that conform to the transfer protocol.
 * The transfer protocol contains 3 layers: transaction layer, packet layer
 * and physical layer.
 *
 * Reference documents can be found at:
 * https://www.intel.com/content/www/us/en/programmable/documentation/sfo1400787952932.html
 *
 * Chapter "SPI Slave/JTAG to Avalon Master Bridge Cores" is a general
 * introduction to the protocol.
 *
 * Chapter "Avalon Packets to Transactions Converter Core" describes
 * the transaction layer.
 *
 * Chapter "Avalon-ST Bytes to Packets and Packets to Bytes Converter Cores"
 * describes the packet layer.
 *
 * Chapter "Avalon-ST Serial Peripheral Interface Core" describes the
 * physical layer.
 *
 *
 * When the host issues a regmap read/write, the driver transforms the request
 * into a byte stream layer by layer. It formats the register address, value
 * and length into a transaction layer request, converts the request to a
 * packet layer byte stream and then to a physical layer byte stream. Finally
 * the driver sends the formatted byte stream over the SPI bus to the slave
 * chip.
 *
 * The spi-avmm IP on the slave chip decodes the byte stream and initiates
 * register reads/writes on its internal Avalon bus, then encodes the
 * response into a byte stream and sends it back to the host.
 *
 * The driver receives the byte stream, reverses the 3-layer transformation,
 * and finally gets the response value (the data read out for a register read,
 * the size successfully written for a register write).
 */
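
/*
 * As a worked example (illustrative only; the values are chosen for this
 * comment, not taken from real hardware): a single-word read of register
 * 0x04 is encoded as follows.
 *
 * Transaction layer request (TRANS_CODE_READ, size 4, addr 0x04):
 *   10 00 00 04 00 00 00 04
 * Packet layer (SOP, CHANNEL and channel number 0 prepended; EOP inserted
 * before the last byte; none of these bytes needs escaping):
 *   7a 7c 00 10 00 00 04 00 00 00 7b 04
 * Physical layer: the 12 bytes above are already aligned to a 4-byte SPI
 * word, so no PHY_IDLE padding is needed and they go on the wire as-is.
 */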

#define PKT_SOP			0x7a
#define PKT_EOP			0x7b
#define PKT_CHANNEL		0x7c
#define PKT_ESC			0x7d

#define PHY_IDLE		0x4a
#define PHY_ESC			0x4d

#define TRANS_CODE_WRITE	0x0
#define TRANS_CODE_SEQ_WRITE	0x4
#define TRANS_CODE_READ		0x10
#define TRANS_CODE_SEQ_READ	0x14
#define TRANS_CODE_NO_TRANS	0x7f

#define SPI_AVMM_XFER_TIMEOUT	(msecs_to_jiffies(200))

/* slave's register addr is 32 bits */
#define SPI_AVMM_REG_SIZE	4UL
/* slave's register value is 32 bits */
#define SPI_AVMM_VAL_SIZE	4UL

/*
 * The max rx size could be larger, but to limit buffer consumption we cap
 * transfers at 1KB max.
 */
#define MAX_READ_CNT		256UL
#define MAX_WRITE_CNT		1UL

struct trans_req_header {
	u8 code;
	u8 rsvd;
	__be16 size;
	__be32 addr;
} __packed;

struct trans_resp_header {
	u8 r_code;
	u8 rsvd;
	__be16 size;
} __packed;

#define TRANS_REQ_HD_SIZE	(sizeof(struct trans_req_header))
#define TRANS_RESP_HD_SIZE	(sizeof(struct trans_resp_header))
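
/*
 * For illustration (this header is not used by the code below): a sequential
 * read of two words starting at address 0x100 is described by the request
 * header bytes
 *   14 00 00 08 00 00 01 00
 * i.e. code = TRANS_CODE_SEQ_READ, rsvd = 0, size = cpu_to_be16(8),
 * addr = cpu_to_be32(0x100).
 */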

/*
 * In the transaction layer,
 * the write request format is: transaction request header + data
 * the read request format is: transaction request header
 * the write response format is: transaction response header
 * the read response format is: pure data, no transaction response header
 */
#define TRANS_WR_TX_SIZE(n)	(TRANS_REQ_HD_SIZE + SPI_AVMM_VAL_SIZE * (n))
#define TRANS_RD_TX_SIZE	TRANS_REQ_HD_SIZE
#define TRANS_TX_MAX		TRANS_WR_TX_SIZE(MAX_WRITE_CNT)

#define TRANS_RD_RX_SIZE(n)	(SPI_AVMM_VAL_SIZE * (n))
#define TRANS_WR_RX_SIZE	TRANS_RESP_HD_SIZE
#define TRANS_RX_MAX		TRANS_RD_RX_SIZE(MAX_READ_CNT)

/* tx & rx share one transaction layer buffer */
#define TRANS_BUF_SIZE		((TRANS_TX_MAX > TRANS_RX_MAX) ?	\
				 TRANS_TX_MAX : TRANS_RX_MAX)
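
/*
 * With the limits above this works out to max(8 + 4 * 1, 4 * 256) =
 * 1024 bytes, dominated by the read path.
 */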

/*
 * In the tx phase, the host prepares all the phy layer bytes of a request in
 * the phy buffer and sends them in a batch.
 *
 * The packet layer and physical layer define several special chars for
 * various purposes; when a transaction layer byte hits one of these special
 * chars, it should be escaped. The escape rule is: the escape char first,
 * followed by the original byte XOR'ed with 0x20.
 *
 * This macro defines the max possible length of the phy data. In the worst
 * case, all transaction layer bytes need to be escaped (so the data length
 * doubles), plus 4 special chars (SOP, CHANNEL, CHANNEL_NUM, EOP). Finally
 * we should make sure the length is aligned to SPI BPW.
 */
#define PHY_TX_MAX		ALIGN(2 * TRANS_TX_MAX + 4, 4)
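
/*
 * For example (illustrative only): a transaction layer byte 0x7a (PKT_SOP)
 * is sent as the escaped pair 7d 5a, and 0x4a (PHY_IDLE) as the pair 4d 6a.
 * With MAX_WRITE_CNT == 1, PHY_TX_MAX evaluates to ALIGN(2 * 12 + 4, 4),
 * i.e. 28 bytes.
 */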

/*
 * Unlike tx, phy rx is affected by possible PHY_IDLE bytes from the slave,
 * so the max length of the rx byte stream is unpredictable. The driver
 * therefore reads the words one by one, and parses each word immediately
 * into the transaction layer buffer. Only one word length of phy buffer is
 * used for rx.
 */
#define PHY_BUF_SIZE		PHY_TX_MAX

/**
 * struct spi_avmm_bridge - SPI slave to AVMM bus master bridge
 *
 * @spi: spi slave associated with this bridge.
 * @word_len: bytes of word for spi transfer.
 * @trans_len: length of valid data in trans_buf.
 * @phy_len: length of valid data in phy_buf.
 * @trans_buf: the bridge buffer for transaction layer data.
 * @phy_buf: the bridge buffer for physical layer data.
 * @swap_words: the word swapping cb for phy data. NULL if not needed.
 *
 * As a device's registers are implemented in the AVMM bus address space, the
 * driver has to issue formatted requests to the SPI slave to AVMM bus master
 * bridge to perform register accesses.
 */
struct spi_avmm_bridge {
	struct spi_device *spi;
	unsigned char word_len;
	unsigned int trans_len;
	unsigned int phy_len;
	/* bridge buffer used in translation between protocol layers */
	char trans_buf[TRANS_BUF_SIZE];
	char phy_buf[PHY_BUF_SIZE];
	void (*swap_words)(char *buf, unsigned int len);
};

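/*
 * A sketch of why the swap below is needed (assuming a little-endian CPU,
 * which is where the mismatch shows up): with 32 BPW, the SPI core shifts
 * each 32-bit word out MSB first, so the memory bytes 7a 7c 00 10 would hit
 * the wire as 10 00 7c 7a. Swapping each word beforehand restores the
 * intended byte-stream order on the wire.
 */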
static void br_swap_words_32(char *buf, unsigned int len)
{
	u32 *p = (u32 *)buf;
	unsigned int count;

	count = len / 4;
	while (count--) {
		*p = swab32p(p);
		p++;
	}
}

/*
 * Format transaction layer data in br->trans_buf according to the register
 * access request, and store the valid transaction layer data length in
 * br->trans_len.
 */
static int br_trans_tx_prepare(struct spi_avmm_bridge *br, bool is_read, u32 reg,
			       u32 *wr_val, u32 count)
{
	struct trans_req_header *header;
	unsigned int trans_len;
	u8 code;
	__le32 *data;
	int i;

	if (is_read) {
		if (count == 1)
			code = TRANS_CODE_READ;
		else
			code = TRANS_CODE_SEQ_READ;
	} else {
		if (count == 1)
			code = TRANS_CODE_WRITE;
		else
			code = TRANS_CODE_SEQ_WRITE;
	}

	header = (struct trans_req_header *)br->trans_buf;
	header->code = code;
	header->rsvd = 0;
	header->size = cpu_to_be16((u16)count * SPI_AVMM_VAL_SIZE);
	header->addr = cpu_to_be32(reg);

	trans_len = TRANS_REQ_HD_SIZE;

	if (!is_read) {
		trans_len += SPI_AVMM_VAL_SIZE * count;
		if (trans_len > sizeof(br->trans_buf))
			return -ENOMEM;

		data = (__le32 *)(br->trans_buf + TRANS_REQ_HD_SIZE);

		for (i = 0; i < count; i++)
			*data++ = cpu_to_le32(*wr_val++);
	}

	/* Store valid trans data length for next layer */
	br->trans_len = trans_len;

	return 0;
}
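
/*
 * For instance (illustrative values only): a single write of 0xdeadbeef to
 * register 0x10 leaves trans_buf holding
 *   00 00 00 04 00 00 00 10 ef be ad de
 * with trans_len == 12: the 8-byte header (TRANS_CODE_WRITE, size 4,
 * addr 0x10) followed by the value in little-endian order.
 */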

/*
 * Convert transaction layer data (in br->trans_buf) to phy layer data and
 * store it in br->phy_buf. Pad phy_buf to align with the SPI's BPW. Store
 * the valid phy layer data length in br->phy_len.
 *
 * The phy_buf length should be aligned with the SPI's BPW. Spare bytes
 * should be padded with PHY_IDLE, then the slave will just drop them.
 *
 * The driver will not simply pad 4a at the tail. The concern is that the
 * driver doesn't store MISO data during the tx phase; if the driver pads 4a
 * at the tail, the slave may be fast enough to respond while the padding is
 * still being sent, and those rx bytes would be lost. In the following case,
 * 7a,7c,00 would be lost.
 * MOSI ...|7a|7c|00|10| |00|00|04|02| |4b|7d|5a|7b| |40|4a|4a|4a| |XX|XX|...
 * MISO ...|4a|4a|4a|4a| |4a|4a|4a|4a| |4a|4a|4a|4a| |4a|7a|7c|00| |78|56|...
 *
 * So the driver moves the EOP and the bytes after the EOP to the end of the
 * aligned size, then fills the hole with PHY_IDLE. As follows:
 * before pad ...|7a|7c|00|10| |00|00|04|02| |4b|7d|5a|7b| |40|
 * after pad  ...|7a|7c|00|10| |00|00|04|02| |4b|7d|5a|4a| |4a|4a|7b|40|
 * The slave then cannot see the entire packet before the tx phase is over,
 * so it cannot respond to anything before tx completes either.
 */
static int br_pkt_phy_tx_prepare(struct spi_avmm_bridge *br)
{
	char *tb, *tb_end, *pb, *pb_limit, *pb_eop = NULL;
	unsigned int aligned_phy_len, move_size;
	bool need_esc = false;

	tb = br->trans_buf;
	tb_end = tb + br->trans_len;
	pb = br->phy_buf;
	pb_limit = pb + ARRAY_SIZE(br->phy_buf);

	*pb++ = PKT_SOP;

	/*
	 * The driver doesn't support multiple channels so the channel number
	 * is always 0.
	 */
	*pb++ = PKT_CHANNEL;
	*pb++ = 0x0;

	for (; pb < pb_limit && tb < tb_end; pb++) {
		if (need_esc) {
			*pb = *tb++ ^ 0x20;
			need_esc = false;
			continue;
		}

		/* EOP should be inserted before the last valid char */
		if (tb == tb_end - 1 && !pb_eop) {
			*pb = PKT_EOP;
			pb_eop = pb;
			continue;
		}

		/*
		 * insert an ESCAPE char if the data value equals any special
		 * char.
		 */
		switch (*tb) {
		case PKT_SOP:
		case PKT_EOP:
		case PKT_CHANNEL:
		case PKT_ESC:
			*pb = PKT_ESC;
			need_esc = true;
			break;
		case PHY_IDLE:
		case PHY_ESC:
			*pb = PHY_ESC;
			need_esc = true;
			break;
		default:
			*pb = *tb++;
			break;
		}
	}
	/* The phy buffer is used up but transaction layer data remains */
	if (tb < tb_end)
		return -ENOMEM;

	/* Store valid phy data length for spi transfer */
	br->phy_len = pb - br->phy_buf;

	if (br->word_len == 1)
		return 0;

	/* Do phy buf padding if word_len > 1 byte. */
	aligned_phy_len = ALIGN(br->phy_len, br->word_len);
	if (aligned_phy_len > sizeof(br->phy_buf))
		return -ENOMEM;

	if (aligned_phy_len == br->phy_len)
		return 0;

	/* move EOP and bytes after EOP to the end of aligned size */
	move_size = pb - pb_eop;
	memmove(&br->phy_buf[aligned_phy_len - move_size], pb_eop, move_size);

	/* fill the hole with PHY_IDLEs */
	memset(pb_eop, PHY_IDLE, aligned_phy_len - br->phy_len);

	/* update the phy data length */
	br->phy_len = aligned_phy_len;

	return 0;
}

/*
 * In the tx phase, the slave only returns PHY_IDLE (0x4a), so the driver
 * ignores rx during the tx phase.
 */
static int br_do_tx(struct spi_avmm_bridge *br)
{
	/* reorder words for spi transfer */
	if (br->swap_words)
		br->swap_words(br->phy_buf, br->phy_len);

	/* send all data in phy_buf */
	return spi_write(br->spi, br->phy_buf, br->phy_len);
}

/*
 * This function reads the rx byte stream from SPI word by word and converts
 * it to transaction layer data in br->trans_buf. It also stores the length
 * of the rx transaction layer data in br->trans_len.
 *
 * The slave may send an unknown number of PHY_IDLEs in the rx phase, so we
 * cannot prepare a fixed length buffer to receive all of the rx data in a
 * batch. We have to read word by word and convert each word to transaction
 * layer data immediately.
 */
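/*
 * As an example (hypothetical byte stream): for a single-word read that
 * returns 0x12345678, the slave might send
 *   4a 4a 7a 7c 00 78 56 34 7b 12
 * The leading PHY_IDLEs are dropped, SOP/CHANNEL/EOP are consumed by the
 * state machine below, and trans_buf ends up holding 78 56 34 12, i.e. the
 * value in little-endian order with trans_len == 4.
 */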
static int br_do_rx_and_pkt_phy_parse(struct spi_avmm_bridge *br)
{
	bool eop_found = false, channel_found = false, esc_found = false;
	bool valid_word = false, last_try = false;
	struct device *dev = &br->spi->dev;
	char *pb, *tb_limit, *tb = NULL;
	unsigned long poll_timeout;
	int ret, i;

	tb_limit = br->trans_buf + ARRAY_SIZE(br->trans_buf);
	pb = br->phy_buf;
	poll_timeout = jiffies + SPI_AVMM_XFER_TIMEOUT;
	while (tb < tb_limit) {
		ret = spi_read(br->spi, pb, br->word_len);
		if (ret)
			return ret;

		/* reorder the word back */
		if (br->swap_words)
			br->swap_words(pb, br->word_len);

		valid_word = false;
		for (i = 0; i < br->word_len; i++) {
			/* drop everything before first SOP */
			if (!tb && pb[i] != PKT_SOP)
				continue;

			/* drop PHY_IDLE */
			if (pb[i] == PHY_IDLE)
				continue;

			valid_word = true;

			/*
			 * We don't support multiple channels, so error out if
			 * a non-zero channel number is found.
			 */
			if (channel_found) {
				if (pb[i] != 0) {
					dev_err(dev, "%s channel num != 0\n",
						__func__);
					return -EFAULT;
				}

				channel_found = false;
				continue;
			}

			switch (pb[i]) {
			case PKT_SOP:
				/*
				 * reset the parsing if a second SOP appears.
				 */
				tb = br->trans_buf;
				eop_found = false;
				channel_found = false;
				esc_found = false;
				break;
			case PKT_EOP:
				/*
				 * No special char is expected after ESC char.
				 * No special char (except ESC & PHY_IDLE) is
				 * expected after EOP char.
				 *
				 * The special chars are all dropped.
				 */
				if (esc_found || eop_found)
					return -EFAULT;

				eop_found = true;
				break;
			case PKT_CHANNEL:
				if (esc_found || eop_found)
					return -EFAULT;

				channel_found = true;
				break;
			case PKT_ESC:
			case PHY_ESC:
				if (esc_found)
					return -EFAULT;

				esc_found = true;
				break;
			default:
				/* Record the normal byte in trans_buf. */
				if (esc_found) {
					*tb++ = pb[i] ^ 0x20;
					esc_found = false;
				} else {
					*tb++ = pb[i];
				}
				/*
				 * We got the last normal byte after EOP; it
				 * is time to finish. Normally the function
				 * should return here.
				 */
				if (eop_found) {
					br->trans_len = tb - br->trans_buf;
					return 0;
				}
			}
		}

		if (valid_word) {
			/* update poll timeout when we get a valid word */
			poll_timeout = jiffies + SPI_AVMM_XFER_TIMEOUT;
			last_try = false;
		} else {
			/*
			 * We time out when rx stays invalid for some time.
			 * But it is possible that we are scheduled out for a
			 * long time after a spi_read, so that by the time we
			 * are scheduled back in a SW timeout has expired,
			 * even though the HW may have worked fine and been
			 * ready long ago. So we do one extra read: if we get
			 * a valid word we can continue rx, otherwise a real
			 * HW issue has happened.
			 */
			if (last_try)
				return -ETIMEDOUT;

			if (time_after(jiffies, poll_timeout))
				last_try = true;
		}
	}

	/*
	 * We have used up the entire transaction layer buffer but cannot
	 * find the end of the byte stream.
	 */
	dev_err(dev, "%s transfer buffer is full but rx doesn't end\n",
		__func__);

	return -EFAULT;
}

/*
 * For read transactions, the avmm bus will directly return register values
 * without a transaction response header.
 */
static int br_rd_trans_rx_parse(struct spi_avmm_bridge *br,
				u32 *val, unsigned int expected_count)
{
	unsigned int i, trans_len = br->trans_len;
	__le32 *data;

	if (expected_count * SPI_AVMM_VAL_SIZE != trans_len)
		return -EFAULT;

	data = (__le32 *)br->trans_buf;
	for (i = 0; i < expected_count; i++)
		*val++ = le32_to_cpu(*data++);

	return 0;
}

/*
 * For write transactions, the slave will return a transaction response
 * header.
 */
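/*
 * For example (illustrative): a successful single-word write is acknowledged
 * with the 4-byte response 80 00 00 04, i.e. r_code == TRANS_CODE_WRITE with
 * bit 7 set and size == SPI_AVMM_VAL_SIZE.
 */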
static int br_wr_trans_rx_parse(struct spi_avmm_bridge *br,
				unsigned int expected_count)
{
	unsigned int trans_len = br->trans_len;
	struct trans_resp_header *resp;
	u8 code;
	u16 val_len;

	if (trans_len != TRANS_RESP_HD_SIZE)
		return -EFAULT;

	resp = (struct trans_resp_header *)br->trans_buf;

	code = resp->r_code ^ 0x80;
	val_len = be16_to_cpu(resp->size);
	if (!val_len || val_len != expected_count * SPI_AVMM_VAL_SIZE)
		return -EFAULT;

	/* error out if the trans code doesn't align with the val size */
	if ((val_len == SPI_AVMM_VAL_SIZE && code != TRANS_CODE_WRITE) ||
	    (val_len > SPI_AVMM_VAL_SIZE && code != TRANS_CODE_SEQ_WRITE))
		return -EFAULT;

	return 0;
}

static int do_reg_access(void *context, bool is_read, unsigned int reg,
			 unsigned int *value, unsigned int count)
{
	struct spi_avmm_bridge *br = context;
	int ret;

	/* invalidate bridge buffers first */
	br->trans_len = 0;
	br->phy_len = 0;

	ret = br_trans_tx_prepare(br, is_read, reg, value, count);
	if (ret)
		return ret;

	ret = br_pkt_phy_tx_prepare(br);
	if (ret)
		return ret;

	ret = br_do_tx(br);
	if (ret)
		return ret;

	ret = br_do_rx_and_pkt_phy_parse(br);
	if (ret)
		return ret;

	if (is_read)
		return br_rd_trans_rx_parse(br, value, count);
	else
		return br_wr_trans_rx_parse(br, count);
}

static int regmap_spi_avmm_gather_write(void *context,
					const void *reg_buf, size_t reg_len,
					const void *val_buf, size_t val_len)
{
	if (reg_len != SPI_AVMM_REG_SIZE)
		return -EINVAL;

	if (!IS_ALIGNED(val_len, SPI_AVMM_VAL_SIZE))
		return -EINVAL;

	return do_reg_access(context, false, *(u32 *)reg_buf, (u32 *)val_buf,
			     val_len / SPI_AVMM_VAL_SIZE);
}

static int regmap_spi_avmm_write(void *context, const void *data, size_t bytes)
{
	if (bytes < SPI_AVMM_REG_SIZE + SPI_AVMM_VAL_SIZE)
		return -EINVAL;

	return regmap_spi_avmm_gather_write(context, data, SPI_AVMM_REG_SIZE,
					    data + SPI_AVMM_REG_SIZE,
					    bytes - SPI_AVMM_REG_SIZE);
}

static int regmap_spi_avmm_read(void *context,
				const void *reg_buf, size_t reg_len,
				void *val_buf, size_t val_len)
{
	if (reg_len != SPI_AVMM_REG_SIZE)
		return -EINVAL;

	if (!IS_ALIGNED(val_len, SPI_AVMM_VAL_SIZE))
		return -EINVAL;

	return do_reg_access(context, true, *(u32 *)reg_buf, val_buf,
			     (val_len / SPI_AVMM_VAL_SIZE));
}

static struct spi_avmm_bridge *
spi_avmm_bridge_ctx_gen(struct spi_device *spi)
{
	struct spi_avmm_bridge *br;

	if (!spi)
		return ERR_PTR(-ENODEV);

	/* Only BPW == 8 or 32 is supported for now. Try 32 BPW first. */
	spi->mode = SPI_MODE_1;
	spi->bits_per_word = 32;
	if (spi_setup(spi)) {
		spi->bits_per_word = 8;
		if (spi_setup(spi))
			return ERR_PTR(-EINVAL);
	}

	br = kzalloc(sizeof(*br), GFP_KERNEL);
	if (!br)
		return ERR_PTR(-ENOMEM);

	br->spi = spi;
	br->word_len = spi->bits_per_word / 8;
	if (br->word_len == 4) {
		/*
		 * The protocol requires little endian byte order but MSB
		 * first. So the driver needs to swap the byte order word by
		 * word if the word length > 1.
		 */
		br->swap_words = br_swap_words_32;
	}

	return br;
}

static void spi_avmm_bridge_ctx_free(void *context)
{
	kfree(context);
}

static const struct regmap_bus regmap_spi_avmm_bus = {
	.write = regmap_spi_avmm_write,
	.gather_write = regmap_spi_avmm_gather_write,
	.read = regmap_spi_avmm_read,
	.reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
	.val_format_endian_default = REGMAP_ENDIAN_NATIVE,
	.max_raw_read = SPI_AVMM_VAL_SIZE * MAX_READ_CNT,
	.max_raw_write = SPI_AVMM_VAL_SIZE * MAX_WRITE_CNT,
	.free_context = spi_avmm_bridge_ctx_free,
};

struct regmap *__regmap_init_spi_avmm(struct spi_device *spi,
				      const struct regmap_config *config,
				      struct lock_class_key *lock_key,
				      const char *lock_name)
{
	struct spi_avmm_bridge *bridge;
	struct regmap *map;

	bridge = spi_avmm_bridge_ctx_gen(spi);
	if (IS_ERR(bridge))
		return ERR_CAST(bridge);

	map = __regmap_init(&spi->dev, &regmap_spi_avmm_bus,
			    bridge, config, lock_key, lock_name);
	if (IS_ERR(map)) {
		spi_avmm_bridge_ctx_free(bridge);
		return ERR_CAST(map);
	}

	return map;
}
EXPORT_SYMBOL_GPL(__regmap_init_spi_avmm);

struct regmap *__devm_regmap_init_spi_avmm(struct spi_device *spi,
					   const struct regmap_config *config,
					   struct lock_class_key *lock_key,
					   const char *lock_name)
{
	struct spi_avmm_bridge *bridge;
	struct regmap *map;

	bridge = spi_avmm_bridge_ctx_gen(spi);
	if (IS_ERR(bridge))
		return ERR_CAST(bridge);

	map = __devm_regmap_init(&spi->dev, &regmap_spi_avmm_bus,
				 bridge, config, lock_key, lock_name);
	if (IS_ERR(map)) {
		spi_avmm_bridge_ctx_free(bridge);
		return ERR_CAST(map);
	}

	return map;
}
EXPORT_SYMBOL_GPL(__devm_regmap_init_spi_avmm);
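
/*
 * Usage sketch (illustrative, not part of this driver): a slave device
 * driver would typically create the regmap in its probe() via the wrapper
 * macro from <linux/regmap.h>. The config values below are hypothetical.
 *
 *	static const struct regmap_config sample_regmap_cfg = {
 *		.reg_bits = 32,
 *		.val_bits = 32,
 *		.reg_stride = 4,
 *		.max_register = 0xfff,
 *	};
 *
 *	map = devm_regmap_init_spi_avmm(spi, &sample_regmap_cfg);
 *	if (IS_ERR(map))
 *		return PTR_ERR(map);
 */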

MODULE_LICENSE("GPL v2");