// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2018-2020 Linaro Ltd.
 */

#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/bug.h>
#include <linux/io.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/remoteproc.h>
#include <linux/qcom_scm.h>
#include <linux/soc/qcom/mdt_loader.h>

#include "ipa.h"
#include "ipa_clock.h"
#include "ipa_data.h"
#include "ipa_endpoint.h"
#include "ipa_cmd.h"
#include "ipa_reg.h"
#include "ipa_mem.h"
#include "ipa_table.h"
#include "ipa_modem.h"
#include "ipa_uc.h"
#include "ipa_interrupt.h"
#include "gsi_trans.h"

/**
 * DOC: The IP Accelerator
 *
 * This driver supports the Qualcomm IP Accelerator (IPA), which is a
 * networking component found in many Qualcomm SoCs. The IPA is connected
 * to the application processor (AP), but is also connected to (and
 * partially controlled by) other "execution environments" (EEs), such
 * as a modem.
 *
 * The IPA is the conduit between the AP and the modem that carries network
 * traffic. This driver presents a network interface representing the
 * connection of the modem to external (e.g. LTE) networks.
 *
 * The IPA provides protocol checksum calculation, offloading this work
 * from the AP. The IPA offers additional functionality, including routing,
 * filtering, and NAT support, but that more advanced functionality is not
 * currently supported. Despite that, some resources--including routing
 * tables and filter tables--are defined in this driver because they must
 * be initialized even when the advanced hardware features are not used.
 *
 * There are two distinct layers that implement the IPA hardware, and this
 * is reflected in the organization of the driver. The generic software
 * interface (GSI) is an integral component of the IPA, providing a
 * well-defined communication layer between the AP subsystem and the IPA
 * core. The GSI implements a set of "channels" used for communication
 * between the AP and the IPA.
 *
 * The IPA layer uses GSI channels to implement its "endpoints". And while
 * a GSI channel carries data between the AP and the IPA, a pair of IPA
 * endpoints is used to carry traffic between two EEs. Specifically, the main
 * modem network interface is implemented by two pairs of endpoints: a TX
 * endpoint on the AP coupled with an RX endpoint on the modem; and another
 * RX endpoint on the AP receiving data from a TX endpoint on the modem.
 */
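
/* As an illustrative sketch (not an exhaustive list of endpoints), the
 * modem network interface described above uses two endpoint pairs:
 *
 *          AP                      IPA                     modem
 *      TX endpoint  ------->  (GSI channels)  ------->  RX endpoint
 *      RX endpoint  <-------  (GSI channels)  <-------  TX endpoint
 */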

/* The name of the GSI firmware file relative to /lib/firmware */
#define IPA_FWS_PATH		"ipa_fws.mdt"
#define IPA_PAS_ID		15

/**
 * ipa_suspend_handler() - Handle the suspend IPA interrupt
 * @ipa: IPA pointer
 * @irq_id: IPA interrupt type (unused)
 *
 * If an RX endpoint is in suspend state, and the IPA has a packet
 * destined for that endpoint, the IPA generates a SUSPEND interrupt
 * to inform the AP that it should resume the endpoint. If we get
 * one of these interrupts we just resume everything.
 */
static void ipa_suspend_handler(struct ipa *ipa, enum ipa_irq_id irq_id)
{
        /* Just report the event, and let system resume handle the rest.
         * More than one endpoint could signal this; if so, ignore
         * all but the first.
         */
        if (!test_and_set_bit(IPA_FLAG_RESUMED, ipa->flags))
                pm_wakeup_dev_event(&ipa->pdev->dev, 0, true);

        /* Acknowledge/clear the suspend interrupt on all endpoints */
        ipa_interrupt_suspend_clear_all(ipa->interrupt);
}

/**
 * ipa_setup() - Set up IPA hardware
 * @ipa: IPA pointer
 *
 * Perform initialization that requires issuing immediate commands on
 * the command TX endpoint. If the modem is doing GSI firmware load
 * and initialization, this function will be called when an SMP2P
 * interrupt has been signaled by the modem. Otherwise it will be
 * called from ipa_probe() after GSI firmware has been successfully
 * loaded, authenticated, and started by Trust Zone.
 */
int ipa_setup(struct ipa *ipa)
{
        struct ipa_endpoint *exception_endpoint;
        struct ipa_endpoint *command_endpoint;
        struct device *dev = &ipa->pdev->dev;
        int ret;

        /* Setup for IPA v3.5.1 has some slight differences */
        ret = gsi_setup(&ipa->gsi, ipa->version == IPA_VERSION_3_5_1);
        if (ret)
                return ret;

        ipa->interrupt = ipa_interrupt_setup(ipa);
        if (IS_ERR(ipa->interrupt)) {
                ret = PTR_ERR(ipa->interrupt);
                goto err_gsi_teardown;
        }
        ipa_interrupt_add(ipa->interrupt, IPA_IRQ_TX_SUSPEND,
                          ipa_suspend_handler);

        ipa_uc_setup(ipa);

        ret = device_init_wakeup(dev, true);
        if (ret)
                goto err_uc_teardown;

        ipa_endpoint_setup(ipa);

        /* We need to use the AP command TX endpoint to perform other
         * initialization, so we enable it first.
         */
        command_endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
        ret = ipa_endpoint_enable_one(command_endpoint);
        if (ret)
                goto err_endpoint_teardown;

        ret = ipa_mem_setup(ipa);
        if (ret)
                goto err_command_disable;

        ret = ipa_table_setup(ipa);
        if (ret)
                goto err_mem_teardown;

        /* Enable the exception handling endpoint, and tell the hardware
         * to use it by default.
         */
        exception_endpoint = ipa->name_map[IPA_ENDPOINT_AP_LAN_RX];
        ret = ipa_endpoint_enable_one(exception_endpoint);
        if (ret)
                goto err_table_teardown;

        ipa_endpoint_default_route_set(ipa, exception_endpoint->endpoint_id);

        /* We're all set. Now prepare for communication with the modem */
        ret = ipa_modem_setup(ipa);
        if (ret)
                goto err_default_route_clear;

        ipa->setup_complete = true;

        dev_info(dev, "IPA driver setup completed successfully\n");

        return 0;

err_default_route_clear:
        ipa_endpoint_default_route_clear(ipa);
        ipa_endpoint_disable_one(exception_endpoint);
err_table_teardown:
        ipa_table_teardown(ipa);
err_mem_teardown:
        ipa_mem_teardown(ipa);
err_command_disable:
        ipa_endpoint_disable_one(command_endpoint);
err_endpoint_teardown:
        ipa_endpoint_teardown(ipa);
        (void)device_init_wakeup(dev, false);
err_uc_teardown:
        ipa_uc_teardown(ipa);
        ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_TX_SUSPEND);
        ipa_interrupt_teardown(ipa->interrupt);
err_gsi_teardown:
        gsi_teardown(&ipa->gsi);

        return ret;
}

/**
 * ipa_teardown() - Inverse of ipa_setup()
 * @ipa: IPA pointer
 */
static void ipa_teardown(struct ipa *ipa)
{
        struct ipa_endpoint *exception_endpoint;
        struct ipa_endpoint *command_endpoint;

        ipa_modem_teardown(ipa);
        ipa_endpoint_default_route_clear(ipa);
        exception_endpoint = ipa->name_map[IPA_ENDPOINT_AP_LAN_RX];
        ipa_endpoint_disable_one(exception_endpoint);
        ipa_table_teardown(ipa);
        ipa_mem_teardown(ipa);
        command_endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
        ipa_endpoint_disable_one(command_endpoint);
        ipa_endpoint_teardown(ipa);
        (void)device_init_wakeup(&ipa->pdev->dev, false);
        ipa_uc_teardown(ipa);
        ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_TX_SUSPEND);
        ipa_interrupt_teardown(ipa->interrupt);
        gsi_teardown(&ipa->gsi);
}

/* Configure QMB Core Master Port selection */
static void ipa_hardware_config_comp(struct ipa *ipa)
{
        u32 val;

        /* Nothing to configure for IPA v3.5.1 */
        if (ipa->version == IPA_VERSION_3_5_1)
                return;

        val = ioread32(ipa->reg_virt + IPA_REG_COMP_CFG_OFFSET);

        if (ipa->version == IPA_VERSION_4_0) {
                val &= ~IPA_QMB_SELECT_CONS_EN_FMASK;
                val &= ~IPA_QMB_SELECT_PROD_EN_FMASK;
                val &= ~IPA_QMB_SELECT_GLOBAL_EN_FMASK;
        } else {
                val |= GSI_MULTI_AXI_MASTERS_DIS_FMASK;
        }

        val |= GSI_MULTI_INORDER_RD_DIS_FMASK;
        val |= GSI_MULTI_INORDER_WR_DIS_FMASK;

        iowrite32(val, ipa->reg_virt + IPA_REG_COMP_CFG_OFFSET);
}

/* Configure DDR and PCIe max read/write QSB values */
static void ipa_hardware_config_qsb(struct ipa *ipa)
{
        u32 val;

        /* QMB_0 represents DDR; QMB_1 represents PCIe (not present in 4.2) */
        val = u32_encode_bits(8, GEN_QMB_0_MAX_WRITES_FMASK);
        if (ipa->version == IPA_VERSION_4_2)
                val |= u32_encode_bits(0, GEN_QMB_1_MAX_WRITES_FMASK);
        else
                val |= u32_encode_bits(4, GEN_QMB_1_MAX_WRITES_FMASK);
        iowrite32(val, ipa->reg_virt + IPA_REG_QSB_MAX_WRITES_OFFSET);

        if (ipa->version == IPA_VERSION_3_5_1) {
                val = u32_encode_bits(8, GEN_QMB_0_MAX_READS_FMASK);
                val |= u32_encode_bits(12, GEN_QMB_1_MAX_READS_FMASK);
        } else {
                val = u32_encode_bits(12, GEN_QMB_0_MAX_READS_FMASK);
                if (ipa->version == IPA_VERSION_4_2)
                        val |= u32_encode_bits(0, GEN_QMB_1_MAX_READS_FMASK);
                else
                        val |= u32_encode_bits(12, GEN_QMB_1_MAX_READS_FMASK);
                /* GEN_QMB_0_MAX_READS_BEATS is 0 */
                /* GEN_QMB_1_MAX_READS_BEATS is 0 */
        }
        iowrite32(val, ipa->reg_virt + IPA_REG_QSB_MAX_READS_OFFSET);
}
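
/* For reference, the QSB limits programmed above work out as follows.
 * This is only a summary derived from ipa_hardware_config_qsb(), not an
 * authoritative hardware table (max writes/reads per QMB port):
 *
 *      IPA version     QMB_0 (DDR)     QMB_1 (PCIe)
 *      3.5.1           8 / 8           4 / 12
 *      4.0, 4.1        8 / 12          4 / 12
 *      4.2             8 / 12          0 / 0
 */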

static void ipa_idle_indication_cfg(struct ipa *ipa,
                                    u32 enter_idle_debounce_thresh,
                                    bool const_non_idle_enable)
{
        u32 offset;
        u32 val;

        val = u32_encode_bits(enter_idle_debounce_thresh,
                              ENTER_IDLE_DEBOUNCE_THRESH_FMASK);
        if (const_non_idle_enable)
                val |= CONST_NON_IDLE_ENABLE_FMASK;

        offset = ipa_reg_idle_indication_cfg_offset(ipa->version);
        iowrite32(val, ipa->reg_virt + offset);
}
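
/* Note: u32_encode_bits() (from <linux/bitfield.h>) shifts a value into
 * the field described by a mask.  As an illustration only--assuming a
 * hypothetical field mask of GENMASK(15, 0)--the call
 *
 *      u32_encode_bits(256, GENMASK(15, 0))
 *
 * yields 0x100, i.e. the value 256 placed in bits 15:0 of the register.
 */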

/**
 * ipa_hardware_dcd_config() - Enable dynamic clock division on IPA
 * @ipa: IPA pointer
 *
 * Configures when the IPA signals it is idle to the global clock
 * controller, which can respond by scaling down the clock to
 * save power.
 */
static void ipa_hardware_dcd_config(struct ipa *ipa)
{
        /* Recommended values for IPA 3.5 according to IPA HPG */
        ipa_idle_indication_cfg(ipa, 256, false);
}

static void ipa_hardware_dcd_deconfig(struct ipa *ipa)
{
        /* Power-on reset values */
        ipa_idle_indication_cfg(ipa, 0, true);
}

/**
 * ipa_hardware_config() - Primitive hardware initialization
 * @ipa: IPA pointer
 */
static void ipa_hardware_config(struct ipa *ipa)
{
        u32 granularity;
        u32 val;

        /* Fill in backward-compatibility register, based on version */
        val = ipa_reg_bcr_val(ipa->version);
        iowrite32(val, ipa->reg_virt + IPA_REG_BCR_OFFSET);

        if (ipa->version != IPA_VERSION_3_5_1) {
                /* Enable open global clocks (hardware workaround) */
                val = GLOBAL_FMASK;
                val |= GLOBAL_2X_CLK_FMASK;
                iowrite32(val, ipa->reg_virt + IPA_REG_CLKON_CFG_OFFSET);

                /* Disable PA mask to allow HOLB drop (hardware workaround) */
                val = ioread32(ipa->reg_virt + IPA_REG_TX_CFG_OFFSET);
                val &= ~PA_MASK_EN;
                iowrite32(val, ipa->reg_virt + IPA_REG_TX_CFG_OFFSET);
        }

        ipa_hardware_config_comp(ipa);

        /* Configure system bus limits */
        ipa_hardware_config_qsb(ipa);

        /* Configure aggregation granularity */
        val = ioread32(ipa->reg_virt + IPA_REG_COUNTER_CFG_OFFSET);
        granularity = ipa_aggr_granularity_val(IPA_AGGR_GRANULARITY);
        val = u32_encode_bits(granularity, AGGR_GRANULARITY);
        iowrite32(val, ipa->reg_virt + IPA_REG_COUNTER_CFG_OFFSET);

        /* Disable hashed IPv4 and IPv6 routing and filtering for IPA v4.2 */
        if (ipa->version == IPA_VERSION_4_2)
                iowrite32(0, ipa->reg_virt + IPA_REG_FILT_ROUT_HASH_EN_OFFSET);

        /* Enable dynamic clock division */
        ipa_hardware_dcd_config(ipa);
}

/**
 * ipa_hardware_deconfig() - Inverse of ipa_hardware_config()
 * @ipa: IPA pointer
 *
 * This restores the power-on reset values (even if they aren't different)
 */
static void ipa_hardware_deconfig(struct ipa *ipa)
{
        /* Mostly we just leave things as we set them. */
        ipa_hardware_dcd_deconfig(ipa);
}

#ifdef IPA_VALIDATION

/* # IPA resources used based on version (see IPA_RESOURCE_GROUP_COUNT) */
static int ipa_resource_group_count(struct ipa *ipa)
{
        switch (ipa->version) {
        case IPA_VERSION_3_5_1:
                return 3;

        case IPA_VERSION_4_0:
        case IPA_VERSION_4_1:
                return 4;

        case IPA_VERSION_4_2:
                return 1;

        default:
                return 0;
        }
}

static bool ipa_resource_limits_valid(struct ipa *ipa,
                                      const struct ipa_resource_data *data)
{
        u32 group_count = ipa_resource_group_count(ipa);
        u32 i;
        u32 j;

        if (!group_count)
                return false;

        /* Return an error if a non-zero resource group limit is specified
         * for a resource not supported by hardware.
         */
        for (i = 0; i < data->resource_src_count; i++) {
                const struct ipa_resource_src *resource;

                resource = &data->resource_src[i];
                for (j = group_count; j < IPA_RESOURCE_GROUP_COUNT; j++)
                        if (resource->limits[j].min || resource->limits[j].max)
                                return false;
        }

        for (i = 0; i < data->resource_dst_count; i++) {
                const struct ipa_resource_dst *resource;

                resource = &data->resource_dst[i];
                for (j = group_count; j < IPA_RESOURCE_GROUP_COUNT; j++)
                        if (resource->limits[j].min || resource->limits[j].max)
                                return false;
        }

        return true;
}

#else /* !IPA_VALIDATION */

static bool ipa_resource_limits_valid(struct ipa *ipa,
                                      const struct ipa_resource_data *data)
{
        return true;
}

#endif /* !IPA_VALIDATION */

static void
ipa_resource_config_common(struct ipa *ipa, u32 offset,
                           const struct ipa_resource_limits *xlimits,
                           const struct ipa_resource_limits *ylimits)
{
        u32 val;

        val = u32_encode_bits(xlimits->min, X_MIN_LIM_FMASK);
        val |= u32_encode_bits(xlimits->max, X_MAX_LIM_FMASK);
        val |= u32_encode_bits(ylimits->min, Y_MIN_LIM_FMASK);
        val |= u32_encode_bits(ylimits->max, Y_MAX_LIM_FMASK);

        iowrite32(val, ipa->reg_virt + offset);
}

static void ipa_resource_config_src_01(struct ipa *ipa,
                                       const struct ipa_resource_src *resource)
{
        u32 offset = IPA_REG_SRC_RSRC_GRP_01_RSRC_TYPE_N_OFFSET(resource->type);

        ipa_resource_config_common(ipa, offset,
                                   &resource->limits[0], &resource->limits[1]);
}

static void ipa_resource_config_src_23(struct ipa *ipa,
                                       const struct ipa_resource_src *resource)
{
        u32 offset = IPA_REG_SRC_RSRC_GRP_23_RSRC_TYPE_N_OFFSET(resource->type);

        ipa_resource_config_common(ipa, offset,
                                   &resource->limits[2], &resource->limits[3]);
}

static void ipa_resource_config_dst_01(struct ipa *ipa,
                                       const struct ipa_resource_dst *resource)
{
        u32 offset = IPA_REG_DST_RSRC_GRP_01_RSRC_TYPE_N_OFFSET(resource->type);

        ipa_resource_config_common(ipa, offset,
                                   &resource->limits[0], &resource->limits[1]);
}

static void ipa_resource_config_dst_23(struct ipa *ipa,
                                       const struct ipa_resource_dst *resource)
{
        u32 offset = IPA_REG_DST_RSRC_GRP_23_RSRC_TYPE_N_OFFSET(resource->type);

        ipa_resource_config_common(ipa, offset,
                                   &resource->limits[2], &resource->limits[3]);
}
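
/* As a worked illustration of the mapping above (hypothetical numbers,
 * not taken from any platform's configuration data): a source resource
 * with limits { {1, 15}, {2, 8}, {0, 0}, {0, 0} } has groups 0 and 1
 * written to the X and Y fields of the _01 register (min 1/max 15 and
 * min 2/max 8), and groups 2 and 3 (all zero) written to the _23
 * register.
 */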

static int
ipa_resource_config(struct ipa *ipa, const struct ipa_resource_data *data)
{
        u32 i;

        if (!ipa_resource_limits_valid(ipa, data))
                return -EINVAL;

        for (i = 0; i < data->resource_src_count; i++) {
                ipa_resource_config_src_01(ipa, &data->resource_src[i]);
                ipa_resource_config_src_23(ipa, &data->resource_src[i]);
        }

        for (i = 0; i < data->resource_dst_count; i++) {
                ipa_resource_config_dst_01(ipa, &data->resource_dst[i]);
                ipa_resource_config_dst_23(ipa, &data->resource_dst[i]);
        }

        return 0;
}

static void ipa_resource_deconfig(struct ipa *ipa)
{
        /* Nothing to do */
}

/**
 * ipa_config() - Configure IPA hardware
 * @ipa: IPA pointer
 * @data: IPA configuration data
 *
 * Perform initialization requiring IPA clock to be enabled.
 */
static int ipa_config(struct ipa *ipa, const struct ipa_data *data)
{
        int ret;

        /* Get a clock reference to allow initialization. This reference
         * is held after initialization completes, and won't get dropped
         * unless/until a system suspend request arrives.
         */
        ipa_clock_get(ipa);

        ipa_hardware_config(ipa);

        ret = ipa_endpoint_config(ipa);
        if (ret)
                goto err_hardware_deconfig;

        ret = ipa_mem_config(ipa);
        if (ret)
                goto err_endpoint_deconfig;

        ipa_table_config(ipa);

        /* Assign resource limitation to each group */
        ret = ipa_resource_config(ipa, data->resource_data);
        if (ret)
                goto err_table_deconfig;

        ret = ipa_modem_config(ipa);
        if (ret)
                goto err_resource_deconfig;

        return 0;

err_resource_deconfig:
        ipa_resource_deconfig(ipa);
err_table_deconfig:
        ipa_table_deconfig(ipa);
        ipa_mem_deconfig(ipa);
err_endpoint_deconfig:
        ipa_endpoint_deconfig(ipa);
err_hardware_deconfig:
        ipa_hardware_deconfig(ipa);
        ipa_clock_put(ipa);

        return ret;
}

/**
 * ipa_deconfig() - Inverse of ipa_config()
 * @ipa: IPA pointer
 */
static void ipa_deconfig(struct ipa *ipa)
{
        ipa_modem_deconfig(ipa);
        ipa_resource_deconfig(ipa);
        ipa_table_deconfig(ipa);
        ipa_mem_deconfig(ipa);
        ipa_endpoint_deconfig(ipa);
        ipa_hardware_deconfig(ipa);
        ipa_clock_put(ipa);
}

static int ipa_firmware_load(struct device *dev)
{
        const struct firmware *fw;
        struct device_node *node;
        struct resource res;
        phys_addr_t phys;
        ssize_t size;
        void *virt;
        int ret;

        node = of_parse_phandle(dev->of_node, "memory-region", 0);
        if (!node) {
                dev_err(dev, "DT error getting \"memory-region\" property\n");
                return -EINVAL;
        }

        ret = of_address_to_resource(node, 0, &res);
        of_node_put(node);
        if (ret) {
                dev_err(dev, "error %d getting \"memory-region\" resource\n",
                        ret);
                return ret;
        }

        ret = request_firmware(&fw, IPA_FWS_PATH, dev);
        if (ret) {
                dev_err(dev, "error %d requesting \"%s\"\n", ret, IPA_FWS_PATH);
                return ret;
        }

        phys = res.start;
        size = (size_t)resource_size(&res);
        virt = memremap(phys, size, MEMREMAP_WC);
        if (!virt) {
                dev_err(dev, "unable to remap firmware memory\n");
                ret = -ENOMEM;
                goto out_release_firmware;
        }

        ret = qcom_mdt_load(dev, fw, IPA_FWS_PATH, IPA_PAS_ID,
                            virt, phys, size, NULL);
        if (ret)
                dev_err(dev, "error %d loading \"%s\"\n", ret, IPA_FWS_PATH);
        else if ((ret = qcom_scm_pas_auth_and_reset(IPA_PAS_ID)))
                dev_err(dev, "error %d authenticating \"%s\"\n", ret,
                        IPA_FWS_PATH);

        memunmap(virt);
out_release_firmware:
        release_firmware(fw);

        return ret;
}

static const struct of_device_id ipa_match[] = {
        {
                .compatible     = "qcom,sdm845-ipa",
                .data           = &ipa_data_sdm845,
        },
        {
                .compatible     = "qcom,sc7180-ipa",
                .data           = &ipa_data_sc7180,
        },
        { },
};
MODULE_DEVICE_TABLE(of, ipa_match);

static phandle of_property_read_phandle(const struct device_node *np,
                                        const char *name)
{
        struct property *prop;
        int len = 0;

        prop = of_find_property(np, name, &len);
        if (!prop || len != sizeof(__be32))
                return 0;

        return be32_to_cpup(prop->value);
}
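
/* For illustration, a matching device tree node might look roughly like
 * the following (a sketch only; the unit address and the &mpss and
 * &ipa_fw_mem labels are hypothetical).  The "memory-region" property is
 * consulted by ipa_firmware_load() when the AP, rather than the modem,
 * loads the GSI firmware:
 *
 *      ipa@1e40000 {
 *              compatible = "qcom,sdm845-ipa";
 *              modem-remoteproc = <&mpss>;
 *              memory-region = <&ipa_fw_mem>;
 *      };
 */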

/* Check things that can be validated at build time. This just
 * groups these things so the BUILD_BUG_ON() calls don't clutter
 * the rest of the code.
 */
static void ipa_validate_build(void)
{
#ifdef IPA_VALIDATE
        /* We assume we're working on 64-bit hardware */
        BUILD_BUG_ON(!IS_ENABLED(CONFIG_64BIT));

        /* Code assumes the EE ID for the AP is 0 (zeroed structure field) */
        BUILD_BUG_ON(GSI_EE_AP != 0);

        /* There's no point if we have no channels or event rings */
        BUILD_BUG_ON(!GSI_CHANNEL_COUNT_MAX);
        BUILD_BUG_ON(!GSI_EVT_RING_COUNT_MAX);

        /* GSI hardware design limits */
        BUILD_BUG_ON(GSI_CHANNEL_COUNT_MAX > 32);
        BUILD_BUG_ON(GSI_EVT_RING_COUNT_MAX > 31);

        /* The number of TREs in a transaction is limited by the channel's
         * TLV FIFO size. A transaction structure uses 8-bit fields
         * to represent the number of TREs it has allocated and used.
         */
        BUILD_BUG_ON(GSI_TLV_MAX > U8_MAX);

        /* Exceeding 128 bytes makes the transaction pool *much* larger */
        BUILD_BUG_ON(sizeof(struct gsi_trans) > 128);

        /* This is used as a divisor */
        BUILD_BUG_ON(!IPA_AGGR_GRANULARITY);

        /* Aggregation granularity value can't be 0, and must fit */
        BUILD_BUG_ON(!ipa_aggr_granularity_val(IPA_AGGR_GRANULARITY));
        BUILD_BUG_ON(ipa_aggr_granularity_val(IPA_AGGR_GRANULARITY) >
                     field_max(AGGR_GRANULARITY));
#endif /* IPA_VALIDATE */
}

/**
 * ipa_probe() - IPA platform driver probe function
 * @pdev: Platform device pointer
 *
 * Return: 0 if successful, or a negative error code (possibly
 *	   EPROBE_DEFER)
 *
 * This is the main entry point for the IPA driver. Initialization proceeds
 * in several stages:
 *   - The "init" stage involves activities that can be initialized without
 *     access to the IPA hardware.
 *   - The "config" stage requires the IPA clock to be active so IPA registers
 *     can be accessed, but does not require the use of IPA immediate commands.
 *   - The "setup" stage uses IPA immediate commands, and so requires the GSI
 *     layer to be initialized.
 *
 * A Boolean Device Tree "modem-init" property determines whether GSI
 * initialization will be performed by the AP (Trust Zone) or the modem.
 * If the AP does GSI initialization, the setup phase is entered after
 * this has completed successfully. Otherwise the modem initializes
 * the GSI layer and signals it has finished by sending an SMP2P interrupt
 * to the AP; this triggers the start of IPA setup.
 */
static int ipa_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        const struct ipa_data *data;
        struct ipa_clock *clock;
        struct rproc *rproc;
        bool modem_alloc;
        bool modem_init;
        struct ipa *ipa;
        bool prefetch;
        phandle ph;
        int ret;

        ipa_validate_build();

        /* If we need Trust Zone, make sure it's available */
        modem_init = of_property_read_bool(dev->of_node, "modem-init");
        if (!modem_init)
                if (!qcom_scm_is_available())
                        return -EPROBE_DEFER;

        /* We rely on remoteproc to tell us about modem state changes */
        ph = of_property_read_phandle(dev->of_node, "modem-remoteproc");
        if (!ph) {
                dev_err(dev, "DT missing \"modem-remoteproc\" property\n");
                return -EINVAL;
        }

        rproc = rproc_get_by_phandle(ph);
        if (!rproc)
                return -EPROBE_DEFER;

        /* The clock and interconnects might not be ready when we're
         * probed, so this might return -EPROBE_DEFER.
         */
        clock = ipa_clock_init(dev);
        if (IS_ERR(clock)) {
                ret = PTR_ERR(clock);
                goto err_rproc_put;
        }

        /* No more EPROBE_DEFER. Get our configuration data */
        data = of_device_get_match_data(dev);
        if (!data) {
                /* This is really IPA_VALIDATE (should never happen) */
                dev_err(dev, "matched hardware not supported\n");
                ret = -ENOTSUPP;
                goto err_clock_exit;
        }

        /* Allocate and initialize the IPA structure */
        ipa = kzalloc(sizeof(*ipa), GFP_KERNEL);
        if (!ipa) {
                ret = -ENOMEM;
                goto err_clock_exit;
        }

        ipa->pdev = pdev;
        dev_set_drvdata(dev, ipa);
        ipa->modem_rproc = rproc;
        ipa->clock = clock;
        ipa->version = data->version;

        ret = ipa_reg_init(ipa);
        if (ret)
                goto err_kfree_ipa;

        ret = ipa_mem_init(ipa, data->mem_data);
        if (ret)
                goto err_reg_exit;

        /* GSI v2.0+ (IPA v4.0+) uses prefetch for the command channel */
        prefetch = ipa->version != IPA_VERSION_3_5_1;
        /* IPA v4.2 requires the AP to allocate channels for the modem */
        modem_alloc = ipa->version == IPA_VERSION_4_2;

        ret = gsi_init(&ipa->gsi, pdev, prefetch, data->endpoint_count,
                       data->endpoint_data, modem_alloc);
        if (ret)
                goto err_mem_exit;

        /* Result is a non-zero mask of endpoints that support filtering */
        ipa->filter_map = ipa_endpoint_init(ipa, data->endpoint_count,
                                            data->endpoint_data);
        if (!ipa->filter_map) {
                ret = -EINVAL;
                goto err_gsi_exit;
        }

        ret = ipa_table_init(ipa);
        if (ret)
                goto err_endpoint_exit;

        ret = ipa_modem_init(ipa, modem_init);
        if (ret)
                goto err_table_exit;

        ret = ipa_config(ipa, data);
        if (ret)
                goto err_modem_exit;

        dev_info(dev, "IPA driver initialized");

        /* If the modem is doing early initialization, it will trigger a
         * call to ipa_setup() when it has finished. In that case we're
         * done here.
         */
        if (modem_init)
                return 0;

        /* Otherwise we need to load the firmware and have Trust Zone validate
         * and install it. If that succeeds we can proceed with setup.
         */
        ret = ipa_firmware_load(dev);
        if (ret)
                goto err_deconfig;

        ret = ipa_setup(ipa);
        if (ret)
                goto err_deconfig;

        return 0;

err_deconfig:
        ipa_deconfig(ipa);
err_modem_exit:
        ipa_modem_exit(ipa);
err_table_exit:
        ipa_table_exit(ipa);
err_endpoint_exit:
        ipa_endpoint_exit(ipa);
err_gsi_exit:
        gsi_exit(&ipa->gsi);
err_mem_exit:
        ipa_mem_exit(ipa);
err_reg_exit:
        ipa_reg_exit(ipa);
err_kfree_ipa:
        kfree(ipa);
err_clock_exit:
        ipa_clock_exit(clock);
err_rproc_put:
        rproc_put(rproc);

        return ret;
}
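
/* A reading aid for the staged bring-up described in the ipa_probe()
 * kernel-doc, derived from the code above (not a separate mechanism):
 *
 *      init:   ipa_reg_init(), ipa_mem_init(), gsi_init(),
 *              ipa_endpoint_init(), ipa_table_init(), ipa_modem_init()
 *      config: ipa_config() (IPA clock held; register access only)
 *      setup:  ipa_setup() (immediate commands; called from ipa_probe()
 *              only when the AP, rather than the modem, loads and starts
 *              the GSI firmware)
 */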

static int ipa_remove(struct platform_device *pdev)
{
        struct ipa *ipa = dev_get_drvdata(&pdev->dev);
        struct rproc *rproc = ipa->modem_rproc;
        struct ipa_clock *clock = ipa->clock;
        int ret;

        if (ipa->setup_complete) {
                ret = ipa_modem_stop(ipa);
                if (ret)
                        return ret;

                ipa_teardown(ipa);
        }

        ipa_deconfig(ipa);
        ipa_modem_exit(ipa);
        ipa_table_exit(ipa);
        ipa_endpoint_exit(ipa);
        gsi_exit(&ipa->gsi);
        ipa_mem_exit(ipa);
        ipa_reg_exit(ipa);
        kfree(ipa);
        ipa_clock_exit(clock);
        rproc_put(rproc);

        return 0;
}

/**
 * ipa_suspend() - Power management system suspend callback
 * @dev: IPA device structure
 *
 * Return: Always returns zero
 *
 * Called by the PM framework when a system suspend operation is invoked.
 * Suspends endpoints and releases the clock reference held to keep
 * the IPA clock running until this point.
 */
static int ipa_suspend(struct device *dev)
{
        struct ipa *ipa = dev_get_drvdata(dev);

        /* When a suspended RX endpoint has a packet ready to receive, we
         * get an IPA SUSPEND interrupt. We trigger a system resume in
         * that case, but only on the first such interrupt since suspend.
         */
        __clear_bit(IPA_FLAG_RESUMED, ipa->flags);

        ipa_endpoint_suspend(ipa);

        ipa_clock_put(ipa);

        return 0;
}

/**
 * ipa_resume() - Power management system resume callback
 * @dev: IPA device structure
 *
 * Return: Always returns zero
 *
 * Called by the PM framework when a system resume operation is invoked.
 * Takes an IPA clock reference to keep the clock running until suspend,
 * and resumes endpoints.
 */
static int ipa_resume(struct device *dev)
{
        struct ipa *ipa = dev_get_drvdata(dev);

        /* This clock reference will keep the IPA out of suspend
         * until we get a power management suspend request.
         */
        ipa_clock_get(ipa);

        ipa_endpoint_resume(ipa);

        return 0;
}

static const struct dev_pm_ops ipa_pm_ops = {
        .suspend        = ipa_suspend,
        .resume         = ipa_resume,
};

static struct platform_driver ipa_driver = {
        .probe  = ipa_probe,
        .remove = ipa_remove,
        .driver = {
                .name           = "ipa",
                .pm             = &ipa_pm_ops,
                .of_match_table = ipa_match,
        },
};

module_platform_driver(ipa_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Qualcomm IP Accelerator device driver");