/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef MLX5_DRIVER_H
#define MLX5_DRIVER_H

#include <linux/kernel.h>
#include <linux/completion.h>
#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/spinlock_types.h>
#include <linux/semaphore.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/xarray.h>
#include <linux/workqueue.h>
#include <linux/mempool.h>
#include <linux/interrupt.h>
#include <linux/idr.h>
#include <linux/notifier.h>
#include <linux/refcount.h>

#include <linux/mlx5/device.h>
#include <linux/mlx5/doorbell.h>
#include <linux/mlx5/eq.h>
#include <linux/timecounter.h>
#include <linux/ptp_clock_kernel.h>
#include <net/devlink.h>

enum {
	MLX5_BOARD_ID_LEN = 64,
};

enum {
	/* One minute for the sake of bringup. Generally, commands must always
	 * complete, and we may need to increase this timeout value.
	 */
	MLX5_CMD_TIMEOUT_MSEC	= 60 * 1000,
	MLX5_CMD_WQ_MAX_NAME	= 32,
};

enum {
	CMD_OWNER_SW		= 0x0,
	CMD_OWNER_HW		= 0x1,
	CMD_STATUS_SUCCESS	= 0,
};

enum mlx5_sqp_t {
	MLX5_SQP_SMI		= 0,
	MLX5_SQP_GSI		= 1,
	MLX5_SQP_IEEE_1588	= 2,
	MLX5_SQP_SNIFFER	= 3,
	MLX5_SQP_SYNC_UMR	= 4,
};

enum {
	MLX5_MAX_PORTS	= 2,
};

enum {
	MLX5_ATOMIC_MODE_OFFSET = 16,
	MLX5_ATOMIC_MODE_IB_COMP = 1,
	MLX5_ATOMIC_MODE_CX = 2,
	MLX5_ATOMIC_MODE_8B = 3,
	MLX5_ATOMIC_MODE_16B = 4,
	MLX5_ATOMIC_MODE_32B = 5,
	MLX5_ATOMIC_MODE_64B = 6,
	MLX5_ATOMIC_MODE_128B = 7,
	MLX5_ATOMIC_MODE_256B = 8,
};

enum {
	MLX5_REG_QPTS		 = 0x4002,
	MLX5_REG_QETCR		 = 0x4005,
	MLX5_REG_QTCT		 = 0x400a,
	MLX5_REG_QPDPM		 = 0x4013,
	MLX5_REG_QCAM		 = 0x4019,
	MLX5_REG_DCBX_PARAM	 = 0x4020,
	MLX5_REG_DCBX_APP	 = 0x4021,
	MLX5_REG_FPGA_CAP	 = 0x4022,
	MLX5_REG_FPGA_CTRL	 = 0x4023,
	MLX5_REG_FPGA_ACCESS_REG = 0x4024,
	MLX5_REG_CORE_DUMP	 = 0x402e,
	MLX5_REG_PCAP		 = 0x5001,
	MLX5_REG_PMTU		 = 0x5003,
	MLX5_REG_PTYS		 = 0x5004,
	MLX5_REG_PAOS		 = 0x5006,
	MLX5_REG_PFCC		 = 0x5007,
	MLX5_REG_PPCNT		 = 0x5008,
	MLX5_REG_PPTB		 = 0x500b,
	MLX5_REG_PBMC		 = 0x500c,
	MLX5_REG_PMAOS		 = 0x5012,
	MLX5_REG_PUDE		 = 0x5009,
	MLX5_REG_PMPE		 = 0x5010,
	MLX5_REG_PELC		 = 0x500e,
	MLX5_REG_PVLC		 = 0x500f,
	MLX5_REG_PCMR		 = 0x5041,
	MLX5_REG_PMLP		 = 0x5002,
	MLX5_REG_PPLM		 = 0x5023,
	MLX5_REG_PCAM		 = 0x507f,
	MLX5_REG_NODE_DESC	 = 0x6001,
	MLX5_REG_HOST_ENDIANNESS = 0x7004,
	MLX5_REG_MCIA		 = 0x9014,
	MLX5_REG_MFRL		 = 0x9028,
	MLX5_REG_MLCR		 = 0x902b,
	MLX5_REG_MTRC_CAP	 = 0x9040,
	MLX5_REG_MTRC_CONF	 = 0x9041,
	MLX5_REG_MTRC_STDB	 = 0x9042,
	MLX5_REG_MTRC_CTRL	 = 0x9043,
	MLX5_REG_MPEIN		 = 0x9050,
	MLX5_REG_MPCNT		 = 0x9051,
	MLX5_REG_MTPPS		 = 0x9053,
	MLX5_REG_MTPPSE		 = 0x9054,
	MLX5_REG_MPEGC		 = 0x9056,
	MLX5_REG_MCQS		 = 0x9060,
	MLX5_REG_MCQI		 = 0x9061,
	MLX5_REG_MCC		 = 0x9062,
	MLX5_REG_MCDA		 = 0x9063,
	MLX5_REG_MCAM		 = 0x907f,
	MLX5_REG_MIRC		 = 0x9162,
	MLX5_REG_SBCAM		 = 0xB01F,
	MLX5_REG_RESOURCE_DUMP	 = 0xC000,
};

enum mlx5_qpts_trust_state {
	MLX5_QPTS_TRUST_PCP  = 1,
	MLX5_QPTS_TRUST_DSCP = 2,
};

enum mlx5_dcbx_oper_mode {
	MLX5E_DCBX_PARAM_VER_OPER_HOST  = 0x0,
	MLX5E_DCBX_PARAM_VER_OPER_AUTO  = 0x3,
};

enum {
	MLX5_ATOMIC_OPS_CMP_SWAP		= 1 << 0,
	MLX5_ATOMIC_OPS_FETCH_ADD		= 1 << 1,
	MLX5_ATOMIC_OPS_EXTENDED_CMP_SWAP	= 1 << 2,
	MLX5_ATOMIC_OPS_EXTENDED_FETCH_ADD	= 1 << 3,
};

enum mlx5_page_fault_resume_flags {
	MLX5_PAGE_FAULT_RESUME_REQUESTOR = 1 << 0,
	MLX5_PAGE_FAULT_RESUME_WRITE	 = 1 << 1,
	MLX5_PAGE_FAULT_RESUME_RDMA	 = 1 << 2,
	MLX5_PAGE_FAULT_RESUME_ERROR	 = 1 << 7,
};

enum dbg_rsc_type {
	MLX5_DBG_RSC_QP,
	MLX5_DBG_RSC_EQ,
	MLX5_DBG_RSC_CQ,
};

enum port_state_policy {
	MLX5_POLICY_DOWN	= 0,
	MLX5_POLICY_UP		= 1,
	MLX5_POLICY_FOLLOW	= 2,
	MLX5_POLICY_INVALID	= 0xffffffff
};

enum mlx5_coredev_type {
	MLX5_COREDEV_PF,
	MLX5_COREDEV_VF
};

struct mlx5_field_desc {
	int			i;
};

struct mlx5_rsc_debug {
	struct mlx5_core_dev   *dev;
	void		       *object;
	enum dbg_rsc_type	type;
	struct dentry	       *root;
	struct mlx5_field_desc	fields[];
};

enum mlx5_dev_event {
	MLX5_DEV_EVENT_SYS_ERROR = 128, /* 0 - 127 are FW events */
	MLX5_DEV_EVENT_PORT_AFFINITY = 129,
};

enum mlx5_port_status {
	MLX5_PORT_UP	= 1,
	MLX5_PORT_DOWN	= 2,
};

enum mlx5_cmdif_state {
	MLX5_CMDIF_STATE_UNINITIALIZED,
	MLX5_CMDIF_STATE_UP,
	MLX5_CMDIF_STATE_DOWN,
};

struct mlx5_cmd_first {
	__be32		data[4];
};

struct mlx5_cmd_msg {
	struct list_head		list;
	struct cmd_msg_cache	       *parent;
	u32				len;
	struct mlx5_cmd_first		first;
	struct mlx5_cmd_mailbox	       *next;
};

struct mlx5_cmd_debug {
	struct dentry  *dbg_root;
	void	       *in_msg;
	void	       *out_msg;
	u8		status;
	u16		inlen;
	u16		outlen;
};

struct cmd_msg_cache {
	/* protect block chain allocations
	 */
	spinlock_t		lock;
	struct list_head	head;
	unsigned int		max_inbox_size;
	unsigned int		num_ent;
};

enum {
	MLX5_NUM_COMMAND_CACHES = 5,
};

struct mlx5_cmd_stats {
	u64		sum;
	u64		n;
	struct dentry  *root;
	/* protect command average calculations */
	spinlock_t	lock;
};

struct mlx5_cmd {
	struct mlx5_nb	nb;

	enum mlx5_cmdif_state	state;
	void	       *cmd_alloc_buf;
	dma_addr_t	alloc_dma;
	int		alloc_size;
	void	       *cmd_buf;
	dma_addr_t	dma;
	u16		cmdif_rev;
	u8		log_sz;
	u8		log_stride;
	int		max_reg_cmds;
	int		events;
	u32 __iomem    *vector;

	/* protect command queue allocations
	 */
	spinlock_t	alloc_lock;

	/* protect token allocations
	 */
	spinlock_t	token_lock;
	u8		token;
	unsigned long	bitmask;
	char		wq_name[MLX5_CMD_WQ_MAX_NAME];
	struct workqueue_struct *wq;
	struct semaphore sem;
	struct semaphore pages_sem;
	int		mode;
	u16		allowed_opcode;
	struct mlx5_cmd_work_ent *ent_arr[MLX5_MAX_COMMANDS];
	struct dma_pool *pool;
	struct mlx5_cmd_debug dbg;
	struct cmd_msg_cache cache[MLX5_NUM_COMMAND_CACHES];
	int		checksum_disabled;
	struct mlx5_cmd_stats *stats;
};

struct mlx5_port_caps {
	int	gid_table_len;
	int	pkey_table_len;
	u8	ext_port_cap;
	bool	has_smi;
};

struct mlx5_cmd_mailbox {
	void	       *buf;
	dma_addr_t	dma;
	struct mlx5_cmd_mailbox *next;
};

struct mlx5_buf_list {
	void	       *buf;
	dma_addr_t	map;
};

struct mlx5_frag_buf {
	struct mlx5_buf_list	*frags;
	int			npages;
	int			size;
	u8			page_shift;
};

struct mlx5_frag_buf_ctrl {
	struct mlx5_buf_list   *frags;
	u32			sz_m1;
	u16			frag_sz_m1;
	u16			strides_offset;
	u8			log_sz;
	u8			log_stride;
	u8			log_frag_strides;
};

struct mlx5_core_psv {
	u32	psv_idx;
	struct psv_layout {
		u32	pd;
		u16	syndrome;
		u16	reserved;
		u16	bg;
		u16	app_tag;
		u32	ref_tag;
	} psv;
};

struct mlx5_core_sig_ctx {
	struct mlx5_core_psv	psv_memory;
	struct mlx5_core_psv	psv_wire;
	struct ib_sig_err	err_item;
	bool			sig_status_checked;
	bool			sig_err_exists;
	u32			sigerr_count;
};

enum {
	MLX5_MKEY_MR = 1,
	MLX5_MKEY_MW,
	MLX5_MKEY_INDIRECT_DEVX,
};

struct mlx5_core_mkey {
	u64			iova;
	u64			size;
	u32			key;
	u32			pd;
	u32			type;
};

#define MLX5_24BIT_MASK		((1 << 24) - 1)

enum mlx5_res_type {
	MLX5_RES_QP	= MLX5_EVENT_QUEUE_TYPE_QP,
	MLX5_RES_RQ	= MLX5_EVENT_QUEUE_TYPE_RQ,
	MLX5_RES_SQ	= MLX5_EVENT_QUEUE_TYPE_SQ,
	MLX5_RES_SRQ	= 3,
	MLX5_RES_XSRQ	= 4,
	MLX5_RES_XRQ	= 5,
	MLX5_RES_DCT	= MLX5_EVENT_QUEUE_TYPE_DCT,
};

struct mlx5_core_rsc_common {
	enum mlx5_res_type	res;
	refcount_t		refcount;
	struct completion	free;
};

struct mlx5_uars_page {
	void __iomem	       *map;
	bool			wc;
	u32			index;
	struct list_head	list;
	unsigned int		bfregs;
	unsigned long	       *reg_bitmap; /* for non fast path bf regs */
	unsigned long	       *fp_bitmap;
	unsigned int		reg_avail;
	unsigned int		fp_avail;
	struct kref		ref_count;
	struct mlx5_core_dev   *mdev;
};

struct mlx5_bfreg_head {
	/* protect blue flame registers allocations */
	struct mutex		lock;
	struct list_head	list;
};

struct mlx5_bfreg_data {
	struct mlx5_bfreg_head	reg_head;
	struct mlx5_bfreg_head	wc_head;
};

struct mlx5_sq_bfreg {
	void __iomem	       *map;
	struct mlx5_uars_page  *up;
	bool			wc;
	u32			index;
	unsigned int		offset;
};

struct mlx5_core_health {
	struct health_buffer __iomem   *health;
	__be32 __iomem		       *health_counter;
	struct timer_list		timer;
	u32				prev;
	int				miss_counter;
	u8				synd;
	u32				fatal_error;
	u32				crdump_size;
	/* wq spinlock to synchronize draining */
	spinlock_t			wq_lock;
	struct workqueue_struct	       *wq;
	unsigned long			flags;
	struct work_struct		fatal_report_work;
	struct work_struct		report_work;
	struct delayed_work		recover_work;
	struct devlink_health_reporter *fw_reporter;
	struct devlink_health_reporter *fw_fatal_reporter;
};

struct mlx5_qp_table {
	struct notifier_block	nb;

	/* protect radix tree
	 */
	spinlock_t		lock;
	struct radix_tree_root	tree;
};

struct mlx5_vf_context {
	int	enabled;
	u64	port_guid;
	u64	node_guid;
	/* Valid bits are used to validate administrative guid only.
	 * Enabled after ndo_set_vf_guid
	 */
	u8	port_guid_valid:1;
	u8	node_guid_valid:1;
	enum port_state_policy	policy;
};

struct mlx5_core_sriov {
	struct mlx5_vf_context	*vfs_ctx;
	int			num_vfs;
	u16			max_vfs;
};

struct mlx5_fc_pool {
	struct mlx5_core_dev *dev;
	struct mutex pool_lock; /* protects pool lists */
	struct list_head fully_used;
	struct list_head partially_used;
	struct list_head unused;
	int available_fcs;
	int used_fcs;
	int threshold;
};

struct mlx5_fc_stats {
	spinlock_t counters_idr_lock; /* protects counters_idr */
	struct idr counters_idr;
	struct list_head counters;
	struct llist_head addlist;
	struct llist_head dellist;

	struct workqueue_struct *wq;
	struct delayed_work work;
	unsigned long next_query;
	unsigned long sampling_interval; /* jiffies */
	u32 *bulk_query_out;
	struct mlx5_fc_pool fc_pool;
};

struct mlx5_events;
struct mlx5_mpfs;
struct mlx5_eswitch;
struct mlx5_lag;
struct mlx5_devcom;
struct mlx5_fw_reset;
struct mlx5_eq_table;
struct mlx5_irq_table;

struct mlx5_rate_limit {
	u32			rate;
	u32			max_burst_sz;
	u16			typical_pkt_sz;
};

struct mlx5_rl_entry {
	u8 rl_raw[MLX5_ST_SZ_BYTES(set_pp_rate_limit_context)];
	u16 index;
	u64 refcount;
	u16 uid;
	u8 dedicated : 1;
};

struct mlx5_rl_table {
	/* protect rate limit table */
	struct mutex		rl_lock;
	u16			max_size;
	u32			max_rate;
	u32			min_rate;
	struct mlx5_rl_entry   *rl_entry;
};

struct mlx5_core_roce {
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *fg;
	struct mlx5_flow_handle *allow_rule;
};

struct mlx5_priv {
	/* IRQ table is valid only for real PCI devices (PF or VF) */
	struct mlx5_irq_table	*irq_table;
	struct mlx5_eq_table	*eq_table;

	/* pages stuff */
	struct mlx5_nb		pg_nb;
	struct workqueue_struct *pg_wq;
	struct xarray		page_root_xa;
	int			fw_pages;
	atomic_t		reg_pages;
	struct list_head	free_list;
	int			vfs_pages;
	int			peer_pf_pages;

	struct mlx5_core_health health;

	/* start: qp stuff */
	struct dentry	       *qp_debugfs;
	struct dentry	       *eq_debugfs;
	struct dentry	       *cq_debugfs;
	struct dentry	       *cmdif_debugfs;
	/* end: qp stuff */

	/* start: alloc stuff */
	/* protect buffer allocation according to numa node */
	struct mutex		alloc_mutex;
	int			numa_node;

	struct mutex		pgdir_mutex;
	struct list_head	pgdir_list;
	/* end: alloc stuff */
	struct dentry	       *dbg_root;

	struct list_head	dev_list;
	struct list_head	ctx_list;
	spinlock_t		ctx_lock;
	struct mlx5_events     *events;

	struct mlx5_flow_steering *steering;
	struct mlx5_mpfs	*mpfs;
	struct mlx5_eswitch	*eswitch;
	struct mlx5_core_sriov	sriov;
	struct mlx5_lag		*lag;
	struct mlx5_devcom	*devcom;
	struct mlx5_fw_reset	*fw_reset;
	struct mlx5_core_roce	roce;
	struct mlx5_fc_stats	fc_stats;
	struct mlx5_rl_table	rl_table;

	struct mlx5_bfreg_data	bfregs;
	struct mlx5_uars_page  *uar;
};

enum mlx5_device_state {
	MLX5_DEVICE_STATE_UNINITIALIZED,
	MLX5_DEVICE_STATE_UP,
	MLX5_DEVICE_STATE_INTERNAL_ERROR,
};

enum mlx5_interface_state {
	MLX5_INTERFACE_STATE_UP = BIT(0),
};

enum mlx5_pci_status {
	MLX5_PCI_STATUS_DISABLED,
	MLX5_PCI_STATUS_ENABLED,
};

enum mlx5_pagefault_type_flags {
	MLX5_PFAULT_REQUESTOR = 1 << 0,
	MLX5_PFAULT_WRITE     = 1 << 1,
	MLX5_PFAULT_RDMA      = 1 << 2,
};

struct mlx5_td {
	/* protects tirs list changes while tirs refresh */
	struct mutex		list_lock;
	struct list_head	tirs_list;
	u32			tdn;
};

struct mlx5e_resources {
	u32			pdn;
	struct mlx5_td		td;
	struct mlx5_core_mkey	mkey;
	struct mlx5_sq_bfreg	bfreg;
};

enum mlx5_sw_icm_type {
	MLX5_SW_ICM_TYPE_STEERING,
	MLX5_SW_ICM_TYPE_HEADER_MODIFY,
};

#define MLX5_MAX_RESERVED_GIDS 8

struct mlx5_rsvd_gids {
	unsigned int start;
	unsigned int count;
	struct ida ida;
};

#define MAX_PIN_NUM	8
struct mlx5_pps {
	u8			pin_caps[MAX_PIN_NUM];
	struct work_struct	out_work;
	u64			start[MAX_PIN_NUM];
	u8			enabled;
};

struct mlx5_clock {
	struct mlx5_nb		pps_nb;
	seqlock_t		lock;
	struct cyclecounter	cycles;
	struct timecounter	tc;
	struct hwtstamp_config	hwtstamp_config;
	u32			nominal_c_mult;
	unsigned long		overflow_period;
	struct delayed_work	overflow_work;
	struct ptp_clock	*ptp;
	struct ptp_clock_info	ptp_info;
	struct mlx5_pps		pps_info;
};

struct mlx5_dm;
struct mlx5_fw_tracer;
struct mlx5_vxlan;
struct mlx5_geneve;
struct mlx5_hv_vhca;

#define MLX5_LOG_SW_ICM_BLOCK_SIZE(dev) (MLX5_CAP_DEV_MEM(dev, log_sw_icm_alloc_granularity))
#define MLX5_SW_ICM_BLOCK_SIZE(dev) (1 << MLX5_LOG_SW_ICM_BLOCK_SIZE(dev))
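
/*
 * The SW ICM block size follows directly from the device-memory capability
 * above: block size in bytes is 1 << log_sw_icm_alloc_granularity, so SW ICM
 * allocations are typically sized in multiples of MLX5_SW_ICM_BLOCK_SIZE(dev)
 * (see mlx5_dm_sw_icm_alloc() further down in this header).
 */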

struct mlx5_core_dev {
	struct device *device;
	enum mlx5_coredev_type coredev_type;
	struct pci_dev	       *pdev;
	/* sync pci state */
	struct mutex		pci_status_mutex;
	enum mlx5_pci_status	pci_status;
	u8			rev_id;
	char			board_id[MLX5_BOARD_ID_LEN];
	struct mlx5_cmd		cmd;
	struct mlx5_port_caps	port_caps[MLX5_MAX_PORTS];
	struct {
		u32 hca_cur[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
		u32 hca_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
		u32 pcam[MLX5_ST_SZ_DW(pcam_reg)];
		u32 mcam[MLX5_MCAM_REGS_NUM][MLX5_ST_SZ_DW(mcam_reg)];
		u32 fpga[MLX5_ST_SZ_DW(fpga_cap)];
		u32 qcam[MLX5_ST_SZ_DW(qcam_reg)];
		u8  embedded_cpu;
	} caps;
	u64			sys_image_guid;
	phys_addr_t		iseg_base;
	struct mlx5_init_seg __iomem *iseg;
	phys_addr_t		bar_addr;
	enum mlx5_device_state	state;
	/* sync interface state */
	struct mutex		intf_state_mutex;
	unsigned long		intf_state;
	struct mlx5_priv	priv;
	struct mlx5_profile	*profile;
	u32			issi;
	struct mlx5e_resources	mlx5e_res;
	struct mlx5_dm		*dm;
	struct mlx5_vxlan	*vxlan;
	struct mlx5_geneve	*geneve;
	struct {
		struct mlx5_rsvd_gids	reserved_gids;
		u32			roce_en;
	} roce;
#ifdef CONFIG_MLX5_FPGA
	struct mlx5_fpga_device *fpga;
#endif
#ifdef CONFIG_MLX5_ACCEL
	const struct mlx5_accel_ipsec_ops *ipsec_ops;
#endif
	struct mlx5_clock	clock;
	struct mlx5_ib_clock_info *clock_info;
	struct mlx5_fw_tracer	*tracer;
	struct mlx5_rsc_dump	*rsc_dump;
	u32			vsc_addr;
	struct mlx5_hv_vhca	*hv_vhca;
};

struct mlx5_db {
	__be32			*db;
	union {
		struct mlx5_db_pgdir		*pgdir;
		struct mlx5_ib_user_db_page	*user_page;
	}			u;
	dma_addr_t		dma;
	int			index;
};

enum {
	MLX5_COMP_EQ_SIZE = 1024,
};

enum {
	MLX5_PTYS_IB = 1 << 0,
	MLX5_PTYS_EN = 1 << 2,
};

typedef void (*mlx5_cmd_cbk_t)(int status, void *context);

enum {
	MLX5_CMD_ENT_STATE_PENDING_COMP,
};

struct mlx5_cmd_work_ent {
	unsigned long		state;
	struct mlx5_cmd_msg    *in;
	struct mlx5_cmd_msg    *out;
	void		       *uout;
	int			uout_size;
	mlx5_cmd_cbk_t		callback;
	struct delayed_work	cb_timeout_work;
	void		       *context;
	int			idx;
	struct completion	handling;
	struct completion	done;
	struct mlx5_cmd	       *cmd;
	struct work_struct	work;
	struct mlx5_cmd_layout *lay;
	int			ret;
	int			page_queue;
	u8			status;
	u8			token;
	u64			ts1;
	u64			ts2;
	u16			op;
	bool			polling;
	/* Track the max comp handlers */
	refcount_t		refcnt;
};

struct mlx5_pas {
	u64	pa;
	u8	log_sz;
};

enum phy_port_state {
	MLX5_AAA_111
};

struct mlx5_hca_vport_context {
	u32			field_select;
	bool			sm_virt_aware;
	bool			has_smi;
	bool			has_raw;
	enum port_state_policy	policy;
	enum phy_port_state	phys_state;
	enum ib_port_state	vport_state;
	u8			port_physical_state;
	u64			sys_image_guid;
	u64			port_guid;
	u64			node_guid;
	u32			cap_mask1;
	u32			cap_mask1_perm;
	u16			cap_mask2;
	u16			cap_mask2_perm;
	u16			lid;
	u8			init_type_reply; /* bitmask: see ib spec 14.2.5.6 InitTypeReply */
	u8			lmc;
	u8			subnet_timeout;
	u16			sm_lid;
	u8			sm_sl;
	u16			qkey_violation_counter;
	u16			pkey_violation_counter;
	bool			grh_required;
};

static inline void *mlx5_buf_offset(struct mlx5_frag_buf *buf, int offset)
{
	return buf->frags->buf + offset;
}

#define STRUCT_FIELD(header, field) \
	.struct_offset_bytes = offsetof(struct ib_unpacked_ ## header, field),      \
	.struct_size_bytes   = sizeof((struct ib_unpacked_ ## header *)0)->field

static inline struct mlx5_core_dev *pci2mlx5_core_dev(struct pci_dev *pdev)
{
	return pci_get_drvdata(pdev);
}

extern struct dentry *mlx5_debugfs_root;

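/*
 * Firmware revision helpers: the initialization segment reports the major
 * revision in the low 16 bits of fw_rev and the minor revision in its high
 * 16 bits, while the sub-minor revision sits in the low 16 bits of
 * cmdif_rev_fw_sub (the high half of that word carries the command
 * interface revision).
 */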
static inline u16 fw_rev_maj(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->fw_rev) & 0xffff;
}

static inline u16 fw_rev_min(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->fw_rev) >> 16;
}

static inline u16 fw_rev_sub(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->cmdif_rev_fw_sub) & 0xffff;
}

static inline u32 mlx5_base_mkey(const u32 key)
{
	return key & 0xffffff00u;
}

static inline void mlx5_init_fbc_offset(struct mlx5_buf_list *frags,
					u8 log_stride, u8 log_sz,
					u16 strides_offset,
					struct mlx5_frag_buf_ctrl *fbc)
{
	fbc->frags      = frags;
	fbc->log_stride = log_stride;
	fbc->log_sz     = log_sz;
	fbc->sz_m1	= (1 << fbc->log_sz) - 1;
	fbc->log_frag_strides = PAGE_SHIFT - fbc->log_stride;
	fbc->frag_sz_m1	= (1 << fbc->log_frag_strides) - 1;
	fbc->strides_offset = strides_offset;
}

static inline void mlx5_init_fbc(struct mlx5_buf_list *frags,
				 u8 log_stride, u8 log_sz,
				 struct mlx5_frag_buf_ctrl *fbc)
{
	mlx5_init_fbc_offset(frags, log_stride, log_sz, 0, fbc);
}

static inline void *mlx5_frag_buf_get_wqe(struct mlx5_frag_buf_ctrl *fbc,
					  u32 ix)
{
	unsigned int frag;

	ix  += fbc->strides_offset;
	frag = ix >> fbc->log_frag_strides;

	return fbc->frags[frag].buf + ((fbc->frag_sz_m1 & ix) << fbc->log_stride);
}

static inline u32
mlx5_frag_buf_get_idx_last_contig_stride(struct mlx5_frag_buf_ctrl *fbc, u32 ix)
{
	u32 last_frag_stride_idx = (ix + fbc->strides_offset) | fbc->frag_sz_m1;

	return min_t(u32, last_frag_stride_idx - fbc->strides_offset, fbc->sz_m1);
}
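
/*
 * Illustrative sketch (not part of the original header) of how a fragmented
 * buffer control is typically wired up by callers such as the WQ code; the
 * variable names and buffer sizing below are hypothetical:
 *
 *	struct mlx5_frag_buf_ctrl fbc;
 *	struct mlx5_frag_buf buf;
 *	int err;
 *
 *	err = mlx5_frag_buf_alloc_node(mdev, 1 << (log_sz + log_stride),
 *				       &buf, numa_node);
 *	if (!err) {
 *		mlx5_init_fbc(buf.frags, log_stride, log_sz, &fbc);
 *		wqe = mlx5_frag_buf_get_wqe(&fbc, wqe_index);
 *	}
 */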

enum {
	CMD_ALLOWED_OPCODE_ALL,
};

int mlx5_cmd_init(struct mlx5_core_dev *dev);
void mlx5_cmd_cleanup(struct mlx5_core_dev *dev);
void mlx5_cmd_set_state(struct mlx5_core_dev *dev,
			enum mlx5_cmdif_state cmdif_state);
void mlx5_cmd_use_events(struct mlx5_core_dev *dev);
void mlx5_cmd_use_polling(struct mlx5_core_dev *dev);
void mlx5_cmd_allowed_opcode(struct mlx5_core_dev *dev, u16 opcode);

struct mlx5_async_ctx {
	struct mlx5_core_dev *dev;
	atomic_t num_inflight;
	struct wait_queue_head wait;
};

struct mlx5_async_work;

typedef void (*mlx5_async_cbk_t)(int status, struct mlx5_async_work *context);

struct mlx5_async_work {
	struct mlx5_async_ctx *ctx;
	mlx5_async_cbk_t user_callback;
};

void mlx5_cmd_init_async_ctx(struct mlx5_core_dev *dev,
			     struct mlx5_async_ctx *ctx);
void mlx5_cmd_cleanup_async_ctx(struct mlx5_async_ctx *ctx);
int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size,
		     void *out, int out_size, mlx5_async_cbk_t callback,
		     struct mlx5_async_work *work);
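
/*
 * Illustrative sketch (not part of the original header) of the asynchronous
 * command flow; names other than the declarations above are hypothetical:
 *
 *	static void my_cb(int status, struct mlx5_async_work *work)
 *	{
 *		// inspect status, then typically recover the enclosing
 *		// object via container_of(work, ...) and complete/free it
 *	}
 *
 *	mlx5_cmd_init_async_ctx(dev, &ctx);
 *	err = mlx5_cmd_exec_cb(&ctx, in, sizeof(in), out, sizeof(out),
 *			       my_cb, &work);
 *	...
 *	mlx5_cmd_cleanup_async_ctx(&ctx);	// waits for in-flight work
 */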

int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
		  int out_size);

#define mlx5_cmd_exec_inout(dev, ifc_cmd, in, out)                             \
	({                                                                     \
		mlx5_cmd_exec(dev, in, MLX5_ST_SZ_BYTES(ifc_cmd##_in), out,    \
			      MLX5_ST_SZ_BYTES(ifc_cmd##_out));                \
	})

#define mlx5_cmd_exec_in(dev, ifc_cmd, in)                                     \
	({                                                                     \
		u32 _out[MLX5_ST_SZ_DW(ifc_cmd##_out)] = {};                   \
		mlx5_cmd_exec_inout(dev, ifc_cmd, in, _out);                   \
	})
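
/*
 * Illustrative sketch (not part of the original header): the wrappers above
 * derive the input/output lengths from the mlx5_ifc command layouts, so a
 * typical caller only fills the input box. The command chosen here is just
 * an example of the pattern:
 *
 *	u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {};
 *
 *	MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
 *	err = mlx5_cmd_exec_in(dev, enable_hca, in);
 */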

int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size,
			  void *out, int out_size);
void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome);
bool mlx5_cmd_is_down(struct mlx5_core_dev *dev);

int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type);
int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn);
int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn);
void mlx5_health_flush(struct mlx5_core_dev *dev);
void mlx5_health_cleanup(struct mlx5_core_dev *dev);
int mlx5_health_init(struct mlx5_core_dev *dev);
void mlx5_start_health_poll(struct mlx5_core_dev *dev);
void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health);
void mlx5_drain_health_wq(struct mlx5_core_dev *dev);
void mlx5_trigger_health_work(struct mlx5_core_dev *dev);
int mlx5_buf_alloc(struct mlx5_core_dev *dev,
		   int size, struct mlx5_frag_buf *buf);
void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf);
int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
			     struct mlx5_frag_buf *buf, int node);
void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf);
struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev,
						      gfp_t flags, int npages);
void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev,
				 struct mlx5_cmd_mailbox *head);
int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
			  struct mlx5_core_mkey *mkey,
			  u32 *in, int inlen);
int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev,
			   struct mlx5_core_mkey *mkey);
int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey,
			 u32 *out, int outlen);
int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn);
int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn);
int mlx5_pagealloc_init(struct mlx5_core_dev *dev);
void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev);
void mlx5_pagealloc_start(struct mlx5_core_dev *dev);
void mlx5_pagealloc_stop(struct mlx5_core_dev *dev);
void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
				 s32 npages, bool ec_function);
int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot);
int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev);
void mlx5_register_debugfs(void);
void mlx5_unregister_debugfs(void);

void mlx5_fill_page_array(struct mlx5_frag_buf *buf, __be64 *pas);
void mlx5_fill_page_frag_array_perm(struct mlx5_frag_buf *buf, __be64 *pas, u8 perm);
void mlx5_fill_page_frag_array(struct mlx5_frag_buf *frag_buf, __be64 *pas);
int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn);
int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);

void mlx5_qp_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
			 int size_in, void *data_out, int size_out,
			 u16 reg_num, int arg, int write);

int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db);
int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db,
		       int node);
void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db);

const char *mlx5_command_str(int command);
void mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn,
			 int npsvs, u32 *sig_index);
int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num);
void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common);
int mlx5_query_odp_caps(struct mlx5_core_dev *dev,
			struct mlx5_odp_caps *odp_caps);
int mlx5_core_query_ib_ppcnt(struct mlx5_core_dev *dev,
			     u8 port_num, void *out, size_t sz);

int mlx5_init_rl_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev);
int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u16 *index,
		     struct mlx5_rate_limit *rl);
void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, struct mlx5_rate_limit *rl);
bool mlx5_rl_is_in_range(struct mlx5_core_dev *dev, u32 rate);
int mlx5_rl_add_rate_raw(struct mlx5_core_dev *dev, void *rl_in, u16 uid,
			 bool dedicated_entry, u16 *index);
void mlx5_rl_remove_rate_raw(struct mlx5_core_dev *dev, u16 index);
bool mlx5_rl_are_equal(struct mlx5_rate_limit *rl_0,
		       struct mlx5_rate_limit *rl_1);
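
/*
 * Illustrative sketch (not part of the original header): adding and dropping
 * a packet-pacing rate through the shared rate-limit table. The rate value
 * below is a placeholder; valid rates are bounded by rl_table.min_rate and
 * rl_table.max_rate (see mlx5_rl_is_in_range()):
 *
 *	struct mlx5_rate_limit rl = { .rate = some_rate };
 *	u16 index;
 *
 *	if (mlx5_rl_is_in_range(dev, rl.rate) &&
 *	    !mlx5_rl_add_rate(dev, &index, &rl)) {
 *		// program 'index' into the SQ/QP context, then later:
 *		mlx5_rl_remove_rate(dev, &rl);
 *	}
 */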
int mlx5_alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg,
		     bool map_wc, bool fast_path);
void mlx5_free_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg);

unsigned int mlx5_comp_vectors_count(struct mlx5_core_dev *dev);
struct cpumask *
mlx5_comp_irq_get_affinity_mask(struct mlx5_core_dev *dev, int vector);
unsigned int mlx5_core_reserved_gids_count(struct mlx5_core_dev *dev);
int mlx5_core_roce_gid_set(struct mlx5_core_dev *dev, unsigned int index,
			   u8 roce_version, u8 roce_l3_type, const u8 *gid,
			   const u8 *mac, bool vlan, u16 vlan_id, u8 port_num);

static inline u32 mlx5_mkey_to_idx(u32 mkey)
{
	return mkey >> 8;
}

static inline u32 mlx5_idx_to_mkey(u32 mkey_idx)
{
	return mkey_idx << 8;
}

static inline u8 mlx5_mkey_variant(u32 mkey)
{
	return mkey & 0xff;
}
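
/*
 * A 32-bit memory key is composed of a 24-bit index in the upper bits and an
 * 8-bit variant (key tag) in the lowest byte, which is why the helpers above
 * shift by 8 and mask with 0xff; MLX5_24BIT_MASK bounds the index space.
 * For example, index 0x000012 combined with variant 0x34 yields mkey
 * 0x00001234.
 */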

enum {
	MLX5_PROF_MASK_QP_SIZE		= (u64)1 << 0,
	MLX5_PROF_MASK_MR_CACHE		= (u64)1 << 1,
};

enum {
	MR_CACHE_LAST_STD_ENTRY = 20,
	MLX5_IMR_MTT_CACHE_ENTRY,
	MLX5_IMR_KSM_CACHE_ENTRY,
	MAX_MR_CACHE_ENTRIES
};

enum {
	MLX5_INTERFACE_PROTOCOL_IB   = 0,
	MLX5_INTERFACE_PROTOCOL_ETH  = 1,
	MLX5_INTERFACE_PROTOCOL_VDPA = 2,
};

struct mlx5_interface {
	void *			(*add)(struct mlx5_core_dev *dev);
	void			(*remove)(struct mlx5_core_dev *dev, void *context);
	int			(*attach)(struct mlx5_core_dev *dev, void *context);
	void			(*detach)(struct mlx5_core_dev *dev, void *context);
	int			protocol;
	struct list_head	list;
};

int mlx5_register_interface(struct mlx5_interface *intf);
void mlx5_unregister_interface(struct mlx5_interface *intf);
int mlx5_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb);
int mlx5_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb);
int mlx5_eq_notifier_register(struct mlx5_core_dev *dev, struct mlx5_nb *nb);
int mlx5_eq_notifier_unregister(struct mlx5_core_dev *dev, struct mlx5_nb *nb);

int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id);

int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev);
int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev);
bool mlx5_lag_is_roce(struct mlx5_core_dev *dev);
bool mlx5_lag_is_sriov(struct mlx5_core_dev *dev);
bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev);
bool mlx5_lag_is_active(struct mlx5_core_dev *dev);
struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev);
u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev,
			   struct net_device *slave);
int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
				 u64 *values,
				 int num_counters,
				 size_t *offsets);
struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev);
void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up);
int mlx5_dm_sw_icm_alloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type,
			 u64 length, u32 log_alignment, u16 uid,
			 phys_addr_t *addr, u32 *obj_id);
int mlx5_dm_sw_icm_dealloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type,
			   u64 length, u16 uid, phys_addr_t addr, u32 obj_id);

#ifdef CONFIG_MLX5_CORE_IPOIB
struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
					  struct ib_device *ibdev,
					  const char *name,
					  void (*setup)(struct net_device *));
#endif /* CONFIG_MLX5_CORE_IPOIB */
int mlx5_rdma_rn_get_params(struct mlx5_core_dev *mdev,
			    struct ib_device *device,
			    struct rdma_netdev_alloc_params *params);

struct mlx5_profile {
	u64	mask;
	u8	log_max_qp;
	struct {
		int	size;
		int	limit;
	} mr_cache[MAX_MR_CACHE_ENTRIES];
};

enum {
	MLX5_PCI_DEV_IS_VF		= 1 << 0,
};

static inline bool mlx5_core_is_pf(const struct mlx5_core_dev *dev)
{
	return dev->coredev_type == MLX5_COREDEV_PF;
}

static inline bool mlx5_core_is_vf(const struct mlx5_core_dev *dev)
{
	return dev->coredev_type == MLX5_COREDEV_VF;
}

static inline bool mlx5_core_is_ecpf(struct mlx5_core_dev *dev)
{
	return dev->caps.embedded_cpu;
}

static inline bool
mlx5_core_is_ecpf_esw_manager(const struct mlx5_core_dev *dev)
{
	return dev->caps.embedded_cpu && MLX5_CAP_GEN(dev, eswitch_manager);
}

static inline bool mlx5_ecpf_vport_exists(const struct mlx5_core_dev *dev)
{
	return mlx5_core_is_pf(dev) && MLX5_CAP_ESW(dev, ecpf_vport_exists);
}

static inline u16 mlx5_core_max_vfs(const struct mlx5_core_dev *dev)
{
	return dev->priv.sriov.max_vfs;
}

static inline int mlx5_get_gid_table_len(u16 param)
{
	if (param > 4) {
		pr_warn("gid table length is zero\n");
		return 0;
	}

	return 8 * (1 << param);
}

static inline bool mlx5_rl_is_supported(struct mlx5_core_dev *dev)
{
	return !!(dev->priv.rl_table.max_size);
}

static inline int mlx5_core_is_mp_slave(struct mlx5_core_dev *dev)
{
	return MLX5_CAP_GEN(dev, affiliate_nic_vport_criteria) &&
	       MLX5_CAP_GEN(dev, num_vhca_ports) <= 1;
}

static inline int mlx5_core_is_mp_master(struct mlx5_core_dev *dev)
{
	return MLX5_CAP_GEN(dev, num_vhca_ports) > 1;
}

static inline int mlx5_core_mp_enabled(struct mlx5_core_dev *dev)
{
	return mlx5_core_is_mp_slave(dev) ||
	       mlx5_core_is_mp_master(dev);
}

static inline int mlx5_core_native_port_num(struct mlx5_core_dev *dev)
{
	if (!mlx5_core_mp_enabled(dev))
		return 1;

	return MLX5_CAP_GEN(dev, native_port_num);
}

enum {
	MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32,
};

static inline bool mlx5_is_roce_enabled(struct mlx5_core_dev *dev)
{
	struct devlink *devlink = priv_to_devlink(dev);
	union devlink_param_value val;

	devlink_param_driverinit_value_get(devlink,
					   DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE,
					   &val);
	return val.vbool;
}

#endif /* MLX5_DRIVER_H */