Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1 | /* |
| 2 | * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. |
| 3 | * |
| 4 | * This software is available to you under a choice of one of two |
| 5 | * licenses. You may choose to be licensed under the terms of the GNU |
| 6 | * General Public License (GPL) Version 2, available from the file |
| 7 | * COPYING in the main directory of this source tree, or the |
| 8 | * OpenIB.org BSD license below: |
| 9 | * |
| 10 | * Redistribution and use in source and binary forms, with or |
| 11 | * without modification, are permitted provided that the following |
| 12 | * conditions are met: |
| 13 | * |
| 14 | * - Redistributions of source code must retain the above |
| 15 | * copyright notice, this list of conditions and the following |
| 16 | * disclaimer. |
| 17 | * |
| 18 | * - Redistributions in binary form must reproduce the above |
| 19 | * copyright notice, this list of conditions and the following |
| 20 | * disclaimer in the documentation and/or other materials |
| 21 | * provided with the distribution. |
| 22 | * |
| 23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, |
| 24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF |
| 25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND |
| 26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS |
| 27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN |
| 28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN |
| 29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE |
| 30 | * SOFTWARE. |
| 31 | */ |
| 32 | |
| 33 | #ifndef MLX5_CORE_CQ_H |
| 34 | #define MLX5_CORE_CQ_H |
| 35 | |
| 36 | #include <rdma/ib_verbs.h> |
| 37 | #include <linux/mlx5/driver.h> |
| 38 | #include <linux/refcount.h> |
| 39 | |
/*
 * Driver-side state for one mlx5 completion queue (CQ).
 * Doorbell records (set_ci_db/arm_db) live in host memory and are read by
 * the device; the helpers later in this header update them.
 */
struct mlx5_core_cq {
	u32			cqn;		/* CQ number; second word of the arm doorbell (see mlx5_cq_arm()) */
	int			cqe_sz;		/* CQE size in bytes; presumably 64 or 128 (cf. cqe_sz_to_mlx_sz()) -- TODO confirm */
	__be32		       *set_ci_db;	/* consumer-index doorbell record, updated by mlx5_cq_set_ci() */
	__be32		       *arm_db;		/* arm doorbell record, updated by mlx5_cq_arm() */
	struct mlx5_uars_page  *uar;		/* UAR page; not referenced in this header -- semantics in driver core */
	refcount_t		refcount;	/* taken by mlx5_cq_hold(), dropped by mlx5_cq_put() */
	struct completion	free;		/* completed when the last reference is dropped (see mlx5_cq_put()) */
	unsigned		vector;
	unsigned int		irqn;
	/* Completion handler; @eqe is the event-queue entry that triggered it. */
	void (*comp)(struct mlx5_core_cq *cq, struct mlx5_eqe *eqe);
	/* Asynchronous event handler. */
	void (*event)(struct mlx5_core_cq *, enum mlx5_event);
	u32			cons_index;	/* consumer index; low 24 bits published via mlx5_cq_set_ci() */
	unsigned		arm_sn;		/* arm sequence number; low 2 bits encoded into the arm doorbell */
	struct mlx5_rsc_debug	*dbg;		/* debugfs state (see mlx5_debug_cq_add()/mlx5_debug_cq_remove()) */
	int			pid;
	struct {
		struct list_head	list;
		/* Completion handler invoked from tasklet context -- NOTE(review): inferred from the name; confirm in driver core. */
		void (*comp)(struct mlx5_core_cq *cq, struct mlx5_eqe *eqe);
		void		       *priv;
	} tasklet_ctx;
	int			reset_notify_added;
	struct list_head	reset_notify;
	struct mlx5_eq_comp    *eq;		/* presumably the completion EQ this CQ is mapped to -- TODO confirm */
	u16			uid;		/* NOTE(review): UID semantics not visible in this header */
};
| 66 | |
| 67 | |
/*
 * Error syndromes reported by the HW in error CQEs.
 * Values are hardware-defined (presumably per the mlx5 PRM) -- do not renumber.
 */
enum {
	MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR		= 0x01,
	MLX5_CQE_SYNDROME_LOCAL_QP_OP_ERR		= 0x02,
	MLX5_CQE_SYNDROME_LOCAL_PROT_ERR		= 0x04,
	MLX5_CQE_SYNDROME_WR_FLUSH_ERR			= 0x05,
	MLX5_CQE_SYNDROME_MW_BIND_ERR			= 0x06,
	MLX5_CQE_SYNDROME_BAD_RESP_ERR			= 0x10,
	MLX5_CQE_SYNDROME_LOCAL_ACCESS_ERR		= 0x11,
	MLX5_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR		= 0x12,
	MLX5_CQE_SYNDROME_REMOTE_ACCESS_ERR		= 0x13,
	MLX5_CQE_SYNDROME_REMOTE_OP_ERR			= 0x14,
	MLX5_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR	= 0x15,
	MLX5_CQE_SYNDROME_RNR_RETRY_EXC_ERR		= 0x16,
	MLX5_CQE_SYNDROME_REMOTE_ABORTED_ERR		= 0x22,
};
| 83 | |
/*
 * CQE ownership-bit mask and CQE opcodes.
 * Values are hardware-defined -- do not renumber.
 */
enum {
	MLX5_CQE_OWNER_MASK	= 1,	/* mask for the CQE ownership bit */
	MLX5_CQE_REQ		= 0,
	MLX5_CQE_RESP_WR_IMM	= 1,
	MLX5_CQE_RESP_SEND	= 2,
	MLX5_CQE_RESP_SEND_IMM	= 3,
	MLX5_CQE_RESP_SEND_INV	= 4,
	MLX5_CQE_RESIZE_CQ	= 5,
	MLX5_CQE_SIG_ERR	= 12,
	MLX5_CQE_REQ_ERR	= 13,
	MLX5_CQE_RESP_ERR	= 14,
	MLX5_CQE_INVALID	= 15,
};
| 97 | |
/* Bitmask flags selecting which CQ attributes a modify operation changes. */
enum {
	MLX5_CQ_MODIFY_PERIOD	= 1 << 0,
	MLX5_CQ_MODIFY_COUNT	= 1 << 1,
	MLX5_CQ_MODIFY_OVERRUN	= 1 << 2,
};
| 103 | |
/* Opmod and field-select masks for the resize-CQ flavor of modify-CQ. */
enum {
	MLX5_CQ_OPMOD_RESIZE		= 1,
	MLX5_MODIFY_CQ_MASK_LOG_SIZE	= 1 << 0,
	MLX5_MODIFY_CQ_MASK_PG_OFFSET	= 1 << 1,
	MLX5_MODIFY_CQ_MASK_PG_SIZE	= 1 << 2,
};
| 110 | |
/*
 * Parameters for a modify-CQ operation; @type selects which union member
 * is meaningful.
 */
struct mlx5_cq_modify_params {
	int type;
	union {
		struct {
			u32 page_offset;
			u8  log_cq_size;	/* log2 of the new CQ size */
		} resize;

		/* NOTE(review): moder/mapping are placeholders with no fields yet. */
		struct {
		} moder;

		struct {
		} mapping;
	} params;
};
| 126 | |
/* Hardware encodings of the CQE stride (see cqe_sz_to_mlx_sz()). */
enum {
	CQE_STRIDE_64		= 0,	/* 64-byte CQEs */
	CQE_STRIDE_128		= 1,	/* 128-byte CQEs */
	CQE_STRIDE_128_PAD	= 2,	/* 128-byte CQEs with padding enabled */
};
| 132 | |
/* Largest values representable in the cqc cq_period / cq_max_count fields. */
#define MLX5_MAX_CQ_PERIOD (BIT(__mlx5_bit_sz(cqc, cq_period)) - 1)
#define MLX5_MAX_CQ_COUNT  (BIT(__mlx5_bit_sz(cqc, cq_max_count)) - 1)
| 135 | |
| 136 | static inline int cqe_sz_to_mlx_sz(u8 size, int padding_128_en) |
| 137 | { |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame^] | 138 | return padding_128_en ? CQE_STRIDE_128_PAD : |
| 139 | size == 64 ? CQE_STRIDE_64 : CQE_STRIDE_128; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 140 | } |
| 141 | |
/*
 * Publish the low 24 bits of the current consumer index to the set-CI
 * doorbell record, telling the HW which CQEs have been consumed.
 * NOTE(review): any ordering vs. CQE reads is presumably the caller's
 * responsibility -- no barrier here.
 */
static inline void mlx5_cq_set_ci(struct mlx5_core_cq *cq)
{
	*cq->set_ci_db = cpu_to_be32(cq->cons_index & 0xffffff);
}
| 146 | |
/* Arm-doorbell command bits (bit 24): solicited-only vs. next completion. */
enum {
	MLX5_CQ_DB_REQ_NOT_SOL	= 1 << 24,
	MLX5_CQ_DB_REQ_NOT	= 0 << 24
};
| 151 | |
| 152 | static inline void mlx5_cq_arm(struct mlx5_core_cq *cq, u32 cmd, |
| 153 | void __iomem *uar_page, |
| 154 | u32 cons_index) |
| 155 | { |
| 156 | __be32 doorbell[2]; |
| 157 | u32 sn; |
| 158 | u32 ci; |
| 159 | |
| 160 | sn = cq->arm_sn & 3; |
| 161 | ci = cons_index & 0xffffff; |
| 162 | |
| 163 | *cq->arm_db = cpu_to_be32(sn << 28 | cmd | ci); |
| 164 | |
| 165 | /* Make sure that the doorbell record in host memory is |
| 166 | * written before ringing the doorbell via PCI MMIO. |
| 167 | */ |
| 168 | wmb(); |
| 169 | |
| 170 | doorbell[0] = cpu_to_be32(sn << 28 | cmd | ci); |
| 171 | doorbell[1] = cpu_to_be32(cq->cqn); |
| 172 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame^] | 173 | mlx5_write64(doorbell, uar_page + MLX5_CQ_DOORBELL); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 174 | } |
| 175 | |
/* Take a reference on the CQ; pair with mlx5_cq_put(). */
static inline void mlx5_cq_hold(struct mlx5_core_cq *cq)
{
	refcount_inc(&cq->refcount);
}
| 180 | |
/*
 * Drop a reference on the CQ.  When the last reference goes away,
 * complete cq->free so a waiter (presumably the destroy path -- confirm
 * in driver core) can proceed to tear the CQ down.
 */
static inline void mlx5_cq_put(struct mlx5_core_cq *cq)
{
	if (refcount_dec_and_test(&cq->refcount))
		complete(&cq->free);
}
| 186 | |
/* Create a CQ from a firmware command built in @in; output lands in @out. */
int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
			u32 *in, int inlen, u32 *out, int outlen);
/* Destroy @cq.  Returns 0 on success or a negative errno -- TODO confirm. */
int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
/* Query CQ attributes into the firmware output buffer @out. */
int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
		       u32 *out, int outlen);
/* Modify CQ attributes from the firmware command in @in. */
int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
			u32 *in, int inlen);
/* Convenience wrapper to set event moderation (period/count) on @cq. */
int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev,
				   struct mlx5_core_cq *cq, u16 cq_period,
				   u16 cq_max_count);
/* Hex-dump an error CQE to the kernel log at warning level, for debugging. */
static inline void mlx5_dump_err_cqe(struct mlx5_core_dev *dev,
				     struct mlx5_err_cqe *err_cqe)
{
	print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1, err_cqe,
		       sizeof(*err_cqe), false);
}
/* Register / unregister @cq with the driver's debugfs resource tracking. */
int mlx5_debug_cq_add(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
void mlx5_debug_cq_remove(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
| 205 | |
| 206 | #endif /* MLX5_CORE_CQ_H */ |