/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/kref.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_cache.h>
#include "mlx5_ib.h"
#include "srq.h"

static void mlx5_ib_cq_comp(struct mlx5_core_cq *cq, struct mlx5_eqe *eqe)
{
        struct ib_cq *ibcq = &to_mibcq(cq)->ibcq;

        ibcq->comp_handler(ibcq, ibcq->cq_context);
}

static void mlx5_ib_cq_event(struct mlx5_core_cq *mcq, enum mlx5_event type)
{
        struct mlx5_ib_cq *cq = container_of(mcq, struct mlx5_ib_cq, mcq);
        struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
        struct ib_cq *ibcq = &cq->ibcq;
        struct ib_event event;

        if (type != MLX5_EVENT_TYPE_CQ_ERROR) {
                mlx5_ib_warn(dev, "Unexpected event type %d on CQ %06x\n",
                             type, mcq->cqn);
                return;
        }

        if (ibcq->event_handler) {
                event.device     = &dev->ib_dev;
                event.event      = IB_EVENT_CQ_ERR;
                event.element.cq = ibcq;
                ibcq->event_handler(&event, ibcq->cq_context);
        }
}

static void *get_cqe(struct mlx5_ib_cq *cq, int n)
{
        return mlx5_frag_buf_get_wqe(&cq->buf.fbc, n);
}

static u8 sw_ownership_bit(int n, int nent)
{
        return (n & nent) ? 1 : 0;
}

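/*
 * Return the CQE at index n if software owns it, otherwise NULL.  A CQE
 * is software-owned when its opcode is not MLX5_CQE_INVALID and its
 * ownership bit matches the parity of the consumer index's current pass
 * over the ring.
 */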
static void *get_sw_cqe(struct mlx5_ib_cq *cq, int n)
{
        void *cqe = get_cqe(cq, n & cq->ibcq.cqe);
        struct mlx5_cqe64 *cqe64;

        cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;

        if (likely(get_cqe_opcode(cqe64) != MLX5_CQE_INVALID) &&
            !((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & (cq->ibcq.cqe + 1)))) {
                return cqe;
        } else {
                return NULL;
        }
}

static void *next_cqe_sw(struct mlx5_ib_cq *cq)
{
        return get_sw_cqe(cq, cq->mcq.cons_index);
}

static enum ib_wc_opcode get_umr_comp(struct mlx5_ib_wq *wq, int idx)
{
        switch (wq->wr_data[idx]) {
        case MLX5_IB_WR_UMR:
                return 0;

        case IB_WR_LOCAL_INV:
                return IB_WC_LOCAL_INV;

        case IB_WR_REG_MR:
                return IB_WC_REG_MR;

        default:
                pr_warn("unknown completion status\n");
                return 0;
        }
}

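/*
 * Fill in an ib_wc for a successful send completion; the hardware
 * opcode of the completed WQE lives in the top byte of sop_drop_qpn.
 */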
static void handle_good_req(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
                            struct mlx5_ib_wq *wq, int idx)
{
        wc->wc_flags = 0;
        switch (be32_to_cpu(cqe->sop_drop_qpn) >> 24) {
        case MLX5_OPCODE_RDMA_WRITE_IMM:
                wc->wc_flags |= IB_WC_WITH_IMM;
                /* fall through */
        case MLX5_OPCODE_RDMA_WRITE:
                wc->opcode    = IB_WC_RDMA_WRITE;
                break;
        case MLX5_OPCODE_SEND_IMM:
                wc->wc_flags |= IB_WC_WITH_IMM;
                /* fall through */
        case MLX5_OPCODE_SEND:
        case MLX5_OPCODE_SEND_INVAL:
                wc->opcode    = IB_WC_SEND;
                break;
        case MLX5_OPCODE_RDMA_READ:
                wc->opcode    = IB_WC_RDMA_READ;
                wc->byte_len  = be32_to_cpu(cqe->byte_cnt);
                break;
        case MLX5_OPCODE_ATOMIC_CS:
                wc->opcode    = IB_WC_COMP_SWAP;
                wc->byte_len  = 8;
                break;
        case MLX5_OPCODE_ATOMIC_FA:
                wc->opcode    = IB_WC_FETCH_ADD;
                wc->byte_len  = 8;
                break;
        case MLX5_OPCODE_ATOMIC_MASKED_CS:
                wc->opcode    = IB_WC_MASKED_COMP_SWAP;
                wc->byte_len  = 8;
                break;
        case MLX5_OPCODE_ATOMIC_MASKED_FA:
                wc->opcode    = IB_WC_MASKED_FETCH_ADD;
                wc->byte_len  = 8;
                break;
        case MLX5_OPCODE_UMR:
                wc->opcode = get_umr_comp(wq, idx);
                break;
        }
}

enum {
        MLX5_GRH_IN_BUFFER = 1,
        MLX5_GRH_IN_CQE    = 2,
};

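/*
 * Fill in an ib_wc for a receive completion.  The WRID comes either
 * from an SRQ (looked up by SRQ number for XRC targets) or from the QP
 * receive ring; on Ethernet ports the RoCE-specific fields (VLAN, SL,
 * network header type) are also recovered from the CQE.
 */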
static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
                             struct mlx5_ib_qp *qp)
{
        enum rdma_link_layer ll = rdma_port_get_link_layer(qp->ibqp.device, 1);
        struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.device);
        struct mlx5_ib_srq *srq = NULL;
        struct mlx5_ib_wq *wq;
        u16 wqe_ctr;
        u8 roce_packet_type;
        bool vlan_present;
        u8 g;

        if (qp->ibqp.srq || qp->ibqp.xrcd) {
                struct mlx5_core_srq *msrq = NULL;

                if (qp->ibqp.xrcd) {
                        msrq = mlx5_cmd_get_srq(dev, be32_to_cpu(cqe->srqn));
                        if (msrq)
                                srq = to_mibsrq(msrq);
                } else {
                        srq = to_msrq(qp->ibqp.srq);
                }
                if (srq) {
                        wqe_ctr = be16_to_cpu(cqe->wqe_counter);
                        wc->wr_id = srq->wrid[wqe_ctr];
                        mlx5_ib_free_srq_wqe(srq, wqe_ctr);
                        if (msrq)
                                mlx5_core_res_put(&msrq->common);
                }
        } else {
                wq        = &qp->rq;
                wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
                ++wq->tail;
        }
        wc->byte_len = be32_to_cpu(cqe->byte_cnt);

        switch (get_cqe_opcode(cqe)) {
        case MLX5_CQE_RESP_WR_IMM:
                wc->opcode   = IB_WC_RECV_RDMA_WITH_IMM;
                wc->wc_flags = IB_WC_WITH_IMM;
                wc->ex.imm_data = cqe->imm_inval_pkey;
                break;
        case MLX5_CQE_RESP_SEND:
                wc->opcode   = IB_WC_RECV;
                wc->wc_flags = IB_WC_IP_CSUM_OK;
                if (unlikely(!((cqe->hds_ip_ext & CQE_L3_OK) &&
                               (cqe->hds_ip_ext & CQE_L4_OK))))
                        wc->wc_flags = 0;
                break;
        case MLX5_CQE_RESP_SEND_IMM:
                wc->opcode      = IB_WC_RECV;
                wc->wc_flags    = IB_WC_WITH_IMM;
                wc->ex.imm_data = cqe->imm_inval_pkey;
                break;
        case MLX5_CQE_RESP_SEND_INV:
                wc->opcode      = IB_WC_RECV;
                wc->wc_flags    = IB_WC_WITH_INVALIDATE;
                wc->ex.invalidate_rkey = be32_to_cpu(cqe->imm_inval_pkey);
                break;
        }
        wc->src_qp         = be32_to_cpu(cqe->flags_rqpn) & 0xffffff;
        wc->dlid_path_bits = cqe->ml_path;
        g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3;
        wc->wc_flags |= g ? IB_WC_GRH : 0;
        if (unlikely(is_qp1(qp->ibqp.qp_type))) {
                u16 pkey = be32_to_cpu(cqe->imm_inval_pkey) & 0xffff;

                ib_find_cached_pkey(&dev->ib_dev, qp->port, pkey,
                                    &wc->pkey_index);
        } else {
                wc->pkey_index = 0;
        }

        if (ll != IB_LINK_LAYER_ETHERNET) {
                wc->slid = be16_to_cpu(cqe->slid);
                wc->sl = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0xf;
                return;
        }

        wc->slid = 0;
        vlan_present = cqe->l4_l3_hdr_type & 0x1;
        roce_packet_type = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0x3;
        if (vlan_present) {
                wc->vlan_id = (be16_to_cpu(cqe->vlan_info)) & 0xfff;
                wc->sl = (be16_to_cpu(cqe->vlan_info) >> 13) & 0x7;
                wc->wc_flags |= IB_WC_WITH_VLAN;
        } else {
                wc->sl = 0;
        }

        switch (roce_packet_type) {
        case MLX5_CQE_ROCE_L3_HEADER_TYPE_GRH:
                wc->network_hdr_type = RDMA_NETWORK_IB;
                break;
        case MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV6:
                wc->network_hdr_type = RDMA_NETWORK_IPV6;
                break;
        case MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV4:
                wc->network_hdr_type = RDMA_NETWORK_IPV4;
                break;
        }
        wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
}

static void dump_cqe(struct mlx5_ib_dev *dev, struct mlx5_err_cqe *cqe)
{
        mlx5_ib_warn(dev, "dump error cqe\n");
        mlx5_dump_err_cqe(dev->mdev, cqe);
}

static void mlx5_handle_error_cqe(struct mlx5_ib_dev *dev,
                                  struct mlx5_err_cqe *cqe,
                                  struct ib_wc *wc)
{
        int dump = 1;

        switch (cqe->syndrome) {
        case MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR:
                wc->status = IB_WC_LOC_LEN_ERR;
                break;
        case MLX5_CQE_SYNDROME_LOCAL_QP_OP_ERR:
                wc->status = IB_WC_LOC_QP_OP_ERR;
                break;
        case MLX5_CQE_SYNDROME_LOCAL_PROT_ERR:
                wc->status = IB_WC_LOC_PROT_ERR;
                break;
        case MLX5_CQE_SYNDROME_WR_FLUSH_ERR:
                dump = 0;
                wc->status = IB_WC_WR_FLUSH_ERR;
                break;
        case MLX5_CQE_SYNDROME_MW_BIND_ERR:
                wc->status = IB_WC_MW_BIND_ERR;
                break;
        case MLX5_CQE_SYNDROME_BAD_RESP_ERR:
                wc->status = IB_WC_BAD_RESP_ERR;
                break;
        case MLX5_CQE_SYNDROME_LOCAL_ACCESS_ERR:
                wc->status = IB_WC_LOC_ACCESS_ERR;
                break;
        case MLX5_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR:
                wc->status = IB_WC_REM_INV_REQ_ERR;
                break;
        case MLX5_CQE_SYNDROME_REMOTE_ACCESS_ERR:
                wc->status = IB_WC_REM_ACCESS_ERR;
                break;
        case MLX5_CQE_SYNDROME_REMOTE_OP_ERR:
                wc->status = IB_WC_REM_OP_ERR;
                break;
        case MLX5_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR:
                wc->status = IB_WC_RETRY_EXC_ERR;
                dump = 0;
                break;
        case MLX5_CQE_SYNDROME_RNR_RETRY_EXC_ERR:
                wc->status = IB_WC_RNR_RETRY_EXC_ERR;
                dump = 0;
                break;
        case MLX5_CQE_SYNDROME_REMOTE_ABORTED_ERR:
                wc->status = IB_WC_REM_ABORT_ERR;
                break;
        default:
                wc->status = IB_WC_GENERAL_ERR;
                break;
        }

        wc->vendor_err = cqe->vendor_err_synd;
        if (dump)
                dump_cqe(dev, cqe);
}

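/*
 * Walk the send work-request list from 'tail' up to the completed entry
 * 'head' and record its successor in sq->last_poll, presumably so that
 * completions covering several WQEs (e.g. atomics) keep last_poll in
 * step with the hardware.
 */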
static void handle_atomics(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
                           u16 tail, u16 head)
{
        u16 idx;

        do {
                idx = tail & (qp->sq.wqe_cnt - 1);
                if (idx == head)
                        break;

                tail = qp->sq.w_list[idx].next;
        } while (1);
        tail = qp->sq.w_list[idx].next;
        qp->sq.last_poll = tail;
}

static void free_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf)
{
        mlx5_frag_buf_free(dev->mdev, &buf->frag_buf);
}

static void get_sig_err_item(struct mlx5_sig_err_cqe *cqe,
                             struct ib_sig_err *item)
{
        u16 syndrome = be16_to_cpu(cqe->syndrome);

#define GUARD_ERR   (1 << 13)
#define APPTAG_ERR  (1 << 12)
#define REFTAG_ERR  (1 << 11)

        if (syndrome & GUARD_ERR) {
                item->err_type = IB_SIG_BAD_GUARD;
                item->expected = be32_to_cpu(cqe->expected_trans_sig) >> 16;
                item->actual = be32_to_cpu(cqe->actual_trans_sig) >> 16;
        } else
        if (syndrome & REFTAG_ERR) {
                item->err_type = IB_SIG_BAD_REFTAG;
                item->expected = be32_to_cpu(cqe->expected_reftag);
                item->actual = be32_to_cpu(cqe->actual_reftag);
        } else
        if (syndrome & APPTAG_ERR) {
                item->err_type = IB_SIG_BAD_APPTAG;
                item->expected = be32_to_cpu(cqe->expected_trans_sig) & 0xffff;
                item->actual = be32_to_cpu(cqe->actual_trans_sig) & 0xffff;
        } else {
                pr_err("Got signature completion error with bad syndrome %04x\n",
                       syndrome);
        }

        item->sig_err_offset = be64_to_cpu(cqe->err_offset);
        item->key = be32_to_cpu(cqe->mkey);
}

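/*
 * Generate IB_WC_WR_FLUSH_ERR completions in software for WQEs still
 * outstanding on a send or receive queue.  Used when the device is in
 * internal error and hardware will never complete them.
 */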
static void sw_comp(struct mlx5_ib_qp *qp, int num_entries, struct ib_wc *wc,
                    int *npolled, bool is_send)
{
        struct mlx5_ib_wq *wq;
        unsigned int cur;
        int np;
        int i;

        wq = (is_send) ? &qp->sq : &qp->rq;
        cur = wq->head - wq->tail;
        np = *npolled;

        if (cur == 0)
                return;

        for (i = 0; i < cur && np < num_entries; i++) {
                unsigned int idx;

                idx = (is_send) ? wq->last_poll : wq->tail;
                idx &= (wq->wqe_cnt - 1);
                wc->wr_id = wq->wrid[idx];
                wc->status = IB_WC_WR_FLUSH_ERR;
                wc->vendor_err = MLX5_CQE_SYNDROME_WR_FLUSH_ERR;
                wq->tail++;
                if (is_send)
                        wq->last_poll = wq->w_list[idx].next;
                np++;
                wc->qp = &qp->ibqp;
                wc++;
        }
        *npolled = np;
}

static void mlx5_ib_poll_sw_comp(struct mlx5_ib_cq *cq, int num_entries,
                                 struct ib_wc *wc, int *npolled)
{
        struct mlx5_ib_qp *qp;

        *npolled = 0;
        /* Find uncompleted WQEs belonging to that cq and return
         * completions that mimic them.
         */
        list_for_each_entry(qp, &cq->list_send_qp, cq_send_list) {
                sw_comp(qp, num_entries, wc + *npolled, npolled, true);
                if (*npolled >= num_entries)
                        return;
        }

        list_for_each_entry(qp, &cq->list_recv_qp, cq_recv_list) {
                sw_comp(qp, num_entries, wc + *npolled, npolled, false);
                if (*npolled >= num_entries)
                        return;
        }
}

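/*
 * Poll one CQE off the ring.  Returns 0 when a work completion was
 * filled in and -EAGAIN when the next CQE is still owned by hardware.
 * Resize and signature-error CQEs are consumed internally and polling
 * restarts at "repoll".
 */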
static int mlx5_poll_one(struct mlx5_ib_cq *cq,
                         struct mlx5_ib_qp **cur_qp,
                         struct ib_wc *wc)
{
        struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
        struct mlx5_err_cqe *err_cqe;
        struct mlx5_cqe64 *cqe64;
        struct mlx5_core_qp *mqp;
        struct mlx5_ib_wq *wq;
        struct mlx5_sig_err_cqe *sig_err_cqe;
        struct mlx5_core_mkey *mmkey;
        struct mlx5_ib_mr *mr;
        uint8_t opcode;
        uint32_t qpn;
        u16 wqe_ctr;
        void *cqe;
        int idx;

repoll:
        cqe = next_cqe_sw(cq);
        if (!cqe)
                return -EAGAIN;

        cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;

        ++cq->mcq.cons_index;

        /* Make sure we read CQ entry contents after we've checked the
         * ownership bit.
         */
        rmb();

        opcode = get_cqe_opcode(cqe64);
        if (unlikely(opcode == MLX5_CQE_RESIZE_CQ)) {
                if (likely(cq->resize_buf)) {
                        free_cq_buf(dev, &cq->buf);
                        cq->buf = *cq->resize_buf;
                        kfree(cq->resize_buf);
                        cq->resize_buf = NULL;
                        goto repoll;
                } else {
                        mlx5_ib_warn(dev, "unexpected resize cqe\n");
                }
        }

        qpn = ntohl(cqe64->sop_drop_qpn) & 0xffffff;
        if (!*cur_qp || (qpn != (*cur_qp)->ibqp.qp_num)) {
                /* We do not have to take the QP table lock here,
                 * because CQs will be locked while QPs are removed
                 * from the table.
                 */
                mqp = __mlx5_qp_lookup(dev->mdev, qpn);
                *cur_qp = to_mibqp(mqp);
        }

        wc->qp  = &(*cur_qp)->ibqp;
        switch (opcode) {
        case MLX5_CQE_REQ:
                wq = &(*cur_qp)->sq;
                wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
                idx = wqe_ctr & (wq->wqe_cnt - 1);
                handle_good_req(wc, cqe64, wq, idx);
                handle_atomics(*cur_qp, cqe64, wq->last_poll, idx);
                wc->wr_id = wq->wrid[idx];
                wq->tail = wq->wqe_head[idx] + 1;
                wc->status = IB_WC_SUCCESS;
                break;
        case MLX5_CQE_RESP_WR_IMM:
        case MLX5_CQE_RESP_SEND:
        case MLX5_CQE_RESP_SEND_IMM:
        case MLX5_CQE_RESP_SEND_INV:
                handle_responder(wc, cqe64, *cur_qp);
                wc->status = IB_WC_SUCCESS;
                break;
        case MLX5_CQE_RESIZE_CQ:
                break;
        case MLX5_CQE_REQ_ERR:
        case MLX5_CQE_RESP_ERR:
                err_cqe = (struct mlx5_err_cqe *)cqe64;
                mlx5_handle_error_cqe(dev, err_cqe, wc);
                mlx5_ib_dbg(dev, "%s error cqe on cqn 0x%x:\n",
                            opcode == MLX5_CQE_REQ_ERR ?
                            "Requestor" : "Responder", cq->mcq.cqn);
                mlx5_ib_dbg(dev, "syndrome 0x%x, vendor syndrome 0x%x\n",
                            err_cqe->syndrome, err_cqe->vendor_err_synd);
                if (opcode == MLX5_CQE_REQ_ERR) {
                        wq = &(*cur_qp)->sq;
                        wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
                        idx = wqe_ctr & (wq->wqe_cnt - 1);
                        wc->wr_id = wq->wrid[idx];
                        wq->tail = wq->wqe_head[idx] + 1;
                } else {
                        struct mlx5_ib_srq *srq;

                        if ((*cur_qp)->ibqp.srq) {
                                srq = to_msrq((*cur_qp)->ibqp.srq);
                                wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
                                wc->wr_id = srq->wrid[wqe_ctr];
                                mlx5_ib_free_srq_wqe(srq, wqe_ctr);
                        } else {
                                wq = &(*cur_qp)->rq;
                                wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
                                ++wq->tail;
                        }
                }
                break;
        case MLX5_CQE_SIG_ERR:
                sig_err_cqe = (struct mlx5_sig_err_cqe *)cqe64;

                xa_lock(&dev->mdev->priv.mkey_table);
                mmkey = xa_load(&dev->mdev->priv.mkey_table,
                                mlx5_base_mkey(be32_to_cpu(sig_err_cqe->mkey)));
                mr = to_mibmr(mmkey);
                get_sig_err_item(sig_err_cqe, &mr->sig->err_item);
                mr->sig->sig_err_exists = true;
                mr->sig->sigerr_count++;

                mlx5_ib_warn(dev, "CQN: 0x%x Got SIGERR on key: 0x%x err_type %x err_offset %llx expected %x actual %x\n",
                             cq->mcq.cqn, mr->sig->err_item.key,
                             mr->sig->err_item.err_type,
                             mr->sig->err_item.sig_err_offset,
                             mr->sig->err_item.expected,
                             mr->sig->err_item.actual);

                xa_unlock(&dev->mdev->priv.mkey_table);
                goto repoll;
        }

        return 0;
}

static int poll_soft_wc(struct mlx5_ib_cq *cq, int num_entries,
                        struct ib_wc *wc, bool is_fatal_err)
{
        struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
        struct mlx5_ib_wc *soft_wc, *next;
        int npolled = 0;

        list_for_each_entry_safe(soft_wc, next, &cq->wc_list, list) {
                if (npolled >= num_entries)
                        break;

                mlx5_ib_dbg(dev, "polled software generated completion on CQ 0x%x\n",
                            cq->mcq.cqn);

                if (unlikely(is_fatal_err)) {
                        soft_wc->wc.status = IB_WC_WR_FLUSH_ERR;
                        soft_wc->wc.vendor_err = MLX5_CQE_SYNDROME_WR_FLUSH_ERR;
                }
                wc[npolled++] = soft_wc->wc;
                list_del(&soft_wc->list);
                kfree(soft_wc);
        }

        return npolled;
}

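/*
 * Poll work completions: software-generated completions queued on
 * wc_list are drained first, then hardware CQEs.  If the device is in
 * internal error, outstanding WQEs are flushed in software instead of
 * being polled from hardware.
 */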
int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
        struct mlx5_ib_cq *cq = to_mcq(ibcq);
        struct mlx5_ib_qp *cur_qp = NULL;
        struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
        struct mlx5_core_dev *mdev = dev->mdev;
        unsigned long flags;
        int soft_polled = 0;
        int npolled;

        spin_lock_irqsave(&cq->lock, flags);
        if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
                /* make sure no soft WQEs are waiting */
                if (unlikely(!list_empty(&cq->wc_list)))
                        soft_polled = poll_soft_wc(cq, num_entries, wc, true);

                mlx5_ib_poll_sw_comp(cq, num_entries - soft_polled,
                                     wc + soft_polled, &npolled);
                goto out;
        }

        if (unlikely(!list_empty(&cq->wc_list)))
                soft_polled = poll_soft_wc(cq, num_entries, wc, false);

        for (npolled = 0; npolled < num_entries - soft_polled; npolled++) {
                if (mlx5_poll_one(cq, &cur_qp, wc + soft_polled + npolled))
                        break;
        }

        if (npolled)
                mlx5_cq_set_ci(&cq->mcq);
out:
        spin_unlock_irqrestore(&cq->lock, flags);

        return soft_polled + npolled;
}

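/*
 * Arm the CQ for the requested notification type.  When
 * IB_CQ_REPORT_MISSED_EVENTS is set, returns 1 if software completions
 * are already pending on wc_list.
 */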
int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
        struct mlx5_core_dev *mdev = to_mdev(ibcq->device)->mdev;
        struct mlx5_ib_cq *cq = to_mcq(ibcq);
        void __iomem *uar_page = mdev->priv.uar->map;
        unsigned long irq_flags;
        int ret = 0;

        spin_lock_irqsave(&cq->lock, irq_flags);
        if (cq->notify_flags != IB_CQ_NEXT_COMP)
                cq->notify_flags = flags & IB_CQ_SOLICITED_MASK;

        if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !list_empty(&cq->wc_list))
                ret = 1;
        spin_unlock_irqrestore(&cq->lock, irq_flags);

        mlx5_cq_arm(&cq->mcq,
                    (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
                    MLX5_CQ_DB_REQ_NOT_SOL : MLX5_CQ_DB_REQ_NOT,
                    uar_page, to_mcq(ibcq)->mcq.cons_index);

        return ret;
}

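/*
 * Allocate the CQE ring as a fragmented buffer on the device's NUMA
 * node and initialize the frag-buf control structure that get_cqe()
 * uses to index it; the stride is 64B or 128B according to cqe_size.
 */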
static int alloc_cq_frag_buf(struct mlx5_ib_dev *dev,
                             struct mlx5_ib_cq_buf *buf,
                             int nent,
                             int cqe_size)
{
        struct mlx5_frag_buf *frag_buf = &buf->frag_buf;
        u8 log_wq_stride = 6 + (cqe_size == 128 ? 1 : 0);
        u8 log_wq_sz     = ilog2(cqe_size);
        int err;

        err = mlx5_frag_buf_alloc_node(dev->mdev,
                                       nent * cqe_size,
                                       frag_buf,
                                       dev->mdev->priv.numa_node);
        if (err)
                return err;

        mlx5_init_fbc(frag_buf->frags, log_wq_stride, log_wq_sz, &buf->fbc);

        buf->cqe_size = cqe_size;
        buf->nent = nent;

        return 0;
}

enum {
        MLX5_CQE_RES_FORMAT_HASH = 0,
        MLX5_CQE_RES_FORMAT_CSUM = 1,
        MLX5_CQE_RES_FORMAT_CSUM_STRIDX = 3,
};

static int mini_cqe_res_format_to_hw(struct mlx5_ib_dev *dev, u8 format)
{
        switch (format) {
        case MLX5_IB_CQE_RES_FORMAT_HASH:
                return MLX5_CQE_RES_FORMAT_HASH;
        case MLX5_IB_CQE_RES_FORMAT_CSUM:
                return MLX5_CQE_RES_FORMAT_CSUM;
        case MLX5_IB_CQE_RES_FORMAT_CSUM_STRIDX:
                if (MLX5_CAP_GEN(dev->mdev, mini_cqe_resp_stride_index))
                        return MLX5_CQE_RES_FORMAT_CSUM_STRIDX;
                return -EOPNOTSUPP;
        default:
                return -EINVAL;
        }
}

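/*
 * Build a user-space CQ: pin the buffer handed in through udata, map
 * the doorbell record, and fill the CREATE_CQ command with the page
 * list plus any CQE-compression and CQE-padding attributes requested
 * by userspace.
 */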
static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
                          struct mlx5_ib_cq *cq, int entries, u32 **cqb,
                          int *cqe_size, int *index, int *inlen)
{
        struct mlx5_ib_create_cq ucmd = {};
        size_t ucmdlen;
        int page_shift;
        __be64 *pas;
        int npages;
        int ncont;
        void *cqc;
        int err;
        struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context(
                udata, struct mlx5_ib_ucontext, ibucontext);

        ucmdlen = udata->inlen < sizeof(ucmd) ?
                  (sizeof(ucmd) - sizeof(ucmd.flags)) : sizeof(ucmd);

        if (ib_copy_from_udata(&ucmd, udata, ucmdlen))
                return -EFAULT;

        if (ucmdlen == sizeof(ucmd) &&
            (ucmd.flags & ~(MLX5_IB_CREATE_CQ_FLAGS_CQE_128B_PAD)))
                return -EINVAL;

        if (ucmd.cqe_size != 64 && ucmd.cqe_size != 128)
                return -EINVAL;

        *cqe_size = ucmd.cqe_size;

        cq->buf.umem =
                ib_umem_get(udata, ucmd.buf_addr, entries * ucmd.cqe_size,
                            IB_ACCESS_LOCAL_WRITE, 1);
        if (IS_ERR(cq->buf.umem)) {
                err = PTR_ERR(cq->buf.umem);
                return err;
        }

        err = mlx5_ib_db_map_user(context, udata, ucmd.db_addr, &cq->db);
        if (err)
                goto err_umem;

        mlx5_ib_cont_pages(cq->buf.umem, ucmd.buf_addr, 0, &npages, &page_shift,
                           &ncont, NULL);
        mlx5_ib_dbg(dev, "addr 0x%llx, size %u, npages %d, page_shift %d, ncont %d\n",
                    ucmd.buf_addr, entries * ucmd.cqe_size, npages, page_shift, ncont);

        *inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
                 MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) * ncont;
        *cqb = kvzalloc(*inlen, GFP_KERNEL);
        if (!*cqb) {
                err = -ENOMEM;
                goto err_db;
        }

        pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, *cqb, pas);
        mlx5_ib_populate_pas(dev, cq->buf.umem, page_shift, pas, 0);

        cqc = MLX5_ADDR_OF(create_cq_in, *cqb, cq_context);
        MLX5_SET(cqc, cqc, log_page_size,
                 page_shift - MLX5_ADAPTER_PAGE_SHIFT);

        *index = context->bfregi.sys_pages[0];

        if (ucmd.cqe_comp_en == 1) {
                int mini_cqe_format;

                if (!((*cqe_size == 128 &&
                       MLX5_CAP_GEN(dev->mdev, cqe_compression_128)) ||
                      (*cqe_size == 64 &&
                       MLX5_CAP_GEN(dev->mdev, cqe_compression)))) {
                        err = -EOPNOTSUPP;
                        mlx5_ib_warn(dev, "CQE compression is not supported for size %d!\n",
                                     *cqe_size);
                        goto err_cqb;
                }

                mini_cqe_format =
                        mini_cqe_res_format_to_hw(dev,
                                                  ucmd.cqe_comp_res_format);
                if (mini_cqe_format < 0) {
                        err = mini_cqe_format;
                        mlx5_ib_dbg(dev, "CQE compression res format %d error: %d\n",
                                    ucmd.cqe_comp_res_format, err);
                        goto err_cqb;
                }

                MLX5_SET(cqc, cqc, cqe_comp_en, 1);
                MLX5_SET(cqc, cqc, mini_cqe_res_format, mini_cqe_format);
        }

        if (ucmd.flags & MLX5_IB_CREATE_CQ_FLAGS_CQE_128B_PAD) {
                if (*cqe_size != 128 ||
                    !MLX5_CAP_GEN(dev->mdev, cqe_128_always)) {
                        err = -EOPNOTSUPP;
                        mlx5_ib_warn(dev,
                                     "CQE padding is not supported for CQE size of %dB!\n",
                                     *cqe_size);
                        goto err_cqb;
                }

                cq->private_flags |= MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD;
        }

        MLX5_SET(create_cq_in, *cqb, uid, context->devx_uid);
        return 0;

err_cqb:
        kvfree(*cqb);

err_db:
        mlx5_ib_db_unmap_user(context, &cq->db);

err_umem:
        ib_umem_release(cq->buf.umem);
        return err;
}

static void destroy_cq_user(struct mlx5_ib_cq *cq, struct ib_udata *udata)
{
        struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context(
                udata, struct mlx5_ib_ucontext, ibucontext);

        mlx5_ib_db_unmap_user(context, &cq->db);
        ib_umem_release(cq->buf.umem);
}

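/*
 * Mark every CQE in a fresh kernel CQ buffer as invalid so that
 * get_sw_cqe() treats the whole ring as hardware-owned until real
 * completions are written.
 */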
static void init_cq_frag_buf(struct mlx5_ib_cq_buf *buf)
{
        int i;
        void *cqe;
        struct mlx5_cqe64 *cqe64;

        for (i = 0; i < buf->nent; i++) {
                cqe = mlx5_frag_buf_get_wqe(&buf->fbc, i);
                cqe64 = buf->cqe_size == 64 ? cqe : cqe + 64;
                cqe64->op_own = MLX5_CQE_INVALID << 4;
        }
}

static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
                            int entries, int cqe_size,
                            u32 **cqb, int *index, int *inlen)
{
        __be64 *pas;
        void *cqc;
        int err;

        err = mlx5_db_alloc(dev->mdev, &cq->db);
        if (err)
                return err;

        cq->mcq.set_ci_db  = cq->db.db;
        cq->mcq.arm_db     = cq->db.db + 1;
        cq->mcq.cqe_sz = cqe_size;

        err = alloc_cq_frag_buf(dev, &cq->buf, entries, cqe_size);
        if (err)
                goto err_db;

        init_cq_frag_buf(&cq->buf);

        *inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
                 MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) *
                 cq->buf.frag_buf.npages;
        *cqb = kvzalloc(*inlen, GFP_KERNEL);
        if (!*cqb) {
                err = -ENOMEM;
                goto err_buf;
        }

        pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, *cqb, pas);
        mlx5_fill_page_frag_array(&cq->buf.frag_buf, pas);

        cqc = MLX5_ADDR_OF(create_cq_in, *cqb, cq_context);
        MLX5_SET(cqc, cqc, log_page_size,
                 cq->buf.frag_buf.page_shift -
                 MLX5_ADAPTER_PAGE_SHIFT);

        *index = dev->mdev->priv.uar->index;

        return 0;

err_buf:
        free_cq_buf(dev, &cq->buf);

err_db:
        mlx5_db_free(dev->mdev, &cq->db);
        return err;
}

static void destroy_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq)
{
        free_cq_buf(dev, &cq->buf);
        mlx5_db_free(dev->mdev, &cq->db);
}

static void notify_soft_wc_handler(struct work_struct *work)
{
        struct mlx5_ib_cq *cq = container_of(work, struct mlx5_ib_cq,
                                             notify_work);

        cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
}

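/*
 * Verbs entry point for CQ creation.  Rounds the requested depth up to
 * a power of two, builds the CQ buffer in user or kernel memory, and
 * issues CREATE_CQ with the chosen EQ, UAR index and doorbell record.
 */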
int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
                      struct ib_udata *udata)
{
        struct ib_device *ibdev = ibcq->device;
        int entries = attr->cqe;
        int vector = attr->comp_vector;
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
        struct mlx5_ib_cq *cq = to_mcq(ibcq);
        u32 out[MLX5_ST_SZ_DW(create_cq_out)];
        int uninitialized_var(index);
        int uninitialized_var(inlen);
        u32 *cqb = NULL;
        void *cqc;
        int cqe_size;
        unsigned int irqn;
        int eqn;
        int err;

        if (entries < 0 ||
            (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz))))
                return -EINVAL;

        if (check_cq_create_flags(attr->flags))
                return -EOPNOTSUPP;

        entries = roundup_pow_of_two(entries + 1);
        if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)))
                return -EINVAL;

        cq->ibcq.cqe = entries - 1;
        mutex_init(&cq->resize_mutex);
        spin_lock_init(&cq->lock);
        cq->resize_buf = NULL;
        cq->resize_umem = NULL;
        cq->create_flags = attr->flags;
        INIT_LIST_HEAD(&cq->list_send_qp);
        INIT_LIST_HEAD(&cq->list_recv_qp);

        if (udata) {
                err = create_cq_user(dev, udata, cq, entries, &cqb, &cqe_size,
                                     &index, &inlen);
                if (err)
                        return err;
        } else {
                cqe_size = cache_line_size() == 128 ? 128 : 64;
                err = create_cq_kernel(dev, cq, entries, cqe_size, &cqb,
                                       &index, &inlen);
                if (err)
                        return err;

                INIT_WORK(&cq->notify_work, notify_soft_wc_handler);
        }

        err = mlx5_vector2eqn(dev->mdev, vector, &eqn, &irqn);
        if (err)
                goto err_cqb;

        cq->cqe_size = cqe_size;

        cqc = MLX5_ADDR_OF(create_cq_in, cqb, cq_context);
        MLX5_SET(cqc, cqc, cqe_sz,
                 cqe_sz_to_mlx_sz(cqe_size,
                                  cq->private_flags &
                                  MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD));
        MLX5_SET(cqc, cqc, log_cq_size, ilog2(entries));
        MLX5_SET(cqc, cqc, uar_page, index);
        MLX5_SET(cqc, cqc, c_eqn, eqn);
        MLX5_SET64(cqc, cqc, dbr_addr, cq->db.dma);
        if (cq->create_flags & IB_UVERBS_CQ_FLAGS_IGNORE_OVERRUN)
                MLX5_SET(cqc, cqc, oi, 1);

        err = mlx5_core_create_cq(dev->mdev, &cq->mcq, cqb, inlen, out, sizeof(out));
        if (err)
                goto err_cqb;

        mlx5_ib_dbg(dev, "cqn 0x%x\n", cq->mcq.cqn);
        cq->mcq.irqn = irqn;
        if (udata)
                cq->mcq.tasklet_ctx.comp = mlx5_ib_cq_comp;
        else
                cq->mcq.comp  = mlx5_ib_cq_comp;
        cq->mcq.event = mlx5_ib_cq_event;

        INIT_LIST_HEAD(&cq->wc_list);

        if (udata)
                if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof(__u32))) {
                        err = -EFAULT;
                        goto err_cmd;
                }

        kvfree(cqb);
        return 0;

err_cmd:
        mlx5_core_destroy_cq(dev->mdev, &cq->mcq);

err_cqb:
        kvfree(cqb);
        if (udata)
                destroy_cq_user(cq, udata);
        else
                destroy_cq_kernel(dev, cq);
        return err;
}

void mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
{
        struct mlx5_ib_dev *dev = to_mdev(cq->device);
        struct mlx5_ib_cq *mcq = to_mcq(cq);

        mlx5_core_destroy_cq(dev->mdev, &mcq->mcq);
        if (udata)
                destroy_cq_user(mcq, udata);
        else
                destroy_cq_kernel(dev, mcq);
}

static int is_equal_rsn(struct mlx5_cqe64 *cqe64, u32 rsn)
{
        return rsn == (ntohl(cqe64->sop_drop_qpn) & 0xffffff);
}

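/*
 * Remove all CQEs of QP/SRQ "rsn" from a CQ the caller already holds
 * locked; surviving entries are compacted toward the head of the ring
 * with their ownership bits preserved.
 */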
void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 rsn, struct mlx5_ib_srq *srq)
{
        struct mlx5_cqe64 *cqe64, *dest64;
        void *cqe, *dest;
        u32 prod_index;
        int nfreed = 0;
        u8 owner_bit;

        if (!cq)
                return;

        /* First we need to find the current producer index, so we
         * know where to start cleaning from.  It doesn't matter if HW
         * adds new entries after this loop -- the QP we're worried
         * about is already in RESET, so the new entries won't come
         * from our QP and therefore don't need to be checked.
         */
        for (prod_index = cq->mcq.cons_index; get_sw_cqe(cq, prod_index); prod_index++)
                if (prod_index == cq->mcq.cons_index + cq->ibcq.cqe)
                        break;

        /* Now sweep backwards through the CQ, removing CQ entries
         * that match our QP by copying older entries on top of them.
         */
        while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) {
                cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
                cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;
                if (is_equal_rsn(cqe64, rsn)) {
                        if (srq && (ntohl(cqe64->srqn) & 0xffffff))
                                mlx5_ib_free_srq_wqe(srq, be16_to_cpu(cqe64->wqe_counter));
                        ++nfreed;
                } else if (nfreed) {
                        dest = get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe);
                        dest64 = (cq->mcq.cqe_sz == 64) ? dest : dest + 64;
                        owner_bit = dest64->op_own & MLX5_CQE_OWNER_MASK;
                        memcpy(dest, cqe, cq->mcq.cqe_sz);
                        dest64->op_own = owner_bit |
                                (dest64->op_own & ~MLX5_CQE_OWNER_MASK);
                }
        }

        if (nfreed) {
                cq->mcq.cons_index += nfreed;
                /* Make sure update of buffer contents is done before
                 * updating consumer index.
                 */
                wmb();
                mlx5_cq_set_ci(&cq->mcq);
        }
}

void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq)
{
        if (!cq)
                return;

        spin_lock_irq(&cq->lock);
        __mlx5_ib_cq_clean(cq, qpn, srq);
        spin_unlock_irq(&cq->lock);
}

int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
        struct mlx5_ib_dev *dev = to_mdev(cq->device);
        struct mlx5_ib_cq *mcq = to_mcq(cq);
        int err;

        if (!MLX5_CAP_GEN(dev->mdev, cq_moderation))
                return -EOPNOTSUPP;

        if (cq_period > MLX5_MAX_CQ_PERIOD)
                return -EINVAL;

        err = mlx5_core_modify_cq_moderation(dev->mdev, &mcq->mcq,
                                             cq_period, cq_count);
        if (err)
                mlx5_ib_warn(dev, "modify cq 0x%x failed\n", mcq->mcq.cqn);

        return err;
}

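/*
 * Pin the new user buffer for a CQ resize and stash it in
 * cq->resize_umem until the firmware command completes.
 */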
static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
                       int entries, struct ib_udata *udata, int *npas,
                       int *page_shift, int *cqe_size)
{
        struct mlx5_ib_resize_cq ucmd;
        struct ib_umem *umem;
        int err;
        int npages;

        err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
        if (err)
                return err;

        if (ucmd.reserved0 || ucmd.reserved1)
                return -EINVAL;

        /* check multiplication overflow */
        if (ucmd.cqe_size && SIZE_MAX / ucmd.cqe_size <= entries - 1)
                return -EINVAL;

        umem = ib_umem_get(udata, ucmd.buf_addr,
                           (size_t)ucmd.cqe_size * entries,
                           IB_ACCESS_LOCAL_WRITE, 1);
        if (IS_ERR(umem)) {
                err = PTR_ERR(umem);
                return err;
        }

        mlx5_ib_cont_pages(umem, ucmd.buf_addr, 0, &npages, page_shift,
                           npas, NULL);

        cq->resize_umem = umem;
        *cqe_size = ucmd.cqe_size;

        return 0;
}

static int resize_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
                         int entries, int cqe_size)
{
        int err;

        cq->resize_buf = kzalloc(sizeof(*cq->resize_buf), GFP_KERNEL);
        if (!cq->resize_buf)
                return -ENOMEM;

        err = alloc_cq_frag_buf(dev, cq->resize_buf, entries, cqe_size);
        if (err)
                goto ex;

        init_cq_frag_buf(cq->resize_buf);

        return 0;

ex:
        kfree(cq->resize_buf);
        return err;
}

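/*
 * Migrate CQEs that are still pending in the old buffer into the resize
 * buffer, fixing up the ownership bit of each copy, until the
 * MLX5_CQE_RESIZE_CQ marker written by hardware is reached.
 */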
static int copy_resize_cqes(struct mlx5_ib_cq *cq)
{
        struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
        struct mlx5_cqe64 *scqe64;
        struct mlx5_cqe64 *dcqe64;
        void *start_cqe;
        void *scqe;
        void *dcqe;
        int ssize;
        int dsize;
        int i;
        u8 sw_own;

        ssize = cq->buf.cqe_size;
        dsize = cq->resize_buf->cqe_size;
        if (ssize != dsize) {
                mlx5_ib_warn(dev, "resize from different cqe size is not supported\n");
                return -EINVAL;
        }

        i = cq->mcq.cons_index;
        scqe = get_sw_cqe(cq, i);
        scqe64 = ssize == 64 ? scqe : scqe + 64;
        start_cqe = scqe;
        if (!scqe) {
                mlx5_ib_warn(dev, "expected cqe in sw ownership\n");
                return -EINVAL;
        }

        while (get_cqe_opcode(scqe64) != MLX5_CQE_RESIZE_CQ) {
                dcqe = mlx5_frag_buf_get_wqe(&cq->resize_buf->fbc,
                                             (i + 1) & cq->resize_buf->nent);
                dcqe64 = dsize == 64 ? dcqe : dcqe + 64;
                sw_own = sw_ownership_bit(i + 1, cq->resize_buf->nent);
                memcpy(dcqe, scqe, dsize);
                dcqe64->op_own = (dcqe64->op_own & ~MLX5_CQE_OWNER_MASK) | sw_own;

                ++i;
                scqe = get_sw_cqe(cq, i);
                scqe64 = ssize == 64 ? scqe : scqe + 64;
                if (!scqe) {
                        mlx5_ib_warn(dev, "expected cqe in sw ownership\n");
                        return -EINVAL;
                }

                if (scqe == start_cqe) {
                        pr_warn("resize CQ failed to get resize CQE, CQN 0x%x\n",
                                cq->mcq.cqn);
                        return -ENOMEM;
                }
        }
        ++cq->mcq.cons_index;
        return 0;
}

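/*
 * Verbs entry point for CQ resize.  Allocates the replacement buffer
 * (user or kernel), issues MODIFY_CQ with MLX5_CQ_OPMOD_RESIZE, and for
 * kernel CQs migrates pending CQEs into the new buffer under the CQ
 * lock.
 */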
int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
{
        struct mlx5_ib_dev *dev = to_mdev(ibcq->device);
        struct mlx5_ib_cq *cq = to_mcq(ibcq);
        void *cqc;
        u32 *in;
        int err;
        int npas;
        __be64 *pas;
        int page_shift;
        int inlen;
        int uninitialized_var(cqe_size);
        unsigned long flags;

        if (!MLX5_CAP_GEN(dev->mdev, cq_resize)) {
                pr_info("Firmware does not support resize CQ\n");
                return -ENOSYS;
        }

        if (entries < 1 ||
            entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz))) {
                mlx5_ib_warn(dev, "wrong entries number %d, max %d\n",
                             entries,
                             1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz));
                return -EINVAL;
        }

        entries = roundup_pow_of_two(entries + 1);
        if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)) + 1)
                return -EINVAL;

        if (entries == ibcq->cqe + 1)
                return 0;

        mutex_lock(&cq->resize_mutex);
        if (udata) {
                err = resize_user(dev, cq, entries, udata, &npas, &page_shift,
                                  &cqe_size);
        } else {
                cqe_size = 64;
                err = resize_kernel(dev, cq, entries, cqe_size);
                if (!err) {
                        struct mlx5_frag_buf *frag_buf = &cq->resize_buf->frag_buf;

                        npas = frag_buf->npages;
                        page_shift = frag_buf->page_shift;
                }
        }

        if (err)
                goto ex;

        inlen = MLX5_ST_SZ_BYTES(modify_cq_in) +
                MLX5_FLD_SZ_BYTES(modify_cq_in, pas[0]) * npas;

        in = kvzalloc(inlen, GFP_KERNEL);
        if (!in) {
                err = -ENOMEM;
                goto ex_resize;
        }

        pas = (__be64 *)MLX5_ADDR_OF(modify_cq_in, in, pas);
        if (udata)
                mlx5_ib_populate_pas(dev, cq->resize_umem, page_shift,
                                     pas, 0);
        else
                mlx5_fill_page_frag_array(&cq->resize_buf->frag_buf, pas);

        MLX5_SET(modify_cq_in, in,
                 modify_field_select_resize_field_select.resize_field_select.resize_field_select,
                 MLX5_MODIFY_CQ_MASK_LOG_SIZE |
                 MLX5_MODIFY_CQ_MASK_PG_OFFSET |
                 MLX5_MODIFY_CQ_MASK_PG_SIZE);

        cqc = MLX5_ADDR_OF(modify_cq_in, in, cq_context);

        MLX5_SET(cqc, cqc, log_page_size,
                 page_shift - MLX5_ADAPTER_PAGE_SHIFT);
        MLX5_SET(cqc, cqc, cqe_sz,
                 cqe_sz_to_mlx_sz(cqe_size,
                                  cq->private_flags &
                                  MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD));
        MLX5_SET(cqc, cqc, log_cq_size, ilog2(entries));

        MLX5_SET(modify_cq_in, in, op_mod, MLX5_CQ_OPMOD_RESIZE);
        MLX5_SET(modify_cq_in, in, cqn, cq->mcq.cqn);

        err = mlx5_core_modify_cq(dev->mdev, &cq->mcq, in, inlen);
        if (err)
                goto ex_alloc;

        if (udata) {
                cq->ibcq.cqe = entries - 1;
                ib_umem_release(cq->buf.umem);
                cq->buf.umem = cq->resize_umem;
                cq->resize_umem = NULL;
        } else {
                struct mlx5_ib_cq_buf tbuf;
                int resized = 0;

                spin_lock_irqsave(&cq->lock, flags);
                if (cq->resize_buf) {
                        err = copy_resize_cqes(cq);
                        if (!err) {
                                tbuf = cq->buf;
                                cq->buf = *cq->resize_buf;
                                kfree(cq->resize_buf);
                                cq->resize_buf = NULL;
                                resized = 1;
                        }
                }
                cq->ibcq.cqe = entries - 1;
                spin_unlock_irqrestore(&cq->lock, flags);
                if (resized)
                        free_cq_buf(dev, &tbuf);
        }
        mutex_unlock(&cq->resize_mutex);

        kvfree(in);
        return 0;

ex_alloc:
        kvfree(in);

ex_resize:
        ib_umem_release(cq->resize_umem);
        if (!udata) {
                free_cq_buf(dev, cq->resize_buf);
                cq->resize_buf = NULL;
        }
ex:
        mutex_unlock(&cq->resize_mutex);
        return err;
}

int mlx5_ib_get_cqe_size(struct ib_cq *ibcq)
{
        struct mlx5_ib_cq *cq;

        if (!ibcq)
                return 128;

        cq = to_mcq(ibcq);
        return cq->cqe_size;
}

/* Called from atomic context */
int mlx5_ib_generate_wc(struct ib_cq *ibcq, struct ib_wc *wc)
{
        struct mlx5_ib_wc *soft_wc;
        struct mlx5_ib_cq *cq = to_mcq(ibcq);
        unsigned long flags;

        soft_wc = kmalloc(sizeof(*soft_wc), GFP_ATOMIC);
        if (!soft_wc)
                return -ENOMEM;

        soft_wc->wc = *wc;
        spin_lock_irqsave(&cq->lock, flags);
        list_add_tail(&soft_wc->list, &cq->wc_list);
        if (cq->notify_flags == IB_CQ_NEXT_COMP ||
            wc->status != IB_WC_SUCCESS) {
                cq->notify_flags = 0;
                schedule_work(&cq->notify_work);
        }
        spin_unlock_irqrestore(&cq->lock, flags);

        return 0;
}