/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "mlx5_ib.h"

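/*
 * GSI (QP1) support is emulated: a single hardware GSI QP receives
 * MADs, while sends go out through a set of UD TX QPs -- one per
 * P_Key index on IB ports, or one per port when LAG is active.
 * Outstanding sends are tracked in a ring so that completions can be
 * delivered to the user's send CQ in posting order.
 */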
struct mlx5_ib_gsi_wr {
	struct ib_cqe cqe;
	struct ib_wc wc;
	bool completed:1;
};

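/*
 * Devices with the set_deth_sqpn capability can stamp an arbitrary
 * source QPN into the DETH of sent packets, which lets ordinary UD
 * QPs transmit on behalf of QP1 (see create_gsi_ud_qp() below).
 */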
static bool mlx5_ib_deth_sqpn_cap(struct mlx5_ib_dev *dev)
{
	return MLX5_CAP_GEN(dev->mdev, set_deth_sqpn);
}

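/*
 * Flush completed entries from the outstanding-WR ring to the user's
 * send CQ, in order, stopping at the first entry still in flight.
 */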
/* Call with gsi->lock locked */
static void generate_completions(struct mlx5_ib_qp *mqp)
{
	struct mlx5_ib_gsi_qp *gsi = &mqp->gsi;
	struct ib_cq *gsi_cq = mqp->ibqp.send_cq;
	struct mlx5_ib_gsi_wr *wr;
	u32 index;

	for (index = gsi->outstanding_ci; index != gsi->outstanding_pi;
	     index++) {
		wr = &gsi->outstanding_wrs[index % gsi->cap.max_send_wr];

		if (!wr->completed)
			break;

		WARN_ON_ONCE(mlx5_ib_generate_wc(gsi_cq, &wr->wc));
		wr->completed = false;
	}

	gsi->outstanding_ci = index;
}

static void handle_single_completion(struct ib_cq *cq, struct ib_wc *wc)
{
	struct mlx5_ib_gsi_qp *gsi = cq->cq_context;
	struct mlx5_ib_gsi_wr *wr =
		container_of(wc->wr_cqe, struct mlx5_ib_gsi_wr, cqe);
	struct mlx5_ib_qp *mqp = container_of(gsi, struct mlx5_ib_qp, gsi);
	u64 wr_id;
	unsigned long flags;

	spin_lock_irqsave(&gsi->lock, flags);
	wr->completed = true;
	wr_id = wr->wc.wr_id;
	wr->wc = *wc;
	wr->wc.wr_id = wr_id;
	wr->wc.qp = &mqp->ibqp;

	generate_completions(mqp);
	spin_unlock_irqrestore(&gsi->lock, flags);
}

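/*
 * Create the emulated GSI QP: the hardware (RX) GSI QP, its send CQ,
 * the outstanding-WR ring and the TX QP array. The TX QPs themselves
 * are created later by setup_qps(). Only one GSI QP may exist per
 * port.
 */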
int mlx5_ib_create_gsi(struct ib_pd *pd, struct mlx5_ib_qp *mqp,
		       struct ib_qp_init_attr *attr)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_gsi_qp *gsi;
	struct ib_qp_init_attr hw_init_attr = *attr;
	const u8 port_num = attr->port_num;
	int num_qps = 0;
	int ret;

	if (mlx5_ib_deth_sqpn_cap(dev)) {
		if (MLX5_CAP_GEN(dev->mdev,
				 port_type) == MLX5_CAP_PORT_TYPE_IB)
			num_qps = pd->device->attrs.max_pkeys;
		else if (dev->lag_active)
			num_qps = MLX5_MAX_PORTS;
	}

	gsi = &mqp->gsi;
	gsi->tx_qps = kcalloc(num_qps, sizeof(*gsi->tx_qps), GFP_KERNEL);
	if (!gsi->tx_qps)
		return -ENOMEM;

	gsi->outstanding_wrs =
		kcalloc(attr->cap.max_send_wr, sizeof(*gsi->outstanding_wrs),
			GFP_KERNEL);
	if (!gsi->outstanding_wrs) {
		ret = -ENOMEM;
		goto err_free_tx;
	}

	mutex_lock(&dev->devr.mutex);

	if (dev->devr.ports[port_num - 1].gsi) {
		mlx5_ib_warn(dev, "GSI QP already exists on port %d\n",
			     port_num);
		ret = -EBUSY;
		goto err_free_wrs;
	}
	gsi->num_qps = num_qps;
	spin_lock_init(&gsi->lock);

	gsi->cap = attr->cap;
	gsi->port_num = port_num;

	gsi->cq = ib_alloc_cq(pd->device, gsi, attr->cap.max_send_wr, 0,
			      IB_POLL_SOFTIRQ);
	if (IS_ERR(gsi->cq)) {
		mlx5_ib_warn(dev, "unable to create send CQ for GSI QP. error %ld\n",
			     PTR_ERR(gsi->cq));
		ret = PTR_ERR(gsi->cq);
		goto err_free_wrs;
	}

	hw_init_attr.qp_type = MLX5_IB_QPT_HW_GSI;
	hw_init_attr.send_cq = gsi->cq;
	if (num_qps) {
		hw_init_attr.cap.max_send_wr = 0;
		hw_init_attr.cap.max_send_sge = 0;
		hw_init_attr.cap.max_inline_data = 0;
	}

	gsi->rx_qp = mlx5_ib_create_qp(pd, &hw_init_attr, NULL);
	if (IS_ERR(gsi->rx_qp)) {
		mlx5_ib_warn(dev, "unable to create hardware GSI QP. error %ld\n",
			     PTR_ERR(gsi->rx_qp));
		ret = PTR_ERR(gsi->rx_qp);
		goto err_destroy_cq;
	}
	gsi->rx_qp->device = pd->device;
	gsi->rx_qp->pd = pd;
	gsi->rx_qp->real_qp = gsi->rx_qp;

	gsi->rx_qp->qp_type = hw_init_attr.qp_type;
	gsi->rx_qp->send_cq = hw_init_attr.send_cq;
	gsi->rx_qp->recv_cq = hw_init_attr.recv_cq;
	gsi->rx_qp->event_handler = hw_init_attr.event_handler;
	spin_lock_init(&gsi->rx_qp->mr_lock);
	INIT_LIST_HEAD(&gsi->rx_qp->rdma_mrs);
	INIT_LIST_HEAD(&gsi->rx_qp->sig_mrs);

	dev->devr.ports[attr->port_num - 1].gsi = gsi;

	mutex_unlock(&dev->devr.mutex);

	return 0;

err_destroy_cq:
	ib_free_cq(gsi->cq);
err_free_wrs:
	mutex_unlock(&dev->devr.mutex);
	kfree(gsi->outstanding_wrs);
err_free_tx:
	kfree(gsi->tx_qps);
	return ret;
}

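/*
 * Tear down the emulated GSI QP in reverse creation order: the
 * hardware RX QP (under devr.mutex), then the TX QPs, the send CQ,
 * and finally the bookkeeping allocations.
 */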
int mlx5_ib_destroy_gsi(struct mlx5_ib_qp *mqp)
{
	struct mlx5_ib_dev *dev = to_mdev(mqp->ibqp.device);
	struct mlx5_ib_gsi_qp *gsi = &mqp->gsi;
	const int port_num = gsi->port_num;
	int qp_index;
	int ret;

	mutex_lock(&dev->devr.mutex);
	ret = mlx5_ib_destroy_qp(gsi->rx_qp, NULL);
	if (ret) {
		mlx5_ib_warn(dev, "unable to destroy hardware GSI QP. error %d\n",
			     ret);
		mutex_unlock(&dev->devr.mutex);
		return ret;
	}
	dev->devr.ports[port_num - 1].gsi = NULL;
	mutex_unlock(&dev->devr.mutex);
	gsi->rx_qp = NULL;

	for (qp_index = 0; qp_index < gsi->num_qps; ++qp_index) {
		if (!gsi->tx_qps[qp_index])
			continue;
		WARN_ON_ONCE(ib_destroy_qp(gsi->tx_qps[qp_index]));
		gsi->tx_qps[qp_index] = NULL;
	}

	ib_free_cq(gsi->cq);

	kfree(gsi->outstanding_wrs);
	kfree(gsi->tx_qps);
	kfree(mqp);

	return 0;
}

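/*
 * Create one UD TX QP. MLX5_IB_QP_CREATE_SQPN_QP1 requests that the
 * device use QP1 as the source QP number in the DETH of transmitted
 * packets, so the send appears on the wire to come from the GSI QP.
 */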
static struct ib_qp *create_gsi_ud_qp(struct mlx5_ib_gsi_qp *gsi)
{
	struct ib_pd *pd = gsi->rx_qp->pd;
	struct ib_qp_init_attr init_attr = {
		.event_handler = gsi->rx_qp->event_handler,
		.qp_context = gsi->rx_qp->qp_context,
		.send_cq = gsi->cq,
		.recv_cq = gsi->rx_qp->recv_cq,
		.cap = {
			.max_send_wr = gsi->cap.max_send_wr,
			.max_send_sge = gsi->cap.max_send_sge,
			.max_inline_data = gsi->cap.max_inline_data,
		},
		.qp_type = IB_QPT_UD,
		.create_flags = MLX5_IB_QP_CREATE_SQPN_QP1,
	};

	return ib_create_qp(pd, &init_attr);
}

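/*
 * Walk a freshly created TX QP through the standard INIT -> RTR ->
 * RTS transitions, using the well-known QP1 Q_Key.
 */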
static int modify_to_rts(struct mlx5_ib_gsi_qp *gsi, struct ib_qp *qp,
			 u16 pkey_index)
{
	struct mlx5_ib_dev *dev = to_mdev(qp->device);
	struct ib_qp_attr attr;
	int mask;
	int ret;

	mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_QKEY | IB_QP_PORT;
	attr.qp_state = IB_QPS_INIT;
	attr.pkey_index = pkey_index;
	attr.qkey = IB_QP1_QKEY;
	attr.port_num = gsi->port_num;
	ret = ib_modify_qp(qp, &attr, mask);
	if (ret) {
		mlx5_ib_err(dev, "could not change QP%d state to INIT: %d\n",
			    qp->qp_num, ret);
		return ret;
	}

	attr.qp_state = IB_QPS_RTR;
	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
	if (ret) {
		mlx5_ib_err(dev, "could not change QP%d state to RTR: %d\n",
			    qp->qp_num, ret);
		return ret;
	}

	attr.qp_state = IB_QPS_RTS;
	attr.sq_psn = 0;
	ret = ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_SQ_PSN);
	if (ret) {
		mlx5_ib_err(dev, "could not change QP%d state to RTS: %d\n",
			    qp->qp_num, ret);
		return ret;
	}

	return 0;
}

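/*
 * Bring up the TX QP for one slot. On IB ports the slot index is also
 * the P_Key index; slots whose P_Key is invalid (zero) are skipped
 * and may be filled in later via mlx5_ib_gsi_pkey_change().
 */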
static void setup_qp(struct mlx5_ib_gsi_qp *gsi, u16 qp_index)
{
	struct ib_device *device = gsi->rx_qp->device;
	struct mlx5_ib_dev *dev = to_mdev(device);
	int pkey_index = qp_index;
	struct mlx5_ib_qp *mqp;
	struct ib_qp *qp;
	unsigned long flags;
	u16 pkey;
	int ret;

	if (MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_IB)
		pkey_index = 0;

	ret = ib_query_pkey(device, gsi->port_num, pkey_index, &pkey);
	if (ret) {
		mlx5_ib_warn(dev, "unable to read P_Key at port %d, index %d\n",
			     gsi->port_num, qp_index);
		return;
	}

	if (!pkey) {
		mlx5_ib_dbg(dev, "invalid P_Key at port %d, index %d. Skipping.\n",
			    gsi->port_num, qp_index);
		return;
	}

	spin_lock_irqsave(&gsi->lock, flags);
	qp = gsi->tx_qps[qp_index];
	spin_unlock_irqrestore(&gsi->lock, flags);
	if (qp) {
		mlx5_ib_dbg(dev, "already existing GSI TX QP at port %d, index %d. Skipping\n",
			    gsi->port_num, qp_index);
		return;
	}

	qp = create_gsi_ud_qp(gsi);
	if (IS_ERR(qp)) {
		mlx5_ib_warn(dev, "unable to create hardware UD QP for GSI: %ld\n",
			     PTR_ERR(qp));
		return;
	}

	mqp = to_mqp(qp);
	if (dev->lag_active)
		mqp->gsi_lag_port = qp_index + 1;
	ret = modify_to_rts(gsi, qp, pkey_index);
	if (ret)
		goto err_destroy_qp;

	spin_lock_irqsave(&gsi->lock, flags);
	WARN_ON_ONCE(gsi->tx_qps[qp_index]);
	gsi->tx_qps[qp_index] = qp;
	spin_unlock_irqrestore(&gsi->lock, flags);

	return;

err_destroy_qp:
	WARN_ON_ONCE(qp);
}

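/* (Re)create any missing TX QPs; safe to call repeatedly. */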
static void setup_qps(struct mlx5_ib_gsi_qp *gsi)
{
	struct mlx5_ib_dev *dev = to_mdev(gsi->rx_qp->device);
	u16 qp_index;

	mutex_lock(&dev->devr.mutex);
	for (qp_index = 0; qp_index < gsi->num_qps; ++qp_index)
		setup_qp(gsi, qp_index);
	mutex_unlock(&dev->devr.mutex);
}

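/*
 * Modifying the emulated GSI QP modifies the hardware RX QP; once the
 * RX QP reaches RTS, the TX QPs are brought up as well.
 */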
int mlx5_ib_gsi_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
			  int attr_mask)
{
	struct mlx5_ib_dev *dev = to_mdev(qp->device);
	struct mlx5_ib_qp *mqp = to_mqp(qp);
	struct mlx5_ib_gsi_qp *gsi = &mqp->gsi;
	int ret;

	mlx5_ib_dbg(dev, "modifying GSI QP to state %d\n", attr->qp_state);

	ret = ib_modify_qp(gsi->rx_qp, attr, attr_mask);
	if (ret) {
		mlx5_ib_warn(dev, "unable to modify GSI rx QP: %d\n", ret);
		return ret;
	}

	if (to_mqp(gsi->rx_qp)->state == IB_QPS_RTS)
		setup_qps(gsi);
	return 0;
}

int mlx5_ib_gsi_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
			 int qp_attr_mask,
			 struct ib_qp_init_attr *qp_init_attr)
{
	struct mlx5_ib_qp *mqp = to_mqp(qp);
	struct mlx5_ib_gsi_qp *gsi = &mqp->gsi;
	int ret;

	ret = ib_query_qp(gsi->rx_qp, qp_attr, qp_attr_mask, qp_init_attr);
	qp_init_attr->cap = gsi->cap;
	return ret;
}

/* Call with gsi->lock locked */
static int mlx5_ib_add_outstanding_wr(struct mlx5_ib_qp *mqp,
				      struct ib_ud_wr *wr, struct ib_wc *wc)
{
	struct mlx5_ib_gsi_qp *gsi = &mqp->gsi;
	struct mlx5_ib_dev *dev = to_mdev(gsi->rx_qp->device);
	struct mlx5_ib_gsi_wr *gsi_wr;

	if (gsi->outstanding_pi == gsi->outstanding_ci + gsi->cap.max_send_wr) {
		mlx5_ib_warn(dev, "no available GSI work request.\n");
		return -ENOMEM;
	}

	gsi_wr = &gsi->outstanding_wrs[gsi->outstanding_pi %
				       gsi->cap.max_send_wr];
	gsi->outstanding_pi++;

	if (!wc) {
		memset(&gsi_wr->wc, 0, sizeof(gsi_wr->wc));
		gsi_wr->wc.pkey_index = wr->pkey_index;
		gsi_wr->wc.wr_id = wr->wr.wr_id;
	} else {
		gsi_wr->wc = *wc;
		gsi_wr->completed = true;
	}

	gsi_wr->cqe.done = &handle_single_completion;
	wr->wr.wr_cqe = &gsi_wr->cqe;

	return 0;
}

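/*
 * Complete a send "successfully" without transmitting anything, for
 * WRs that target a TX QP slot that was never brought up (e.g.
 * because its P_Key was invalid).
 */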
/* Call with gsi->lock locked */
static int mlx5_ib_gsi_silent_drop(struct mlx5_ib_qp *mqp, struct ib_ud_wr *wr)
{
	struct ib_wc wc = {
		{ .wr_id = wr->wr.wr_id },
		.status = IB_WC_SUCCESS,
		.opcode = IB_WC_SEND,
		.qp = &mqp->ibqp,
	};
	int ret;

	ret = mlx5_ib_add_outstanding_wr(mqp, wr, &wc);
	if (ret)
		return ret;

	generate_completions(mqp);

	return 0;
}

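/*
 * Select the TX QP for a WR: indexed by the WR's P_Key index, or by
 * the AH's transmit port when LAG is active. Returns NULL for an
 * empty slot, and the hardware RX QP when no TX QPs are in use.
 */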
/* Call with gsi->lock locked */
static struct ib_qp *get_tx_qp(struct mlx5_ib_gsi_qp *gsi, struct ib_ud_wr *wr)
{
	struct mlx5_ib_dev *dev = to_mdev(gsi->rx_qp->device);
	struct mlx5_ib_ah *ah = to_mah(wr->ah);
	int qp_index = wr->pkey_index;

	if (!gsi->num_qps)
		return gsi->rx_qp;

	if (dev->lag_active && ah->xmit_port)
		qp_index = ah->xmit_port - 1;

	if (qp_index >= gsi->num_qps)
		return NULL;

	return gsi->tx_qps[qp_index];
}

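/*
 * Post each WR separately under gsi->lock: reserve a slot in the
 * outstanding ring, then hand the WR to the selected TX QP. WRs with
 * no usable TX QP are silently dropped with a success completion.
 */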
int mlx5_ib_gsi_post_send(struct ib_qp *qp, const struct ib_send_wr *wr,
			  const struct ib_send_wr **bad_wr)
{
	struct mlx5_ib_qp *mqp = to_mqp(qp);
	struct mlx5_ib_gsi_qp *gsi = &mqp->gsi;
	struct ib_qp *tx_qp;
	unsigned long flags;
	int ret;

	for (; wr; wr = wr->next) {
		struct ib_ud_wr cur_wr = *ud_wr(wr);

		cur_wr.wr.next = NULL;

		spin_lock_irqsave(&gsi->lock, flags);
		tx_qp = get_tx_qp(gsi, &cur_wr);
		if (!tx_qp) {
			ret = mlx5_ib_gsi_silent_drop(mqp, &cur_wr);
			if (ret)
				goto err;
			spin_unlock_irqrestore(&gsi->lock, flags);
			continue;
		}

		ret = mlx5_ib_add_outstanding_wr(mqp, &cur_wr, NULL);
		if (ret)
			goto err;

		ret = ib_post_send(tx_qp, &cur_wr.wr, bad_wr);
		if (ret) {
			/* Undo the effect of adding the outstanding wr */
			gsi->outstanding_pi--;
			goto err;
		}
		spin_unlock_irqrestore(&gsi->lock, flags);
	}

	return 0;

err:
	spin_unlock_irqrestore(&gsi->lock, flags);
	*bad_wr = wr;
	return ret;
}

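/* Receives are not emulated; post directly to the hardware GSI QP. */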
int mlx5_ib_gsi_post_recv(struct ib_qp *qp, const struct ib_recv_wr *wr,
			  const struct ib_recv_wr **bad_wr)
{
	struct mlx5_ib_qp *mqp = to_mqp(qp);
	struct mlx5_ib_gsi_qp *gsi = &mqp->gsi;

	return ib_post_recv(gsi->rx_qp, wr, bad_wr);
}

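/*
 * Called on a P_Key table change; retries TX QP slots that were
 * previously skipped because their P_Key was invalid.
 */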
void mlx5_ib_gsi_pkey_change(struct mlx5_ib_gsi_qp *gsi)
{
	if (!gsi)
		return;

	setup_qps(gsi);
}