/*
 * Copyright(c) 2016 - 2019 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the
 *   distribution.
 * - Neither the name of Intel Corporation nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/hash.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_hdrs.h>
#include <rdma/opa_addr.h>
#include <rdma/uverbs_ioctl.h>
#include "qp.h"
#include "vt.h"
#include "trace.h"

#define RVT_RWQ_COUNT_THRESHOLD 16

static void rvt_rc_timeout(struct timer_list *t);
static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
                         enum ib_qp_type type);

/*
 * Convert the AETH RNR timeout code into the number of microseconds.
 * (Table values are in microseconds; the comments show the equivalent
 * time in milliseconds.)
 */
static const u32 ib_rvt_rnr_table[32] = {
        655360, /* 00: 655.36 */
        10,     /* 01: .01 */
        20,     /* 02: .02 */
        30,     /* 03: .03 */
        40,     /* 04: .04 */
        60,     /* 05: .06 */
        80,     /* 06: .08 */
        120,    /* 07: .12 */
        160,    /* 08: .16 */
        240,    /* 09: .24 */
        320,    /* 0A: .32 */
        480,    /* 0B: .48 */
        640,    /* 0C: .64 */
        960,    /* 0D: .96 */
        1280,   /* 0E: 1.28 */
        1920,   /* 0F: 1.92 */
        2560,   /* 10: 2.56 */
        3840,   /* 11: 3.84 */
        5120,   /* 12: 5.12 */
        7680,   /* 13: 7.68 */
        10240,  /* 14: 10.24 */
        15360,  /* 15: 15.36 */
        20480,  /* 16: 20.48 */
        30720,  /* 17: 30.72 */
        40960,  /* 18: 40.96 */
        61440,  /* 19: 61.44 */
        81920,  /* 1A: 81.92 */
        122880, /* 1B: 122.88 */
        163840, /* 1C: 163.84 */
        245760, /* 1D: 245.76 */
        327680, /* 1E: 327.68 */
        491520  /* 1F: 491.52 */
};
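
/*
 * Worked example (illustrative): an AETH carrying RNR NAK code 0x0B maps
 * to ib_rvt_rnr_table[0x0B] == 480 microseconds (0.48 ms), so a requester
 * seeing that NAK backs off roughly half a millisecond before retrying.
 */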

/*
 * Note that it is OK to post send work requests in the SQE and ERR
 * states; rvt_do_send() will process them and generate error
 * completions as per IB 1.2 C10-96.
 */
const int ib_rvt_state_ops[IB_QPS_ERR + 1] = {
        [IB_QPS_RESET] = 0,
        [IB_QPS_INIT] = RVT_POST_RECV_OK,
        [IB_QPS_RTR] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK,
        [IB_QPS_RTS] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
            RVT_POST_SEND_OK | RVT_PROCESS_SEND_OK |
            RVT_PROCESS_NEXT_SEND_OK,
        [IB_QPS_SQD] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
            RVT_POST_SEND_OK | RVT_PROCESS_SEND_OK,
        [IB_QPS_SQE] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
            RVT_POST_SEND_OK | RVT_FLUSH_SEND,
        [IB_QPS_ERR] = RVT_POST_RECV_OK | RVT_FLUSH_RECV |
            RVT_POST_SEND_OK | RVT_FLUSH_SEND,
};
EXPORT_SYMBOL(ib_rvt_state_ops);
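
/*
 * Illustrative usage sketch: callers gate posting and processing on this
 * table with a simple bitmask test, e.g.
 *
 *      if (!(ib_rvt_state_ops[qp->state] & RVT_POST_SEND_OK))
 *              return -EINVAL;
 *
 * which is exactly the pattern rvt_post_recv() below uses with
 * RVT_POST_RECV_OK.
 */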

/* platform specific: return the last level cache (llc) size, in KiB */
static int rvt_wss_llc_size(void)
{
        /* assume that the boot CPU value is universal for all CPUs */
        return boot_cpu_data.x86_cache_size;
}

/* platform specific: cacheless copy */
static void cacheless_memcpy(void *dst, void *src, size_t n)
{
        /*
         * Use the only available X64 cacheless copy. Add a __user cast
         * to quiet sparse. The src argument is already in the kernel so
         * there are no security issues. The extra fault recovery machinery
         * is not invoked.
         */
        __copy_user_nocache(dst, (void __user *)src, n, 0);
}

void rvt_wss_exit(struct rvt_dev_info *rdi)
{
        struct rvt_wss *wss = rdi->wss;

        if (!wss)
                return;

        /* coded to handle partially initialized and repeat callers */
        kfree(wss->entries);
        wss->entries = NULL;
        kfree(rdi->wss);
        rdi->wss = NULL;
}

/**
 * rvt_wss_init - Init wss data structures
 * @rdi: rvt dev structure
 *
 * Return: 0 on success
 */
int rvt_wss_init(struct rvt_dev_info *rdi)
{
        unsigned int sge_copy_mode = rdi->dparms.sge_copy_mode;
        unsigned int wss_threshold = rdi->dparms.wss_threshold;
        unsigned int wss_clean_period = rdi->dparms.wss_clean_period;
        long llc_size;
        long llc_bits;
        long table_size;
        long table_bits;
        struct rvt_wss *wss;
        int node = rdi->dparms.node;

        if (sge_copy_mode != RVT_SGE_COPY_ADAPTIVE) {
                rdi->wss = NULL;
                return 0;
        }

        rdi->wss = kzalloc_node(sizeof(*rdi->wss), GFP_KERNEL, node);
        if (!rdi->wss)
                return -ENOMEM;
        wss = rdi->wss;

        /* check for a valid percent range - default to 80 if none or invalid */
        if (wss_threshold < 1 || wss_threshold > 100)
                wss_threshold = 80;

        /* reject a wildly large period */
        if (wss_clean_period > 1000000)
                wss_clean_period = 256;

        /* reject a zero period */
        if (wss_clean_period == 0)
                wss_clean_period = 1;

        /*
         * Calculate the table size - the next power of 2 larger than the
         * LLC size. LLC size is in KiB.
         */
        llc_size = rvt_wss_llc_size() * 1024;
        table_size = roundup_pow_of_two(llc_size);

        /* one bit per page in rounded up table */
        llc_bits = llc_size / PAGE_SIZE;
        table_bits = table_size / PAGE_SIZE;
        wss->pages_mask = table_bits - 1;
        wss->num_entries = table_bits / BITS_PER_LONG;

        wss->threshold = (llc_bits * wss_threshold) / 100;
        if (wss->threshold == 0)
                wss->threshold = 1;

        wss->clean_period = wss_clean_period;
        atomic_set(&wss->clean_counter, wss_clean_period);

        wss->entries = kcalloc_node(wss->num_entries, sizeof(*wss->entries),
                                    GFP_KERNEL, node);
        if (!wss->entries) {
                rvt_wss_exit(rdi);
                return -ENOMEM;
        }

        return 0;
}
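
/*
 * Worked example (illustrative numbers): with a 32 MiB LLC and 4 KiB
 * pages, llc_size rounds to an exact power of two, llc_bits = table_bits
 * = 8192, pages_mask = 8191, and num_entries = 128 longs on a 64-bit
 * machine. With the default 80% threshold the working set is considered
 * "large" once roughly 6553 distinct pages are hot.
 */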

/*
 * Advance the clean counter. When the clean period has expired,
 * clean an entry.
 *
 * This is implemented in atomics to avoid locking. Because multiple
 * variables are involved, it can be racy which can lead to slightly
 * inaccurate information. Since this is only a heuristic, this is
 * OK. Any inaccuracies will clean themselves out as the counter
 * advances. That said, it is unlikely the entry clean operation will
 * race - the next possible racer will not start until the next clean
 * period.
 *
 * The clean counter is implemented as a decrement to zero. When zero
 * is reached an entry is cleaned.
 */
static void wss_advance_clean_counter(struct rvt_wss *wss)
{
        int entry;
        int weight;
        unsigned long bits;

        /* become the cleaner if we decrement the counter to zero */
        if (atomic_dec_and_test(&wss->clean_counter)) {
                /*
                 * Set, not add, the clean period. This avoids an issue
                 * where the counter could decrement below the clean period.
                 * Doing a set can result in lost decrements, slowing the
                 * clean advance. Since this is a heuristic, this possible
                 * slowdown is OK.
                 *
                 * An alternative is to loop, advancing the counter by a
                 * clean period until the result is > 0. However, this could
                 * lead to several threads keeping another in the clean loop.
                 * This could be mitigated by limiting the number of times
                 * we stay in the loop.
                 */
                atomic_set(&wss->clean_counter, wss->clean_period);

                /*
                 * Uniquely grab the entry to clean and move to next.
                 * The current entry is always the lower bits of
                 * wss.clean_entry. The table size, wss.num_entries,
                 * is always a power-of-2.
                 */
                entry = (atomic_inc_return(&wss->clean_entry) - 1)
                        & (wss->num_entries - 1);

                /* clear the entry and count the bits */
                bits = xchg(&wss->entries[entry], 0);
                weight = hweight64((u64)bits);
                /* only adjust the contended total count if needed */
                if (weight)
                        atomic_sub(weight, &wss->total_count);
        }
}

/*
 * Insert the given address into the working set array.
 */
static void wss_insert(struct rvt_wss *wss, void *address)
{
        u32 page = ((unsigned long)address >> PAGE_SHIFT) & wss->pages_mask;
        u32 entry = page / BITS_PER_LONG; /* assumes this ends up a shift */
        u32 nr = page & (BITS_PER_LONG - 1);

        if (!test_and_set_bit(nr, &wss->entries[entry]))
                atomic_inc(&wss->total_count);

        wss_advance_clean_counter(wss);
}
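
/*
 * Illustrative decomposition (assuming 4 KiB pages and 64-bit longs):
 * address 0x7f3254321000 >> 12 gives page 0x7f3254321; after masking
 * with pages_mask, entry = page / 64 selects the unsigned long and
 * nr = page % 64 selects the bit within it. Setting an already-set bit
 * changes nothing, so total_count only counts distinct hot pages.
 */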

/*
 * Is the working set larger than the threshold?
 */
static inline bool wss_exceeds_threshold(struct rvt_wss *wss)
{
        return atomic_read(&wss->total_count) >= wss->threshold;
}
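
/*
 * Minimal sketch of the intended use (hypothetical caller, not part of
 * this file): a driver doing adaptive SGE copies records each destination
 * page and switches to the cacheless copy once the working set no longer
 * fits in the LLC, e.g.
 *
 *      wss_insert(wss, sge->vaddr);
 *      if (wss_exceeds_threshold(wss))
 *              cacheless_memcpy(sge->vaddr, data, len);
 *      else
 *              memcpy(sge->vaddr, data, len);
 */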

static void get_map_page(struct rvt_qpn_table *qpt,
                         struct rvt_qpn_map *map)
{
        unsigned long page = get_zeroed_page(GFP_KERNEL);

        /*
         * Free the page if someone raced with us installing it.
         */

        spin_lock(&qpt->lock);
        if (map->page)
                free_page(page);
        else
                map->page = (void *)page;
        spin_unlock(&qpt->lock);
}

/**
 * init_qpn_table - initialize the QP number table for a device
 * @rdi: rvt dev structure
 * @qpt: the QPN table
 */
static int init_qpn_table(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt)
{
        u32 offset, i;
        struct rvt_qpn_map *map;
        int ret = 0;

        if (!(rdi->dparms.qpn_res_end >= rdi->dparms.qpn_res_start))
                return -EINVAL;

        spin_lock_init(&qpt->lock);

        qpt->last = rdi->dparms.qpn_start;
        qpt->incr = rdi->dparms.qpn_inc << rdi->dparms.qos_shift;

        /*
         * Drivers may want some QPs beyond what we need for verbs; let them
         * use our QPN table. No need for two. Let's go ahead and mark the
         * bitmaps for those. The reserved range must be *after* the range
         * which verbs will pick from.
         */

        /* Figure out number of bit maps needed before reserved range */
        qpt->nmaps = rdi->dparms.qpn_res_start / RVT_BITS_PER_PAGE;

        /* This should always be zero */
        offset = rdi->dparms.qpn_res_start & RVT_BITS_PER_PAGE_MASK;

        /* Starting with the first reserved bit map */
        map = &qpt->map[qpt->nmaps];

        rvt_pr_info(rdi, "Reserving QPNs from 0x%x to 0x%x for non-verbs use\n",
                    rdi->dparms.qpn_res_start, rdi->dparms.qpn_res_end);
        for (i = rdi->dparms.qpn_res_start; i <= rdi->dparms.qpn_res_end; i++) {
                if (!map->page) {
                        get_map_page(qpt, map);
                        if (!map->page) {
                                ret = -ENOMEM;
                                break;
                        }
                }
                set_bit(offset, map->page);
                offset++;
                if (offset == RVT_BITS_PER_PAGE) {
                        /* next page */
                        qpt->nmaps++;
                        map++;
                        offset = 0;
                }
        }
        return ret;
}
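
/*
 * Example (hypothetical numbers): with qpn_res_start = 0x10000 and 4 KiB
 * pages (so RVT_BITS_PER_PAGE = 32768), the loop above starts at bitmap
 * page 2 and sets one bit per reserved QPN, advancing to the next bitmap
 * page each time offset wraps. alloc_qpn() below will then never hand
 * those QPNs to verbs consumers.
 */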

/**
 * free_qpn_table - free the QP number table for a device
 * @qpt: the QPN table
 */
static void free_qpn_table(struct rvt_qpn_table *qpt)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(qpt->map); i++)
                free_page((unsigned long)qpt->map[i].page);
}

/**
 * rvt_driver_qp_init - Init driver qp resources
 * @rdi: rvt dev structure
 *
 * Return: 0 on success
 */
int rvt_driver_qp_init(struct rvt_dev_info *rdi)
{
        int i;
        int ret = -ENOMEM;

        if (!rdi->dparms.qp_table_size)
                return -EINVAL;

        /*
         * If driver is not doing any QP allocation then make sure it is
         * providing the necessary QP functions.
         */
        if (!rdi->driver_f.free_all_qps ||
            !rdi->driver_f.qp_priv_alloc ||
            !rdi->driver_f.qp_priv_free ||
            !rdi->driver_f.notify_qp_reset ||
            !rdi->driver_f.notify_restart_rc)
                return -EINVAL;

        /* allocate parent object */
        rdi->qp_dev = kzalloc_node(sizeof(*rdi->qp_dev), GFP_KERNEL,
                                   rdi->dparms.node);
        if (!rdi->qp_dev)
                return -ENOMEM;

        /* allocate hash table */
        rdi->qp_dev->qp_table_size = rdi->dparms.qp_table_size;
        rdi->qp_dev->qp_table_bits = ilog2(rdi->dparms.qp_table_size);
        rdi->qp_dev->qp_table =
                kmalloc_array_node(rdi->qp_dev->qp_table_size,
                                   sizeof(*rdi->qp_dev->qp_table),
                                   GFP_KERNEL, rdi->dparms.node);
        if (!rdi->qp_dev->qp_table)
                goto no_qp_table;

        for (i = 0; i < rdi->qp_dev->qp_table_size; i++)
                RCU_INIT_POINTER(rdi->qp_dev->qp_table[i], NULL);

        spin_lock_init(&rdi->qp_dev->qpt_lock);

        /* initialize qpn map */
        if (init_qpn_table(rdi, &rdi->qp_dev->qpn_table))
                goto fail_table;

        spin_lock_init(&rdi->n_qps_lock);

        return 0;

fail_table:
        kfree(rdi->qp_dev->qp_table);
        free_qpn_table(&rdi->qp_dev->qpn_table);

no_qp_table:
        kfree(rdi->qp_dev);

        return ret;
}

/**
 * rvt_free_qp_cb - callback function to reset a qp
 * @qp: the qp to reset
 * @v: a 64-bit value
 *
 * This function resets the qp and removes it from the
 * qp hash table.
 */
static void rvt_free_qp_cb(struct rvt_qp *qp, u64 v)
{
        unsigned int *qp_inuse = (unsigned int *)v;
        struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);

        /* Reset the qp and remove it from the qp hash list */
        rvt_reset_qp(rdi, qp, qp->ibqp.qp_type);

        /* Increment the qp_inuse count */
        (*qp_inuse)++;
}

/**
 * rvt_free_all_qps - check for QPs still in use
 * @rdi: rvt device info structure
 *
 * There should not be any QPs still in use.
 * Free memory for table.
 * Return the number of QPs still in use.
 */
static unsigned rvt_free_all_qps(struct rvt_dev_info *rdi)
{
        unsigned int qp_inuse = 0;

        qp_inuse += rvt_mcast_tree_empty(rdi);

        rvt_qp_iter(rdi, (u64)&qp_inuse, rvt_free_qp_cb);

        return qp_inuse;
}

/**
 * rvt_qp_exit - clean up qps on device exit
 * @rdi: rvt dev structure
 *
 * Check for qp leaks and free resources.
 */
void rvt_qp_exit(struct rvt_dev_info *rdi)
{
        u32 qps_inuse = rvt_free_all_qps(rdi);

        if (qps_inuse)
                rvt_pr_err(rdi, "QP memory leak! %u still in use\n",
                           qps_inuse);
        if (!rdi->qp_dev)
                return;

        kfree(rdi->qp_dev->qp_table);
        free_qpn_table(&rdi->qp_dev->qpn_table);
        kfree(rdi->qp_dev);
}

static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
                              struct rvt_qpn_map *map, unsigned off)
{
        return (map - qpt->map) * RVT_BITS_PER_PAGE + off;
}

/**
 * alloc_qpn - Allocate the next available qpn or zero/one for QP type
 * IB_QPT_SMI/IB_QPT_GSI
 * @rdi: rvt device info structure
 * @qpt: queue pair number table pointer
 * @type: the QP type
 * @port_num: IB port number, 1 based, comes from core
 *
 * Return: The queue pair number
 */
static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
                     enum ib_qp_type type, u8 port_num)
{
        u32 i, offset, max_scan, qpn;
        struct rvt_qpn_map *map;
        u32 ret;

        if (rdi->driver_f.alloc_qpn)
                return rdi->driver_f.alloc_qpn(rdi, qpt, type, port_num);

        if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
                unsigned n;

                ret = type == IB_QPT_GSI;
                n = 1 << (ret + 2 * (port_num - 1));
                spin_lock(&qpt->lock);
                if (qpt->flags & n)
                        ret = -EINVAL;
                else
                        qpt->flags |= n;
                spin_unlock(&qpt->lock);
                goto bail;
        }

        qpn = qpt->last + qpt->incr;
        if (qpn >= RVT_QPN_MAX)
                qpn = qpt->incr | ((qpt->last & 1) ^ 1);
        /* offset carries bit 0 */
        offset = qpn & RVT_BITS_PER_PAGE_MASK;
        map = &qpt->map[qpn / RVT_BITS_PER_PAGE];
        max_scan = qpt->nmaps - !offset;
        for (i = 0;;) {
                if (unlikely(!map->page)) {
                        get_map_page(qpt, map);
                        if (unlikely(!map->page))
                                break;
                }
                do {
                        if (!test_and_set_bit(offset, map->page)) {
                                qpt->last = qpn;
                                ret = qpn;
                                goto bail;
                        }
                        offset += qpt->incr;
                        /*
                         * This qpn might be bogus if offset >=
                         * RVT_BITS_PER_PAGE. That is OK. It gets
                         * re-assigned below.
                         */
                        qpn = mk_qpn(qpt, map, offset);
                } while (offset < RVT_BITS_PER_PAGE && qpn < RVT_QPN_MAX);
                /*
                 * In order to keep the number of pages allocated to a
                 * minimum, we scan all the existing pages before increasing
                 * the size of the bitmap table.
                 */
                if (++i > max_scan) {
                        if (qpt->nmaps == RVT_QPNMAP_ENTRIES)
                                break;
                        map = &qpt->map[qpt->nmaps++];
                        /* start at incr with current bit 0 */
                        offset = qpt->incr | (offset & 1);
                } else if (map < &qpt->map[qpt->nmaps]) {
                        ++map;
                        /* start at incr with current bit 0 */
                        offset = qpt->incr | (offset & 1);
                } else {
                        map = &qpt->map[0];
                        /* wrap to first map page, invert bit 0 */
                        offset = qpt->incr | ((offset & 1) ^ 1);
                }
                /* there can be no set bits in low-order QoS bits */
                WARN_ON(rdi->dparms.qos_shift > 1 &&
                        offset & ((BIT(rdi->dparms.qos_shift - 1) - 1) << 1));
                qpn = mk_qpn(qpt, map, offset);
        }

        ret = -ENOMEM;

bail:
        return ret;
}
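
/*
 * Example of the stride logic (hypothetical parameters): with qpn_inc = 1
 * and qos_shift = 2, qpt->incr is 4, so successive probes visit QPNs four
 * apart; the low-order QoS bits of every candidate stay clear, and only
 * bit 0 is toggled on a full wrap. The WARN_ON above checks exactly that
 * invariant.
 */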

/**
 * rvt_clear_mr_refs - Drop held MR refs
 * @qp: rvt qp data structure
 * @clr_sends: Whether to clear the send side or not
 */
static void rvt_clear_mr_refs(struct rvt_qp *qp, int clr_sends)
{
        unsigned n;
        struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);

        if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags))
                rvt_put_ss(&qp->s_rdma_read_sge);

        rvt_put_ss(&qp->r_sge);

        if (clr_sends) {
                while (qp->s_last != qp->s_head) {
                        struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_last);

                        rvt_put_qp_swqe(qp, wqe);
                        if (++qp->s_last >= qp->s_size)
                                qp->s_last = 0;
                        smp_wmb(); /* see qp_set_savail */
                }
                if (qp->s_rdma_mr) {
                        rvt_put_mr(qp->s_rdma_mr);
                        qp->s_rdma_mr = NULL;
                }
        }

        for (n = 0; qp->s_ack_queue && n < rvt_max_atomic(rdi); n++) {
                struct rvt_ack_entry *e = &qp->s_ack_queue[n];

                if (e->rdma_sge.mr) {
                        rvt_put_mr(e->rdma_sge.mr);
                        e->rdma_sge.mr = NULL;
                }
        }
}

/**
 * rvt_swqe_has_lkey - return true if lkey is used by swqe
 * @wqe: the send wqe
 * @lkey: the lkey
 *
 * Test the swqe for using lkey
 */
static bool rvt_swqe_has_lkey(struct rvt_swqe *wqe, u32 lkey)
{
        int i;

        for (i = 0; i < wqe->wr.num_sge; i++) {
                struct rvt_sge *sge = &wqe->sg_list[i];

                if (rvt_mr_has_lkey(sge->mr, lkey))
                        return true;
        }
        return false;
}

/**
 * rvt_qp_sends_has_lkey - return true if qp sends use lkey
 * @qp: the rvt_qp
 * @lkey: the lkey
 */
static bool rvt_qp_sends_has_lkey(struct rvt_qp *qp, u32 lkey)
{
        u32 s_last = qp->s_last;

        while (s_last != qp->s_head) {
                struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, s_last);

                if (rvt_swqe_has_lkey(wqe, lkey))
                        return true;

                if (++s_last >= qp->s_size)
                        s_last = 0;
        }
        if (qp->s_rdma_mr)
                if (rvt_mr_has_lkey(qp->s_rdma_mr, lkey))
                        return true;
        return false;
}

/**
 * rvt_qp_acks_has_lkey - return true if acks have lkey
 * @qp: the qp
 * @lkey: the lkey
 */
static bool rvt_qp_acks_has_lkey(struct rvt_qp *qp, u32 lkey)
{
        int i;
        struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);

        for (i = 0; qp->s_ack_queue && i < rvt_max_atomic(rdi); i++) {
                struct rvt_ack_entry *e = &qp->s_ack_queue[i];

                if (rvt_mr_has_lkey(e->rdma_sge.mr, lkey))
                        return true;
        }
        return false;
}

/*
 * rvt_qp_mr_clean - clean up remote ops for lkey
 * @qp: the qp
 * @lkey: the lkey that is being de-registered
 *
 * This routine checks if the lkey is being used by
 * the qp.
 *
 * If so, the qp is put into an error state to eliminate
 * any references from the qp.
 */
void rvt_qp_mr_clean(struct rvt_qp *qp, u32 lkey)
{
        bool lastwqe = false;

        if (qp->ibqp.qp_type == IB_QPT_SMI ||
            qp->ibqp.qp_type == IB_QPT_GSI)
                /* avoid special QPs */
                return;
        spin_lock_irq(&qp->r_lock);
        spin_lock(&qp->s_hlock);
        spin_lock(&qp->s_lock);

        if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
                goto check_lwqe;

        if (rvt_ss_has_lkey(&qp->r_sge, lkey) ||
            rvt_qp_sends_has_lkey(qp, lkey) ||
            rvt_qp_acks_has_lkey(qp, lkey))
                lastwqe = rvt_error_qp(qp, IB_WC_LOC_PROT_ERR);
check_lwqe:
        spin_unlock(&qp->s_lock);
        spin_unlock(&qp->s_hlock);
        spin_unlock_irq(&qp->r_lock);
        if (lastwqe) {
                struct ib_event ev;

                ev.device = qp->ibqp.device;
                ev.element.qp = &qp->ibqp;
                ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
                qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
        }
}

/**
 * rvt_remove_qp - remove qp from table
 * @rdi: rvt dev struct
 * @qp: qp to remove
 *
 * Remove the QP from the table so it can't be found asynchronously by
 * the receive routine.
 */
static void rvt_remove_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
        struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
        u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits);
        unsigned long flags;
        int removed = 1;

        spin_lock_irqsave(&rdi->qp_dev->qpt_lock, flags);

        if (rcu_dereference_protected(rvp->qp[0],
                        lockdep_is_held(&rdi->qp_dev->qpt_lock)) == qp) {
                RCU_INIT_POINTER(rvp->qp[0], NULL);
        } else if (rcu_dereference_protected(rvp->qp[1],
                        lockdep_is_held(&rdi->qp_dev->qpt_lock)) == qp) {
                RCU_INIT_POINTER(rvp->qp[1], NULL);
        } else {
                struct rvt_qp *q;
                struct rvt_qp __rcu **qpp;

                removed = 0;
                qpp = &rdi->qp_dev->qp_table[n];
                for (; (q = rcu_dereference_protected(*qpp,
                        lockdep_is_held(&rdi->qp_dev->qpt_lock))) != NULL;
                        qpp = &q->next) {
                        if (q == qp) {
                                RCU_INIT_POINTER(*qpp,
                                        rcu_dereference_protected(qp->next,
                                        lockdep_is_held(&rdi->qp_dev->qpt_lock)));
                                removed = 1;
                                trace_rvt_qpremove(qp, n);
                                break;
                        }
                }
        }

        spin_unlock_irqrestore(&rdi->qp_dev->qpt_lock, flags);
        if (removed) {
                synchronize_rcu();
                rvt_put_qp(qp);
        }
}

/**
 * rvt_alloc_rq - allocate memory for user or kernel buffer
 * @rq: receive queue data structure
 * @size: number of request queue entries
 * @node: The NUMA node
 * @udata: if non-NULL, the queue is mapped into user space
 *
 * Return: 0 on success, or -ENOMEM if memory allocation failed.
 * This function is used by both shared receive
 * queues and non-shared receive queues to allocate
 * memory.
 */
int rvt_alloc_rq(struct rvt_rq *rq, u32 size, int node,
                 struct ib_udata *udata)
{
        if (udata) {
                rq->wq = vmalloc_user(sizeof(struct rvt_rwq) + size);
                if (!rq->wq)
                        goto bail;
                /* need kwq with no buffers */
                rq->kwq = kzalloc_node(sizeof(*rq->kwq), GFP_KERNEL, node);
                if (!rq->kwq)
                        goto bail;
                rq->kwq->curr_wq = rq->wq->wq;
        } else {
                /* need kwq with buffers */
                rq->kwq =
                        vzalloc_node(sizeof(struct rvt_krwq) + size, node);
                if (!rq->kwq)
                        goto bail;
                rq->kwq->curr_wq = rq->kwq->wq;
        }

        spin_lock_init(&rq->kwq->p_lock);
        spin_lock_init(&rq->kwq->c_lock);
        return 0;
bail:
        rvt_free_rq(rq);
        return -ENOMEM;
}
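
/*
 * Design note (summarizing the two branches above): for user queues the
 * WQE ring lives in a vmalloc_user() area so it can be mmap()ed into the
 * process, and the kernel-side rvt_krwq only carries the locks and a
 * pointer to that ring; for kernel queues the ring is embedded in the
 * rvt_krwq itself. Either way, callers always reach the ring through
 * rq->kwq->curr_wq.
 */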

/**
 * rvt_init_qp - initialize the QP state to the reset state
 * @rdi: rvt dev structure
 * @qp: the QP to init or reinit
 * @type: the QP type
 *
 * This function is called from both rvt_create_qp() and
 * rvt_reset_qp(). The difference is that the reset path takes
 * the necessary locks to protect against concurrent access.
 */
static void rvt_init_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
                        enum ib_qp_type type)
{
        qp->remote_qpn = 0;
        qp->qkey = 0;
        qp->qp_access_flags = 0;
        qp->s_flags &= RVT_S_SIGNAL_REQ_WR;
        qp->s_hdrwords = 0;
        qp->s_wqe = NULL;
        qp->s_draining = 0;
        qp->s_next_psn = 0;
        qp->s_last_psn = 0;
        qp->s_sending_psn = 0;
        qp->s_sending_hpsn = 0;
        qp->s_psn = 0;
        qp->r_psn = 0;
        qp->r_msn = 0;
        if (type == IB_QPT_RC) {
                qp->s_state = IB_OPCODE_RC_SEND_LAST;
                qp->r_state = IB_OPCODE_RC_SEND_LAST;
        } else {
                qp->s_state = IB_OPCODE_UC_SEND_LAST;
                qp->r_state = IB_OPCODE_UC_SEND_LAST;
        }
        qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
        qp->r_nak_state = 0;
        qp->r_aflags = 0;
        qp->r_flags = 0;
        qp->s_head = 0;
        qp->s_tail = 0;
        qp->s_cur = 0;
        qp->s_acked = 0;
        qp->s_last = 0;
        qp->s_ssn = 1;
        qp->s_lsn = 0;
        qp->s_mig_state = IB_MIG_MIGRATED;
        qp->r_head_ack_queue = 0;
        qp->s_tail_ack_queue = 0;
        qp->s_acked_ack_queue = 0;
        qp->s_num_rd_atomic = 0;
        qp->r_sge.num_sge = 0;
        atomic_set(&qp->s_reserved_used, 0);
}

/**
 * _rvt_reset_qp - initialize the QP state to the reset state
 * @rdi: the device info
 * @qp: the QP to reset
 * @type: the QP type
 *
 * r_lock, s_hlock, and s_lock are required to be held by the caller
 */
static void _rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
                          enum ib_qp_type type)
        __must_hold(&qp->s_lock)
        __must_hold(&qp->s_hlock)
        __must_hold(&qp->r_lock)
{
        lockdep_assert_held(&qp->r_lock);
        lockdep_assert_held(&qp->s_hlock);
        lockdep_assert_held(&qp->s_lock);
        if (qp->state != IB_QPS_RESET) {
                qp->state = IB_QPS_RESET;

                /* Let drivers flush their waitlist */
                rdi->driver_f.flush_qp_waiters(qp);
                rvt_stop_rc_timers(qp);
                qp->s_flags &= ~(RVT_S_TIMER | RVT_S_ANY_WAIT);
                spin_unlock(&qp->s_lock);
                spin_unlock(&qp->s_hlock);
                spin_unlock_irq(&qp->r_lock);

                /* Stop the send queue and the retry timer */
                rdi->driver_f.stop_send_queue(qp);
                rvt_del_timers_sync(qp);
                /* Wait for things to stop */
                rdi->driver_f.quiesce_qp(qp);

                /* take qp out the hash and wait for it to be unused */
                rvt_remove_qp(rdi, qp);

                /* grab the lock b/c it was locked at call time */
                spin_lock_irq(&qp->r_lock);
                spin_lock(&qp->s_hlock);
                spin_lock(&qp->s_lock);

                rvt_clear_mr_refs(qp, 1);
                /*
                 * Let the driver do any tear down or re-init it needs to for
                 * a qp that has been reset
                 */
                rdi->driver_f.notify_qp_reset(qp);
        }
        rvt_init_qp(rdi, qp, type);
        lockdep_assert_held(&qp->r_lock);
        lockdep_assert_held(&qp->s_hlock);
        lockdep_assert_held(&qp->s_lock);
}

/**
 * rvt_reset_qp - initialize the QP state to the reset state
 * @rdi: the device info
 * @qp: the QP to reset
 * @type: the QP type
 *
 * This is the wrapper function to acquire the r_lock, s_hlock, and s_lock
 * before calling _rvt_reset_qp().
 */
static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
                         enum ib_qp_type type)
{
        spin_lock_irq(&qp->r_lock);
        spin_lock(&qp->s_hlock);
        spin_lock(&qp->s_lock);
        _rvt_reset_qp(rdi, qp, type);
        spin_unlock(&qp->s_lock);
        spin_unlock(&qp->s_hlock);
        spin_unlock_irq(&qp->r_lock);
}

/**
 * rvt_free_qpn - Free a qpn from the bit map
 * @qpt: QP table
 * @qpn: queue pair number to free
 */
static void rvt_free_qpn(struct rvt_qpn_table *qpt, u32 qpn)
{
        struct rvt_qpn_map *map;

        map = qpt->map + (qpn & RVT_QPN_MASK) / RVT_BITS_PER_PAGE;
        if (map->page)
                clear_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page);
}

/**
 * get_allowed_ops - Given a QP type return the appropriate allowed OP
 * @type: valid, supported, QP type
 */
static u8 get_allowed_ops(enum ib_qp_type type)
{
        return type == IB_QPT_RC ? IB_OPCODE_RC : type == IB_QPT_UC ?
                IB_OPCODE_UC : IB_OPCODE_UD;
}

/**
 * free_ud_wq_attr - Clean up AH attribute cache for UD QPs
 * @qp: Valid QP with allowed_ops set
 *
 * The rvt_swqe data structure being used is a union, so this is
 * only valid for UD QPs.
 */
static void free_ud_wq_attr(struct rvt_qp *qp)
{
        struct rvt_swqe *wqe;
        int i;

        for (i = 0; qp->allowed_ops == IB_OPCODE_UD && i < qp->s_size; i++) {
                wqe = rvt_get_swqe_ptr(qp, i);
                kfree(wqe->ud_wr.attr);
                wqe->ud_wr.attr = NULL;
        }
}

/**
 * alloc_ud_wq_attr - AH attribute cache for UD QPs
 * @qp: Valid QP with allowed_ops set
 * @node: Numa node for allocation
 *
 * The rvt_swqe data structure being used is a union, so this is
 * only valid for UD QPs.
 */
static int alloc_ud_wq_attr(struct rvt_qp *qp, int node)
{
        struct rvt_swqe *wqe;
        int i;

        for (i = 0; qp->allowed_ops == IB_OPCODE_UD && i < qp->s_size; i++) {
                wqe = rvt_get_swqe_ptr(qp, i);
                wqe->ud_wr.attr = kzalloc_node(sizeof(*wqe->ud_wr.attr),
                                               GFP_KERNEL, node);
                if (!wqe->ud_wr.attr) {
                        free_ud_wq_attr(qp);
                        return -ENOMEM;
                }
        }

        return 0;
}

/**
 * rvt_create_qp - create a queue pair for a device
 * @ibpd: the protection domain whose device we create the queue pair for
 * @init_attr: the attributes of the queue pair
 * @udata: user data for libibverbs.so
 *
 * Queue pair creation is mostly an rvt issue. However, drivers have their own
 * unique idea of what queue pair numbers mean. For instance there is a reserved
 * range for PSM.
 *
 * Return: the queue pair on success, otherwise returns an errno.
 *
 * Called by the ib_create_qp() core verbs function.
 */
struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
                            struct ib_qp_init_attr *init_attr,
                            struct ib_udata *udata)
{
        struct rvt_qp *qp;
        int err;
        struct rvt_swqe *swq = NULL;
        size_t sz;
        size_t sg_list_sz;
        struct ib_qp *ret = ERR_PTR(-ENOMEM);
        struct rvt_dev_info *rdi = ib_to_rvt(ibpd->device);
        void *priv = NULL;
        size_t sqsize;

        if (!rdi)
                return ERR_PTR(-EINVAL);

        if (init_attr->cap.max_send_sge > rdi->dparms.props.max_send_sge ||
            init_attr->cap.max_send_wr > rdi->dparms.props.max_qp_wr ||
            init_attr->create_flags)
                return ERR_PTR(-EINVAL);

        /* Check receive queue parameters if no SRQ is specified. */
        if (!init_attr->srq) {
                if (init_attr->cap.max_recv_sge >
                    rdi->dparms.props.max_recv_sge ||
                    init_attr->cap.max_recv_wr > rdi->dparms.props.max_qp_wr)
                        return ERR_PTR(-EINVAL);

                if (init_attr->cap.max_send_sge +
                    init_attr->cap.max_send_wr +
                    init_attr->cap.max_recv_sge +
                    init_attr->cap.max_recv_wr == 0)
                        return ERR_PTR(-EINVAL);
        }
        sqsize =
                init_attr->cap.max_send_wr + 1 +
                rdi->dparms.reserved_operations;
        switch (init_attr->qp_type) {
        case IB_QPT_SMI:
        case IB_QPT_GSI:
                if (init_attr->port_num == 0 ||
                    init_attr->port_num > ibpd->device->phys_port_cnt)
                        return ERR_PTR(-EINVAL);
                /* fall through */
        case IB_QPT_UC:
        case IB_QPT_RC:
        case IB_QPT_UD:
                sz = struct_size(swq, sg_list, init_attr->cap.max_send_sge);
                swq = vzalloc_node(array_size(sz, sqsize), rdi->dparms.node);
                if (!swq)
                        return ERR_PTR(-ENOMEM);

                sz = sizeof(*qp);
                sg_list_sz = 0;
                if (init_attr->srq) {
                        struct rvt_srq *srq = ibsrq_to_rvtsrq(init_attr->srq);

                        if (srq->rq.max_sge > 1)
                                sg_list_sz = sizeof(*qp->r_sg_list) *
                                        (srq->rq.max_sge - 1);
                } else if (init_attr->cap.max_recv_sge > 1)
                        sg_list_sz = sizeof(*qp->r_sg_list) *
                                (init_attr->cap.max_recv_sge - 1);
                qp = kzalloc_node(sz + sg_list_sz, GFP_KERNEL,
                                  rdi->dparms.node);
                if (!qp)
                        goto bail_swq;
                qp->allowed_ops = get_allowed_ops(init_attr->qp_type);

                RCU_INIT_POINTER(qp->next, NULL);
                if (init_attr->qp_type == IB_QPT_RC) {
                        qp->s_ack_queue =
                                kcalloc_node(rvt_max_atomic(rdi),
                                             sizeof(*qp->s_ack_queue),
                                             GFP_KERNEL,
                                             rdi->dparms.node);
                        if (!qp->s_ack_queue)
                                goto bail_qp;
                }
                /* initialize timers needed for rc qp */
                timer_setup(&qp->s_timer, rvt_rc_timeout, 0);
                hrtimer_init(&qp->s_rnr_timer, CLOCK_MONOTONIC,
                             HRTIMER_MODE_REL);
                qp->s_rnr_timer.function = rvt_rc_rnr_retry;

                /*
                 * Driver needs to set up its private QP structure and do any
                 * initialization that is needed.
                 */
                priv = rdi->driver_f.qp_priv_alloc(rdi, qp);
                if (IS_ERR(priv)) {
                        ret = priv;
                        goto bail_qp;
                }
                qp->priv = priv;
                qp->timeout_jiffies =
                        usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
                                         1000UL);
                if (init_attr->srq) {
                        sz = 0;
                } else {
                        qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
                        qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
                        sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
                                sizeof(struct rvt_rwqe);
                        err = rvt_alloc_rq(&qp->r_rq, qp->r_rq.size * sz,
                                           rdi->dparms.node, udata);
                        if (err) {
                                ret = ERR_PTR(err);
                                goto bail_driver_priv;
                        }
                }

                /*
                 * ib_create_qp() will initialize qp->ibqp
                 * except for qp->ibqp.qp_num.
                 */
                spin_lock_init(&qp->r_lock);
                spin_lock_init(&qp->s_hlock);
                spin_lock_init(&qp->s_lock);
                atomic_set(&qp->refcount, 0);
                atomic_set(&qp->local_ops_pending, 0);
                init_waitqueue_head(&qp->wait);
                INIT_LIST_HEAD(&qp->rspwait);
                qp->state = IB_QPS_RESET;
                qp->s_wq = swq;
                qp->s_size = sqsize;
                qp->s_avail = init_attr->cap.max_send_wr;
                qp->s_max_sge = init_attr->cap.max_send_sge;
                if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
                        qp->s_flags = RVT_S_SIGNAL_REQ_WR;
                err = alloc_ud_wq_attr(qp, rdi->dparms.node);
                if (err) {
                        ret = (ERR_PTR(err));
                        goto bail_rq_rvt;
                }

                err = alloc_qpn(rdi, &rdi->qp_dev->qpn_table,
                                init_attr->qp_type,
                                init_attr->port_num);
                if (err < 0) {
                        ret = ERR_PTR(err);
                        goto bail_rq_wq;
                }
                qp->ibqp.qp_num = err;
                qp->port_num = init_attr->port_num;
                rvt_init_qp(rdi, qp, init_attr->qp_type);
                if (rdi->driver_f.qp_priv_init) {
                        err = rdi->driver_f.qp_priv_init(rdi, qp, init_attr);
                        if (err) {
                                ret = ERR_PTR(err);
                                goto bail_rq_wq;
                        }
                }
                break;

        default:
                /* Don't support raw QPs */
                return ERR_PTR(-EINVAL);
        }

        init_attr->cap.max_inline_data = 0;

        /*
         * Return the address of the RWQ as the offset to mmap.
         * See rvt_mmap() for details.
         */
        if (udata && udata->outlen >= sizeof(__u64)) {
                if (!qp->r_rq.wq) {
                        __u64 offset = 0;

                        err = ib_copy_to_udata(udata, &offset,
                                               sizeof(offset));
                        if (err) {
                                ret = ERR_PTR(err);
                                goto bail_qpn;
                        }
                } else {
                        u32 s = sizeof(struct rvt_rwq) + qp->r_rq.size * sz;

                        qp->ip = rvt_create_mmap_info(rdi, s, udata,
                                                      qp->r_rq.wq);
                        if (IS_ERR(qp->ip)) {
                                ret = ERR_CAST(qp->ip);
                                goto bail_qpn;
                        }

                        err = ib_copy_to_udata(udata, &qp->ip->offset,
                                               sizeof(qp->ip->offset));
                        if (err) {
                                ret = ERR_PTR(err);
                                goto bail_ip;
                        }
                }
                qp->pid = current->pid;
        }

        spin_lock(&rdi->n_qps_lock);
        if (rdi->n_qps_allocated == rdi->dparms.props.max_qp) {
                spin_unlock(&rdi->n_qps_lock);
                ret = ERR_PTR(-ENOMEM);
                goto bail_ip;
        }

        rdi->n_qps_allocated++;
        /*
         * Maintain a busy_jiffies variable that will be added to the timeout
         * period in mod_retry_timer and add_retry_timer. This busy jiffies
         * is scaled by the number of rc qps created for the device to reduce
         * the number of timeouts occurring when there is a large number of
         * qps. busy_jiffies is incremented every rc qp scaling interval.
         * The scaling interval is selected based on extensive performance
         * evaluation of targeted workloads.
         */
        if (init_attr->qp_type == IB_QPT_RC) {
                rdi->n_rc_qps++;
                rdi->busy_jiffies = rdi->n_rc_qps / RC_QP_SCALING_INTERVAL;
        }
        spin_unlock(&rdi->n_qps_lock);

        if (qp->ip) {
                spin_lock_irq(&rdi->pending_lock);
                list_add(&qp->ip->pending_mmaps, &rdi->pending_mmaps);
                spin_unlock_irq(&rdi->pending_lock);
        }

        ret = &qp->ibqp;

        return ret;

bail_ip:
        if (qp->ip)
                kref_put(&qp->ip->ref, rvt_release_mmap_info);

bail_qpn:
        rvt_free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);

bail_rq_wq:
        free_ud_wq_attr(qp);

bail_rq_rvt:
        rvt_free_rq(&qp->r_rq);

bail_driver_priv:
        rdi->driver_f.qp_priv_free(rdi, qp);

bail_qp:
        kfree(qp->s_ack_queue);
        kfree(qp);

bail_swq:
        vfree(swq);

        return ret;
}
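
/*
 * Minimal usage sketch (hypothetical caller, via the core verbs API; the
 * pd/scq/rcq objects are assumed to exist already):
 *
 *      struct ib_qp_init_attr attr = {
 *              .qp_type = IB_QPT_RC,
 *              .sq_sig_type = IB_SIGNAL_REQ_WR,
 *              .cap = { .max_send_wr = 128, .max_recv_wr = 128,
 *                       .max_send_sge = 4, .max_recv_sge = 4 },
 *              .send_cq = scq, .recv_cq = rcq,
 *      };
 *      struct ib_qp *qp = ib_create_qp(pd, &attr);
 *
 * ib_create_qp() lands here for rdmavt-backed devices.
 */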

/**
 * rvt_error_qp - put a QP into the error state
 * @qp: the QP to put into the error state
 * @err: the receive completion error to signal if a RWQE is active
 *
 * Flushes both send and receive work queues.
 *
 * Return: true if last WQE event should be generated.
 * The QP r_lock and s_lock should be held and interrupts disabled.
 * If we are already in error state, just return.
 */
int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err)
{
        struct ib_wc wc;
        int ret = 0;
        struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);

        lockdep_assert_held(&qp->r_lock);
        lockdep_assert_held(&qp->s_lock);
        if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
                goto bail;

        qp->state = IB_QPS_ERR;

        if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
                qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
                del_timer(&qp->s_timer);
        }

        if (qp->s_flags & RVT_S_ANY_WAIT_SEND)
                qp->s_flags &= ~RVT_S_ANY_WAIT_SEND;

        rdi->driver_f.notify_error_qp(qp);

        /* Schedule the sending tasklet to drain the send work queue. */
        if (READ_ONCE(qp->s_last) != qp->s_head)
                rdi->driver_f.schedule_send(qp);

        rvt_clear_mr_refs(qp, 0);

        memset(&wc, 0, sizeof(wc));
        wc.qp = &qp->ibqp;
        wc.opcode = IB_WC_RECV;

        if (test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags)) {
                wc.wr_id = qp->r_wr_id;
                wc.status = err;
                rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
        }
        wc.status = IB_WC_WR_FLUSH_ERR;

        if (qp->r_rq.kwq) {
                u32 head;
                u32 tail;
                struct rvt_rwq *wq = NULL;
                struct rvt_krwq *kwq = NULL;

                spin_lock(&qp->r_rq.kwq->c_lock);
                /* qp->ip used to validate if there is a user buffer mmaped */
                if (qp->ip) {
                        wq = qp->r_rq.wq;
                        head = RDMA_READ_UAPI_ATOMIC(wq->head);
                        tail = RDMA_READ_UAPI_ATOMIC(wq->tail);
                } else {
                        kwq = qp->r_rq.kwq;
                        head = kwq->head;
                        tail = kwq->tail;
                }
                /* sanity check pointers before trusting them */
                if (head >= qp->r_rq.size)
                        head = 0;
                if (tail >= qp->r_rq.size)
                        tail = 0;
                while (tail != head) {
                        wc.wr_id = rvt_get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
                        if (++tail >= qp->r_rq.size)
                                tail = 0;
                        rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
                }
                if (qp->ip)
                        RDMA_WRITE_UAPI_ATOMIC(wq->tail, tail);
                else
                        kwq->tail = tail;
                spin_unlock(&qp->r_rq.kwq->c_lock);
        } else if (qp->ibqp.event_handler) {
                ret = 1;
        }

bail:
        return ret;
}
EXPORT_SYMBOL(rvt_error_qp);

/*
 * Put the QP into the hash table.
 * The hash table holds a reference to the QP.
 */
static void rvt_insert_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
        struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
        unsigned long flags;

        rvt_get_qp(qp);
        spin_lock_irqsave(&rdi->qp_dev->qpt_lock, flags);

        if (qp->ibqp.qp_num <= 1) {
                rcu_assign_pointer(rvp->qp[qp->ibqp.qp_num], qp);
        } else {
                u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits);

                qp->next = rdi->qp_dev->qp_table[n];
                rcu_assign_pointer(rdi->qp_dev->qp_table[n], qp);
                trace_rvt_qpinsert(qp, n);
        }

        spin_unlock_irqrestore(&rdi->qp_dev->qpt_lock, flags);
}

/**
 * rvt_modify_qp - modify the attributes of a queue pair
 * @ibqp: the queue pair whose attributes we're modifying
 * @attr: the new attributes
 * @attr_mask: the mask of attributes to modify
 * @udata: user data for libibverbs.so
 *
 * Return: 0 on success, otherwise returns an errno.
 */
int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                  int attr_mask, struct ib_udata *udata)
{
        struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
        struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
        enum ib_qp_state cur_state, new_state;
        struct ib_event ev;
        int lastwqe = 0;
        int mig = 0;
        int pmtu = 0; /* for gcc warning only */
        int opa_ah;

        spin_lock_irq(&qp->r_lock);
        spin_lock(&qp->s_hlock);
        spin_lock(&qp->s_lock);

        cur_state = attr_mask & IB_QP_CUR_STATE ?
                attr->cur_qp_state : qp->state;
        new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
        opa_ah = rdma_cap_opa_ah(ibqp->device, qp->port_num);

        if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
                                attr_mask))
                goto inval;

        if (rdi->driver_f.check_modify_qp &&
            rdi->driver_f.check_modify_qp(qp, attr, attr_mask, udata))
                goto inval;

        if (attr_mask & IB_QP_AV) {
                if (opa_ah) {
                        if (rdma_ah_get_dlid(&attr->ah_attr) >=
                            opa_get_mcast_base(OPA_MCAST_NR))
                                goto inval;
                } else {
                        if (rdma_ah_get_dlid(&attr->ah_attr) >=
                            be16_to_cpu(IB_MULTICAST_LID_BASE))
                                goto inval;
                }

                if (rvt_check_ah(qp->ibqp.device, &attr->ah_attr))
                        goto inval;
        }

        if (attr_mask & IB_QP_ALT_PATH) {
                if (opa_ah) {
                        if (rdma_ah_get_dlid(&attr->alt_ah_attr) >=
                            opa_get_mcast_base(OPA_MCAST_NR))
                                goto inval;
                } else {
                        if (rdma_ah_get_dlid(&attr->alt_ah_attr) >=
                            be16_to_cpu(IB_MULTICAST_LID_BASE))
                                goto inval;
                }

                if (rvt_check_ah(qp->ibqp.device, &attr->alt_ah_attr))
                        goto inval;
                if (attr->alt_pkey_index >= rvt_get_npkeys(rdi))
                        goto inval;
        }

        if (attr_mask & IB_QP_PKEY_INDEX)
                if (attr->pkey_index >= rvt_get_npkeys(rdi))
                        goto inval;

        if (attr_mask & IB_QP_MIN_RNR_TIMER)
                if (attr->min_rnr_timer > 31)
                        goto inval;

        if (attr_mask & IB_QP_PORT)
                if (qp->ibqp.qp_type == IB_QPT_SMI ||
                    qp->ibqp.qp_type == IB_QPT_GSI ||
                    attr->port_num == 0 ||
                    attr->port_num > ibqp->device->phys_port_cnt)
                        goto inval;

        if (attr_mask & IB_QP_DEST_QPN)
                if (attr->dest_qp_num > RVT_QPN_MASK)
                        goto inval;

        if (attr_mask & IB_QP_RETRY_CNT)
                if (attr->retry_cnt > 7)
                        goto inval;

        if (attr_mask & IB_QP_RNR_RETRY)
                if (attr->rnr_retry > 7)
                        goto inval;

        /*
         * Don't allow invalid path_mtu values. OK to set greater
         * than the active mtu (or even the max_cap, if we have tuned
         * that to a small mtu). We'll set qp->path_mtu
         * to the lesser of requested attribute mtu and active,
         * for packetizing messages.
         * Note that the QP port has to be set in INIT and MTU in RTR.
         */
        if (attr_mask & IB_QP_PATH_MTU) {
                pmtu = rdi->driver_f.get_pmtu_from_attr(rdi, qp, attr);
                if (pmtu < 0)
                        goto inval;
        }

        if (attr_mask & IB_QP_PATH_MIG_STATE) {
                if (attr->path_mig_state == IB_MIG_REARM) {
                        if (qp->s_mig_state == IB_MIG_ARMED)
                                goto inval;
                        if (new_state != IB_QPS_RTS)
                                goto inval;
                } else if (attr->path_mig_state == IB_MIG_MIGRATED) {
                        if (qp->s_mig_state == IB_MIG_REARM)
                                goto inval;
                        if (new_state != IB_QPS_RTS && new_state != IB_QPS_SQD)
                                goto inval;
                        if (qp->s_mig_state == IB_MIG_ARMED)
                                mig = 1;
                } else {
                        goto inval;
                }
        }

        if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
                if (attr->max_dest_rd_atomic > rdi->dparms.max_rdma_atomic)
                        goto inval;

        switch (new_state) {
        case IB_QPS_RESET:
                if (qp->state != IB_QPS_RESET)
                        _rvt_reset_qp(rdi, qp, ibqp->qp_type);
                break;

        case IB_QPS_RTR:
                /* Allow event to re-trigger if QP set to RTR more than once */
                qp->r_flags &= ~RVT_R_COMM_EST;
                qp->state = new_state;
                break;

        case IB_QPS_SQD:
                qp->s_draining = qp->s_last != qp->s_cur;
                qp->state = new_state;
                break;

        case IB_QPS_SQE:
                if (qp->ibqp.qp_type == IB_QPT_RC)
                        goto inval;
                qp->state = new_state;
                break;

        case IB_QPS_ERR:
                lastwqe = rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
                break;

        default:
                qp->state = new_state;
                break;
        }

        if (attr_mask & IB_QP_PKEY_INDEX)
                qp->s_pkey_index = attr->pkey_index;

        if (attr_mask & IB_QP_PORT)
                qp->port_num = attr->port_num;

        if (attr_mask & IB_QP_DEST_QPN)
                qp->remote_qpn = attr->dest_qp_num;

        if (attr_mask & IB_QP_SQ_PSN) {
                qp->s_next_psn = attr->sq_psn & rdi->dparms.psn_modify_mask;
                qp->s_psn = qp->s_next_psn;
                qp->s_sending_psn = qp->s_next_psn;
                qp->s_last_psn = qp->s_next_psn - 1;
                qp->s_sending_hpsn = qp->s_last_psn;
        }

        if (attr_mask & IB_QP_RQ_PSN)
                qp->r_psn = attr->rq_psn & rdi->dparms.psn_modify_mask;

        if (attr_mask & IB_QP_ACCESS_FLAGS)
                qp->qp_access_flags = attr->qp_access_flags;

        if (attr_mask & IB_QP_AV) {
                rdma_replace_ah_attr(&qp->remote_ah_attr, &attr->ah_attr);
                qp->s_srate = rdma_ah_get_static_rate(&attr->ah_attr);
                qp->srate_mbps = ib_rate_to_mbps(qp->s_srate);
        }

        if (attr_mask & IB_QP_ALT_PATH) {
                rdma_replace_ah_attr(&qp->alt_ah_attr, &attr->alt_ah_attr);
                qp->s_alt_pkey_index = attr->alt_pkey_index;
        }

        if (attr_mask & IB_QP_PATH_MIG_STATE) {
                qp->s_mig_state = attr->path_mig_state;
                if (mig) {
                        qp->remote_ah_attr = qp->alt_ah_attr;
                        qp->port_num = rdma_ah_get_port_num(&qp->alt_ah_attr);
                        qp->s_pkey_index = qp->s_alt_pkey_index;
                }
        }

        if (attr_mask & IB_QP_PATH_MTU) {
                qp->pmtu = rdi->driver_f.mtu_from_qp(rdi, qp, pmtu);
                qp->log_pmtu = ilog2(qp->pmtu);
        }

        if (attr_mask & IB_QP_RETRY_CNT) {
                qp->s_retry_cnt = attr->retry_cnt;
                qp->s_retry = attr->retry_cnt;
        }

        if (attr_mask & IB_QP_RNR_RETRY) {
                qp->s_rnr_retry_cnt = attr->rnr_retry;
                qp->s_rnr_retry = attr->rnr_retry;
        }

        if (attr_mask & IB_QP_MIN_RNR_TIMER)
                qp->r_min_rnr_timer = attr->min_rnr_timer;

        if (attr_mask & IB_QP_TIMEOUT) {
                qp->timeout = attr->timeout;
                qp->timeout_jiffies = rvt_timeout_to_jiffies(qp->timeout);
        }

        if (attr_mask & IB_QP_QKEY)
                qp->qkey = attr->qkey;

        if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
                qp->r_max_rd_atomic = attr->max_dest_rd_atomic;

        if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
                qp->s_max_rd_atomic = attr->max_rd_atomic;

        if (rdi->driver_f.modify_qp)
                rdi->driver_f.modify_qp(qp, attr, attr_mask, udata);

        spin_unlock(&qp->s_lock);
        spin_unlock(&qp->s_hlock);
        spin_unlock_irq(&qp->r_lock);

        if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
                rvt_insert_qp(rdi, qp);

        if (lastwqe) {
                ev.device = qp->ibqp.device;
                ev.element.qp = &qp->ibqp;
                ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
                qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
        }
        if (mig) {
                ev.device = qp->ibqp.device;
                ev.element.qp = &qp->ibqp;
                ev.event = IB_EVENT_PATH_MIG;
                qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
        }
        return 0;

inval:
        spin_unlock(&qp->s_lock);
        spin_unlock(&qp->s_hlock);
        spin_unlock_irq(&qp->r_lock);
        return -EINVAL;
}

/**
 * rvt_destroy_qp - destroy a queue pair
 * @ibqp: the queue pair to destroy
 * @udata: unused by the driver
 *
 * Note that this can be called while the QP is actively sending or
 * receiving!
 *
 * Return: 0 on success.
 */
int rvt_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
{
        struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
        struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);

        rvt_reset_qp(rdi, qp, ibqp->qp_type);

        wait_event(qp->wait, !atomic_read(&qp->refcount));
        /* qpn is now available for use again */
        rvt_free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);

        spin_lock(&rdi->n_qps_lock);
        rdi->n_qps_allocated--;
        if (qp->ibqp.qp_type == IB_QPT_RC) {
                rdi->n_rc_qps--;
                rdi->busy_jiffies = rdi->n_rc_qps / RC_QP_SCALING_INTERVAL;
        }
        spin_unlock(&rdi->n_qps_lock);

        if (qp->ip)
                kref_put(&qp->ip->ref, rvt_release_mmap_info);
        kvfree(qp->r_rq.kwq);
        rdi->driver_f.qp_priv_free(rdi, qp);
        kfree(qp->s_ack_queue);
        rdma_destroy_ah_attr(&qp->remote_ah_attr);
        rdma_destroy_ah_attr(&qp->alt_ah_attr);
        free_ud_wq_attr(qp);
        vfree(qp->s_wq);
        kfree(qp);
        return 0;
}

/**
 * rvt_query_qp - query an ibqp
 * @ibqp: IB qp to query
 * @attr: attr struct to fill in
 * @attr_mask: attr mask ignored
 * @init_attr: struct to fill in
 *
 * Return: always 0
 */
int rvt_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                 int attr_mask, struct ib_qp_init_attr *init_attr)
{
        struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
        struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);

        attr->qp_state = qp->state;
        attr->cur_qp_state = attr->qp_state;
        attr->path_mtu = rdi->driver_f.mtu_to_path_mtu(qp->pmtu);
        attr->path_mig_state = qp->s_mig_state;
        attr->qkey = qp->qkey;
        attr->rq_psn = qp->r_psn & rdi->dparms.psn_mask;
        attr->sq_psn = qp->s_next_psn & rdi->dparms.psn_mask;
        attr->dest_qp_num = qp->remote_qpn;
        attr->qp_access_flags = qp->qp_access_flags;
        attr->cap.max_send_wr = qp->s_size - 1 -
                rdi->dparms.reserved_operations;
        attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
        attr->cap.max_send_sge = qp->s_max_sge;
        attr->cap.max_recv_sge = qp->r_rq.max_sge;
        attr->cap.max_inline_data = 0;
        attr->ah_attr = qp->remote_ah_attr;
        attr->alt_ah_attr = qp->alt_ah_attr;
        attr->pkey_index = qp->s_pkey_index;
        attr->alt_pkey_index = qp->s_alt_pkey_index;
        attr->en_sqd_async_notify = 0;
        attr->sq_draining = qp->s_draining;
        attr->max_rd_atomic = qp->s_max_rd_atomic;
        attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
        attr->min_rnr_timer = qp->r_min_rnr_timer;
        attr->port_num = qp->port_num;
        attr->timeout = qp->timeout;
        attr->retry_cnt = qp->s_retry_cnt;
        attr->rnr_retry = qp->s_rnr_retry_cnt;
        attr->alt_port_num =
                rdma_ah_get_port_num(&qp->alt_ah_attr);
        attr->alt_timeout = qp->alt_timeout;

        init_attr->event_handler = qp->ibqp.event_handler;
        init_attr->qp_context = qp->ibqp.qp_context;
        init_attr->send_cq = qp->ibqp.send_cq;
        init_attr->recv_cq = qp->ibqp.recv_cq;
        init_attr->srq = qp->ibqp.srq;
        init_attr->cap = attr->cap;
        if (qp->s_flags & RVT_S_SIGNAL_REQ_WR)
                init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
        else
                init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
        init_attr->qp_type = qp->ibqp.qp_type;
        init_attr->port_num = qp->port_num;
        return 0;
}

/**
 * rvt_post_recv - post a receive on a QP
 * @ibqp: the QP to post the receive on
 * @wr: the WR to post
 * @bad_wr: the first bad WR is put here
 *
 * This may be called from interrupt context.
 *
 * Return: 0 on success otherwise errno
 */
int rvt_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
                  const struct ib_recv_wr **bad_wr)
{
        struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
        struct rvt_krwq *wq = qp->r_rq.kwq;
        unsigned long flags;
        int qp_err_flush = (ib_rvt_state_ops[qp->state] & RVT_FLUSH_RECV) &&
                           !qp->ibqp.srq;

        /* Check that state is OK to post receive. */
        if (!(ib_rvt_state_ops[qp->state] & RVT_POST_RECV_OK) || !wq) {
                *bad_wr = wr;
                return -EINVAL;
        }

        for (; wr; wr = wr->next) {
                struct rvt_rwqe *wqe;
                u32 next;
                int i;

                if ((unsigned)wr->num_sge > qp->r_rq.max_sge) {
                        *bad_wr = wr;
                        return -EINVAL;
                }

                spin_lock_irqsave(&qp->r_rq.kwq->p_lock, flags);
                next = wq->head + 1;
                if (next >= qp->r_rq.size)
                        next = 0;
                if (next == READ_ONCE(wq->tail)) {
                        spin_unlock_irqrestore(&qp->r_rq.kwq->p_lock, flags);
                        *bad_wr = wr;
                        return -ENOMEM;
                }
                if (unlikely(qp_err_flush)) {
                        struct ib_wc wc;

                        memset(&wc, 0, sizeof(wc));
                        wc.qp = &qp->ibqp;
                        wc.opcode = IB_WC_RECV;
                        wc.wr_id = wr->wr_id;
                        wc.status = IB_WC_WR_FLUSH_ERR;
                        rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
                } else {
                        wqe = rvt_get_rwqe_ptr(&qp->r_rq, wq->head);
                        wqe->wr_id = wr->wr_id;
                        wqe->num_sge = wr->num_sge;
                        for (i = 0; i < wr->num_sge; i++) {
                                wqe->sg_list[i].addr = wr->sg_list[i].addr;
                                wqe->sg_list[i].length = wr->sg_list[i].length;
                                wqe->sg_list[i].lkey = wr->sg_list[i].lkey;
                        }
                        /*
                         * Make sure queue entry is written
                         * before the head index.
                         */
                        smp_store_release(&wq->head, next);
                }
                spin_unlock_irqrestore(&qp->r_rq.kwq->p_lock, flags);
        }
        return 0;
}
1883
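/*
 * Illustrative sketch (hypothetical buffer/MR/QP setup, not part of
 * the driver): a kernel consumer reaches rvt_post_recv() through
 * ib_post_recv().  wr_id is opaque and comes back in wc.wr_id:
 *
 *	struct ib_sge sge = {
 *		.addr = dma_addr,
 *		.length = buf_len,
 *		.lkey = mr->lkey,
 *	};
 *	struct ib_recv_wr wr = {
 *		.wr_id = (u64)(uintptr_t)ctx,
 *		.sg_list = &sge,
 *		.num_sge = 1,
 *	};
 *	const struct ib_recv_wr *bad_wr;
 *	int err = ib_post_recv(qp, &wr, &bad_wr);
 *
 * Note the ring-full test above: one slot always stays empty, so a
 * receive queue of size N holds at most N - 1 outstanding WQEs.
 */
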
1884/**
1885 * rvt_qp_valid_operation - validate post send wr request
1886 * @qp - the qp
 1887 * @post_parms - the post send table for the driver
1888 * @wr - the work request
1889 *
1890 * The routine validates the operation based on the
 1891 * validation table and returns the length of the operation,
 1892 * which can extend beyond the ib_send_wr. Operation-
 1893 * dependent flags key the atomic operation validation.
1894 *
1895 * There is an exception for UD qps that validates the pd and
1896 * overrides the length to include the additional UD specific
1897 * length.
1898 *
1899 * Returns a negative error or the length of the work request
1900 * for building the swqe.
1901 */
1902static inline int rvt_qp_valid_operation(
1903 struct rvt_qp *qp,
1904 const struct rvt_operation_params *post_parms,
1905 const struct ib_send_wr *wr)
1906{
1907 int len;
1908
1909 if (wr->opcode >= RVT_OPERATION_MAX || !post_parms[wr->opcode].length)
1910 return -EINVAL;
1911 if (!(post_parms[wr->opcode].qpt_support & BIT(qp->ibqp.qp_type)))
1912 return -EINVAL;
1913 if ((post_parms[wr->opcode].flags & RVT_OPERATION_PRIV) &&
1914 ibpd_to_rvtpd(qp->ibqp.pd)->user)
1915 return -EINVAL;
1916 if (post_parms[wr->opcode].flags & RVT_OPERATION_ATOMIC_SGE &&
1917 (wr->num_sge == 0 ||
1918 wr->sg_list[0].length < sizeof(u64) ||
1919 wr->sg_list[0].addr & (sizeof(u64) - 1)))
1920 return -EINVAL;
1921 if (post_parms[wr->opcode].flags & RVT_OPERATION_ATOMIC &&
1922 !qp->s_max_rd_atomic)
1923 return -EINVAL;
1924 len = post_parms[wr->opcode].length;
1925 /* UD specific */
1926 if (qp->ibqp.qp_type != IB_QPT_UC &&
1927 qp->ibqp.qp_type != IB_QPT_RC) {
1928 if (qp->ibqp.pd != ud_wr(wr)->ah->pd)
1929 return -EINVAL;
1930 len = sizeof(struct ib_ud_wr);
1931 }
1932 return len;
1933}
1934
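/*
 * Worked example of the RVT_OPERATION_ATOMIC_SGE checks above
 * (illustrative values): an atomic WR with num_sge == 1,
 * sg_list[0].length == 8 and sg_list[0].addr == 0x1000 passes, since
 * 0x1000 & (sizeof(u64) - 1) == 0.  The same WR with addr == 0x1004
 * fails the alignment test (0x1004 & 7 == 4), and length == 4 fails
 * the sizeof(u64) minimum.
 */
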
1935/**
1936 * rvt_qp_is_avail - determine queue capacity
1937 * @qp: the qp
1938 * @rdi: the rdmavt device
1939 * @reserved_op: is reserved operation
1940 *
1941 * This assumes the s_hlock is held but the s_last
 1942 * qp variable may change concurrently.
1943 *
1944 * For non reserved operations, the qp->s_avail
1945 * may be changed.
1946 *
1947 * The return value is zero or a -ENOMEM.
1948 */
1949static inline int rvt_qp_is_avail(
1950 struct rvt_qp *qp,
1951 struct rvt_dev_info *rdi,
1952 bool reserved_op)
1953{
1954 u32 slast;
1955 u32 avail;
1956 u32 reserved_used;
1957
1958 /* see rvt_qp_wqe_unreserve() */
1959 smp_mb__before_atomic();
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001960 if (unlikely(reserved_op)) {
1961 /* see rvt_qp_wqe_unreserve() */
David Brazdil0f672f62019-12-10 10:32:29 +00001962 reserved_used = atomic_read(&qp->s_reserved_used);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001963 if (reserved_used >= rdi->dparms.reserved_operations)
1964 return -ENOMEM;
1965 return 0;
1966 }
1967 /* non-reserved operations */
1968 if (likely(qp->s_avail))
1969 return 0;
David Brazdil0f672f62019-12-10 10:32:29 +00001970 /* See rvt_qp_complete_swqe() */
1971 slast = smp_load_acquire(&qp->s_last);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001972 if (qp->s_head >= slast)
1973 avail = qp->s_size - (qp->s_head - slast);
1974 else
1975 avail = slast - qp->s_head;
1976
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001977 reserved_used = atomic_read(&qp->s_reserved_used);
1978 avail = avail - 1 -
1979 (rdi->dparms.reserved_operations - reserved_used);
 1980	/* ensure we don't assign a negative s_avail */
1981 if ((s32)avail <= 0)
1982 return -ENOMEM;
1983 qp->s_avail = avail;
1984 if (WARN_ON(qp->s_avail >
1985 (qp->s_size - 1 - rdi->dparms.reserved_operations)))
1986 rvt_pr_err(rdi,
1987 "More avail entries than QP RB size.\nQP: %u, size: %u, avail: %u\nhead: %u, tail: %u, cur: %u, acked: %u, last: %u",
1988 qp->ibqp.qp_num, qp->s_size, qp->s_avail,
1989 qp->s_head, qp->s_tail, qp->s_cur,
1990 qp->s_acked, qp->s_last);
1991 return 0;
1992}
1993
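/*
 * Worked example of the availability arithmetic above (illustrative
 * numbers): s_size = 32, s_head = 10, s_last = 5,
 * reserved_operations = 2, reserved_used = 0:
 *
 *	avail = 32 - (10 - 5) = 27
 *	avail = 27 - 1 - (2 - 0) = 24
 *
 * so s_avail becomes 24, within the WARN_ON bound of 32 - 1 - 2 = 29.
 * In the wrapped case (s_head = 3, s_last = 30), the first step is
 * instead avail = 30 - 3 = 27.
 */
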
1994/**
1995 * rvt_post_one_wr - post one RC, UC, or UD send work request
1996 * @qp: the QP to post on
1997 * @wr: the work request to send
1998 */
1999static int rvt_post_one_wr(struct rvt_qp *qp,
2000 const struct ib_send_wr *wr,
David Brazdil0f672f62019-12-10 10:32:29 +00002001 bool *call_send)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002002{
2003 struct rvt_swqe *wqe;
2004 u32 next;
2005 int i;
2006 int j;
2007 int acc;
2008 struct rvt_lkey_table *rkt;
2009 struct rvt_pd *pd;
2010 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
2011 u8 log_pmtu;
2012 int ret;
2013 size_t cplen;
2014 bool reserved_op;
2015 int local_ops_delayed = 0;
2016
2017 BUILD_BUG_ON(IB_QPT_MAX >= (sizeof(u32) * BITS_PER_BYTE));
2018
2019 /* IB spec says that num_sge == 0 is OK. */
2020 if (unlikely(wr->num_sge > qp->s_max_sge))
2021 return -EINVAL;
2022
2023 ret = rvt_qp_valid_operation(qp, rdi->post_parms, wr);
2024 if (ret < 0)
2025 return ret;
2026 cplen = ret;
2027
2028 /*
2029 * Local operations include fast register and local invalidate.
2030 * Fast register needs to be processed immediately because the
2031 * registered lkey may be used by following work requests and the
2032 * lkey needs to be valid at the time those requests are posted.
2033 * Local invalidate can be processed immediately if fencing is
2034 * not required and no previous local invalidate ops are pending.
2035 * Signaled local operations that have been processed immediately
2036 * need to have requests with "completion only" flags set posted
2037 * to the send queue in order to generate completions.
2038 */
2039 if ((rdi->post_parms[wr->opcode].flags & RVT_OPERATION_LOCAL)) {
2040 switch (wr->opcode) {
2041 case IB_WR_REG_MR:
2042 ret = rvt_fast_reg_mr(qp,
2043 reg_wr(wr)->mr,
2044 reg_wr(wr)->key,
2045 reg_wr(wr)->access);
2046 if (ret || !(wr->send_flags & IB_SEND_SIGNALED))
2047 return ret;
2048 break;
2049 case IB_WR_LOCAL_INV:
2050 if ((wr->send_flags & IB_SEND_FENCE) ||
2051 atomic_read(&qp->local_ops_pending)) {
2052 local_ops_delayed = 1;
2053 } else {
2054 ret = rvt_invalidate_rkey(
2055 qp, wr->ex.invalidate_rkey);
2056 if (ret || !(wr->send_flags & IB_SEND_SIGNALED))
2057 return ret;
2058 }
2059 break;
2060 default:
2061 return -EINVAL;
2062 }
2063 }
2064
2065 reserved_op = rdi->post_parms[wr->opcode].flags &
2066 RVT_OPERATION_USE_RESERVE;
2067 /* check for avail */
2068 ret = rvt_qp_is_avail(qp, rdi, reserved_op);
2069 if (ret)
2070 return ret;
2071 next = qp->s_head + 1;
2072 if (next >= qp->s_size)
2073 next = 0;
2074
2075 rkt = &rdi->lkey_table;
2076 pd = ibpd_to_rvtpd(qp->ibqp.pd);
2077 wqe = rvt_get_swqe_ptr(qp, qp->s_head);
2078
2079 /* cplen has length from above */
2080 memcpy(&wqe->wr, wr, cplen);
2081
2082 wqe->length = 0;
2083 j = 0;
2084 if (wr->num_sge) {
2085 struct rvt_sge *last_sge = NULL;
2086
2087 acc = wr->opcode >= IB_WR_RDMA_READ ?
2088 IB_ACCESS_LOCAL_WRITE : 0;
2089 for (i = 0; i < wr->num_sge; i++) {
2090 u32 length = wr->sg_list[i].length;
2091
2092 if (length == 0)
2093 continue;
2094 ret = rvt_lkey_ok(rkt, pd, &wqe->sg_list[j], last_sge,
2095 &wr->sg_list[i], acc);
2096 if (unlikely(ret < 0))
2097 goto bail_inval_free;
2098 wqe->length += length;
2099 if (ret)
2100 last_sge = &wqe->sg_list[j];
2101 j += ret;
2102 }
2103 wqe->wr.num_sge = j;
2104 }
2105
David Brazdil0f672f62019-12-10 10:32:29 +00002106 /*
2107 * Calculate and set SWQE PSN values prior to handing it off
 2108 * to the driver's check routine. This gives the driver the
2109 * opportunity to adjust PSN values based on internal checks.
2110 */
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002111 log_pmtu = qp->log_pmtu;
David Brazdil0f672f62019-12-10 10:32:29 +00002112 if (qp->allowed_ops == IB_OPCODE_UD) {
2113 struct rvt_ah *ah = rvt_get_swqe_ah(wqe);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002114
2115 log_pmtu = ah->log_pmtu;
David Brazdil0f672f62019-12-10 10:32:29 +00002116 rdma_copy_ah_attr(wqe->ud_wr.attr, &ah->attr);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002117 }
2118
2119 if (rdi->post_parms[wr->opcode].flags & RVT_OPERATION_LOCAL) {
2120 if (local_ops_delayed)
2121 atomic_inc(&qp->local_ops_pending);
2122 else
2123 wqe->wr.send_flags |= RVT_SEND_COMPLETION_ONLY;
2124 wqe->ssn = 0;
2125 wqe->psn = 0;
2126 wqe->lpsn = 0;
2127 } else {
2128 wqe->ssn = qp->s_ssn++;
2129 wqe->psn = qp->s_next_psn;
2130 wqe->lpsn = wqe->psn +
2131 (wqe->length ?
2132 ((wqe->length - 1) >> log_pmtu) :
2133 0);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002134 }
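	/*
	 * Example of the lpsn arithmetic above (illustrative values):
	 * with a 4096-byte path MTU, log_pmtu = 12, so an 8192-byte
	 * request spans (8192 - 1) >> 12 = 1 additional packet and
	 * gets lpsn = psn + 1; a zero-length request keeps lpsn = psn.
	 */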
David Brazdil0f672f62019-12-10 10:32:29 +00002135
2136 /* general part of wqe valid - allow for driver checks */
2137 if (rdi->driver_f.setup_wqe) {
2138 ret = rdi->driver_f.setup_wqe(qp, wqe, call_send);
2139 if (ret < 0)
2140 goto bail_inval_free_ref;
2141 }
2142
2143 if (!(rdi->post_parms[wr->opcode].flags & RVT_OPERATION_LOCAL))
2144 qp->s_next_psn = wqe->lpsn + 1;
2145
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002146 if (unlikely(reserved_op)) {
2147 wqe->wr.send_flags |= RVT_SEND_RESERVE_USED;
2148 rvt_qp_wqe_reserve(qp, wqe);
2149 } else {
2150 wqe->wr.send_flags &= ~RVT_SEND_RESERVE_USED;
2151 qp->s_avail--;
2152 }
2153 trace_rvt_post_one_wr(qp, wqe, wr->num_sge);
2154 smp_wmb(); /* see request builders */
2155 qp->s_head = next;
2156
2157 return 0;
2158
David Brazdil0f672f62019-12-10 10:32:29 +00002159bail_inval_free_ref:
2160 if (qp->allowed_ops == IB_OPCODE_UD)
2161 rdma_destroy_ah_attr(wqe->ud_wr.attr);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002162bail_inval_free:
2163 /* release mr holds */
2164 while (j) {
2165 struct rvt_sge *sge = &wqe->sg_list[--j];
2166
2167 rvt_put_mr(sge->mr);
2168 }
2169 return ret;
2170}
2171
2172/**
2173 * rvt_post_send - post a send on a QP
2174 * @ibqp: the QP to post the send on
2175 * @wr: the list of work requests to post
2176 * @bad_wr: the first bad WR is put here
2177 *
2178 * This may be called from interrupt context.
2179 *
2180 * Return: 0 on success else errno
2181 */
2182int rvt_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
2183 const struct ib_send_wr **bad_wr)
2184{
2185 struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
2186 struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
2187 unsigned long flags = 0;
David Brazdil0f672f62019-12-10 10:32:29 +00002188 bool call_send;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002189 unsigned nreq = 0;
2190 int err = 0;
2191
2192 spin_lock_irqsave(&qp->s_hlock, flags);
2193
2194 /*
2195 * Ensure QP state is such that we can send. If not bail out early,
2196 * there is no need to do this every time we post a send.
2197 */
2198 if (unlikely(!(ib_rvt_state_ops[qp->state] & RVT_POST_SEND_OK))) {
2199 spin_unlock_irqrestore(&qp->s_hlock, flags);
2200 return -EINVAL;
2201 }
2202
2203 /*
 2204 * If the send queue is empty and we only have a single WR, just go
 2205 * ahead and kick the send engine into gear. Otherwise we will always
2206 * just schedule the send to happen later.
2207 */
2208 call_send = qp->s_head == READ_ONCE(qp->s_last) && !wr->next;
2209
2210 for (; wr; wr = wr->next) {
2211 err = rvt_post_one_wr(qp, wr, &call_send);
2212 if (unlikely(err)) {
2213 *bad_wr = wr;
2214 goto bail;
2215 }
2216 nreq++;
2217 }
2218bail:
2219 spin_unlock_irqrestore(&qp->s_hlock, flags);
2220 if (nreq) {
David Brazdil0f672f62019-12-10 10:32:29 +00002221 /*
2222 * Only call do_send if there is exactly one packet, and the
2223 * driver said it was ok.
2224 */
2225 if (nreq == 1 && call_send)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002226 rdi->driver_f.do_send(qp);
2227 else
2228 rdi->driver_f.schedule_send_no_lock(qp);
2229 }
2230 return err;
2231}
2232
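/*
 * Illustrative sketch (hypothetical MR/QP setup, not part of the
 * driver): a single signaled send posted on an idle queue takes the
 * call_send fast path above and, provided the driver's setup_wqe()
 * does not veto it, is handed straight to do_send():
 *
 *	struct ib_sge sge = {
 *		.addr = dma_addr,
 *		.length = len,
 *		.lkey = mr->lkey,
 *	};
 *	struct ib_send_wr wr = {
 *		.opcode = IB_WR_SEND,
 *		.send_flags = IB_SEND_SIGNALED,
 *		.sg_list = &sge,
 *		.num_sge = 1,
 *	};
 *	const struct ib_send_wr *bad_wr;
 *	int err = ib_post_send(qp, &wr, &bad_wr);
 *
 * A chain of WRs (wr.next != NULL) clears call_send and is deferred
 * via schedule_send_no_lock() instead.
 */
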
2233/**
2234 * rvt_post_srq_receive - post a receive on a shared receive queue
2235 * @ibsrq: the SRQ to post the receive on
2236 * @wr: the list of work requests to post
2237 * @bad_wr: A pointer to the first WR to cause a problem is put here
2238 *
2239 * This may be called from interrupt context.
2240 *
2241 * Return: 0 on success else errno
2242 */
2243int rvt_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
2244 const struct ib_recv_wr **bad_wr)
2245{
2246 struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);
David Brazdil0f672f62019-12-10 10:32:29 +00002247 struct rvt_krwq *wq;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002248 unsigned long flags;
2249
2250 for (; wr; wr = wr->next) {
2251 struct rvt_rwqe *wqe;
2252 u32 next;
2253 int i;
2254
2255 if ((unsigned)wr->num_sge > srq->rq.max_sge) {
2256 *bad_wr = wr;
2257 return -EINVAL;
2258 }
2259
David Brazdil0f672f62019-12-10 10:32:29 +00002260 spin_lock_irqsave(&srq->rq.kwq->p_lock, flags);
2261 wq = srq->rq.kwq;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002262 next = wq->head + 1;
2263 if (next >= srq->rq.size)
2264 next = 0;
David Brazdil0f672f62019-12-10 10:32:29 +00002265 if (next == READ_ONCE(wq->tail)) {
2266 spin_unlock_irqrestore(&srq->rq.kwq->p_lock, flags);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002267 *bad_wr = wr;
2268 return -ENOMEM;
2269 }
2270
2271 wqe = rvt_get_rwqe_ptr(&srq->rq, wq->head);
2272 wqe->wr_id = wr->wr_id;
2273 wqe->num_sge = wr->num_sge;
David Brazdil0f672f62019-12-10 10:32:29 +00002274 for (i = 0; i < wr->num_sge; i++) {
2275 wqe->sg_list[i].addr = wr->sg_list[i].addr;
2276 wqe->sg_list[i].length = wr->sg_list[i].length;
2277 wqe->sg_list[i].lkey = wr->sg_list[i].lkey;
2278 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002279 /* Make sure queue entry is written before the head index. */
David Brazdil0f672f62019-12-10 10:32:29 +00002280 smp_store_release(&wq->head, next);
2281 spin_unlock_irqrestore(&srq->rq.kwq->p_lock, flags);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002282 }
2283 return 0;
2284}
2285
2286/*
David Brazdil0f672f62019-12-10 10:32:29 +00002287 * rvt used the internal kernel struct as part of its ABI; for now, make sure
2288 * the kernel struct does not change layout. FIXME: rvt should never cast the
2289 * user struct to a kernel struct.
2290 */
2291static struct ib_sge *rvt_cast_sge(struct rvt_wqe_sge *sge)
2292{
2293 BUILD_BUG_ON(offsetof(struct ib_sge, addr) !=
2294 offsetof(struct rvt_wqe_sge, addr));
2295 BUILD_BUG_ON(offsetof(struct ib_sge, length) !=
2296 offsetof(struct rvt_wqe_sge, length));
2297 BUILD_BUG_ON(offsetof(struct ib_sge, lkey) !=
2298 offsetof(struct rvt_wqe_sge, lkey));
2299 return (struct ib_sge *)sge;
2300}
2301
2302/*
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002303 * Validate an RWQE and fill in the SGE state.
2304 * Return 1 if OK.
2305 */
2306static int init_sge(struct rvt_qp *qp, struct rvt_rwqe *wqe)
2307{
2308 int i, j, ret;
2309 struct ib_wc wc;
2310 struct rvt_lkey_table *rkt;
2311 struct rvt_pd *pd;
2312 struct rvt_sge_state *ss;
2313 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
2314
2315 rkt = &rdi->lkey_table;
2316 pd = ibpd_to_rvtpd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd);
2317 ss = &qp->r_sge;
2318 ss->sg_list = qp->r_sg_list;
2319 qp->r_len = 0;
2320 for (i = j = 0; i < wqe->num_sge; i++) {
2321 if (wqe->sg_list[i].length == 0)
2322 continue;
2323 /* Check LKEY */
2324 ret = rvt_lkey_ok(rkt, pd, j ? &ss->sg_list[j - 1] : &ss->sge,
David Brazdil0f672f62019-12-10 10:32:29 +00002325 NULL, rvt_cast_sge(&wqe->sg_list[i]),
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002326 IB_ACCESS_LOCAL_WRITE);
2327 if (unlikely(ret <= 0))
2328 goto bad_lkey;
2329 qp->r_len += wqe->sg_list[i].length;
2330 j++;
2331 }
2332 ss->num_sge = j;
2333 ss->total_len = qp->r_len;
2334 return 1;
2335
2336bad_lkey:
2337 while (j) {
2338 struct rvt_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge;
2339
2340 rvt_put_mr(sge->mr);
2341 }
2342 ss->num_sge = 0;
2343 memset(&wc, 0, sizeof(wc));
2344 wc.wr_id = wqe->wr_id;
2345 wc.status = IB_WC_LOC_PROT_ERR;
2346 wc.opcode = IB_WC_RECV;
2347 wc.qp = &qp->ibqp;
2348 /* Signal solicited completion event. */
2349 rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
2350 return 0;
2351}
2352
2353/**
David Brazdil0f672f62019-12-10 10:32:29 +00002354 * get_rvt_head - get the head index of the circular buffer
 2355 * @rq: data structure for request queue entry
 2356 * @ip: mmap info pointer; non-NULL for a user-mapped queue
2357 *
2358 * Return - head index value
2359 */
2360static inline u32 get_rvt_head(struct rvt_rq *rq, void *ip)
2361{
2362 u32 head;
2363
2364 if (ip)
2365 head = RDMA_READ_UAPI_ATOMIC(rq->wq->head);
2366 else
2367 head = rq->kwq->head;
2368
2369 return head;
2370}
2371
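/*
 * The occupancy checked below comes from rvt_get_rq_count(); in
 * essence (a sketch, assuming head and tail are both < rq->size):
 *
 *	u32 count = head - tail;
 *
 *	if ((s32)count < 0)
 *		count += rq->size;
 *
 * e.g. size = 64, head = 3, tail = 60: 3 - 60 = -57, +64 = 7 entries.
 */
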
2372/**
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002373 * rvt_get_rwqe - copy the next RWQE into the QP's RWQE
2374 * @qp: the QP
2375 * @wr_id_only: update qp->r_wr_id only, not qp->r_sge
2376 *
2377 * Return -1 if there is a local error, 0 if no RWQE is available,
2378 * otherwise return 1.
2379 *
2380 * Can be called from interrupt level.
2381 */
2382int rvt_get_rwqe(struct rvt_qp *qp, bool wr_id_only)
2383{
2384 unsigned long flags;
2385 struct rvt_rq *rq;
David Brazdil0f672f62019-12-10 10:32:29 +00002386 struct rvt_krwq *kwq = NULL;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002387 struct rvt_rwq *wq;
2388 struct rvt_srq *srq;
2389 struct rvt_rwqe *wqe;
2390 void (*handler)(struct ib_event *, void *);
2391 u32 tail;
David Brazdil0f672f62019-12-10 10:32:29 +00002392 u32 head;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002393 int ret;
David Brazdil0f672f62019-12-10 10:32:29 +00002394 void *ip = NULL;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002395
2396 if (qp->ibqp.srq) {
2397 srq = ibsrq_to_rvtsrq(qp->ibqp.srq);
2398 handler = srq->ibsrq.event_handler;
2399 rq = &srq->rq;
David Brazdil0f672f62019-12-10 10:32:29 +00002400 ip = srq->ip;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002401 } else {
2402 srq = NULL;
2403 handler = NULL;
2404 rq = &qp->r_rq;
David Brazdil0f672f62019-12-10 10:32:29 +00002405 ip = qp->ip;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002406 }
2407
David Brazdil0f672f62019-12-10 10:32:29 +00002408 spin_lock_irqsave(&rq->kwq->c_lock, flags);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002409 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
2410 ret = 0;
2411 goto unlock;
2412 }
David Brazdil0f672f62019-12-10 10:32:29 +00002413 kwq = rq->kwq;
2414 if (ip) {
2415 wq = rq->wq;
2416 tail = RDMA_READ_UAPI_ATOMIC(wq->tail);
2417 } else {
2418 tail = kwq->tail;
2419 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002420
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002421 /* Validate tail before using it since it is user writable. */
2422 if (tail >= rq->size)
2423 tail = 0;
David Brazdil0f672f62019-12-10 10:32:29 +00002424
2425 if (kwq->count < RVT_RWQ_COUNT_THRESHOLD) {
2426 head = get_rvt_head(rq, ip);
Olivier Deprez0e641232021-09-23 10:07:05 +02002427 kwq->count = rvt_get_rq_count(rq, head, tail);
David Brazdil0f672f62019-12-10 10:32:29 +00002428 }
2429 if (unlikely(kwq->count == 0)) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002430 ret = 0;
2431 goto unlock;
2432 }
David Brazdil0f672f62019-12-10 10:32:29 +00002433 /* Make sure entry is read after the count is read. */
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002434 smp_rmb();
2435 wqe = rvt_get_rwqe_ptr(rq, tail);
2436 /*
2437 * Even though we update the tail index in memory, the verbs
2438 * consumer is not supposed to post more entries until a
2439 * completion is generated.
2440 */
2441 if (++tail >= rq->size)
2442 tail = 0;
David Brazdil0f672f62019-12-10 10:32:29 +00002443 if (ip)
2444 RDMA_WRITE_UAPI_ATOMIC(wq->tail, tail);
2445 else
2446 kwq->tail = tail;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002447 if (!wr_id_only && !init_sge(qp, wqe)) {
2448 ret = -1;
2449 goto unlock;
2450 }
2451 qp->r_wr_id = wqe->wr_id;
2452
David Brazdil0f672f62019-12-10 10:32:29 +00002453 kwq->count--;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002454 ret = 1;
2455 set_bit(RVT_R_WRID_VALID, &qp->r_aflags);
2456 if (handler) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002457 /*
2458 * Validate head pointer value and compute
2459 * the number of remaining WQEs.
2460 */
David Brazdil0f672f62019-12-10 10:32:29 +00002461 if (kwq->count < srq->limit) {
Olivier Deprez0e641232021-09-23 10:07:05 +02002462 kwq->count =
2463 rvt_get_rq_count(rq,
2464 get_rvt_head(rq, ip), tail);
David Brazdil0f672f62019-12-10 10:32:29 +00002465 if (kwq->count < srq->limit) {
2466 struct ib_event ev;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002467
David Brazdil0f672f62019-12-10 10:32:29 +00002468 srq->limit = 0;
2469 spin_unlock_irqrestore(&rq->kwq->c_lock, flags);
2470 ev.device = qp->ibqp.device;
2471 ev.element.srq = qp->ibqp.srq;
2472 ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
2473 handler(&ev, srq->ibsrq.srq_context);
2474 goto bail;
2475 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002476 }
2477 }
2478unlock:
David Brazdil0f672f62019-12-10 10:32:29 +00002479 spin_unlock_irqrestore(&rq->kwq->c_lock, flags);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002480bail:
2481 return ret;
2482}
2483EXPORT_SYMBOL(rvt_get_rwqe);
2484
2485/**
 2486 * rvt_comm_est - handle trap with QP established
2487 * @qp: the QP
2488 */
2489void rvt_comm_est(struct rvt_qp *qp)
2490{
2491 qp->r_flags |= RVT_R_COMM_EST;
2492 if (qp->ibqp.event_handler) {
2493 struct ib_event ev;
2494
2495 ev.device = qp->ibqp.device;
2496 ev.element.qp = &qp->ibqp;
2497 ev.event = IB_EVENT_COMM_EST;
2498 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
2499 }
2500}
2501EXPORT_SYMBOL(rvt_comm_est);
2502
2503void rvt_rc_error(struct rvt_qp *qp, enum ib_wc_status err)
2504{
2505 unsigned long flags;
2506 int lastwqe;
2507
2508 spin_lock_irqsave(&qp->s_lock, flags);
2509 lastwqe = rvt_error_qp(qp, err);
2510 spin_unlock_irqrestore(&qp->s_lock, flags);
2511
2512 if (lastwqe) {
2513 struct ib_event ev;
2514
2515 ev.device = qp->ibqp.device;
2516 ev.element.qp = &qp->ibqp;
2517 ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
2518 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
2519 }
2520}
2521EXPORT_SYMBOL(rvt_rc_error);
2522
2523/*
 2524 * rvt_rnr_tbl_to_usec - convert an ib_rvt_rnr_table index to usec
2525 * @index - the index
2526 * return usec from an index into ib_rvt_rnr_table
2527 */
2528unsigned long rvt_rnr_tbl_to_usec(u32 index)
2529{
2530 return ib_rvt_rnr_table[(index & IB_AETH_CREDIT_MASK)];
2531}
2532EXPORT_SYMBOL(rvt_rnr_tbl_to_usec);
2533
2534static inline unsigned long rvt_aeth_to_usec(u32 aeth)
2535{
2536 return ib_rvt_rnr_table[(aeth >> IB_AETH_CREDIT_SHIFT) &
2537 IB_AETH_CREDIT_MASK];
2538}
2539
2540/*
David Brazdil0f672f62019-12-10 10:32:29 +00002541 * rvt_add_retry_timer_ext - add/start a retry timer
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002542 * @qp - the QP
David Brazdil0f672f62019-12-10 10:32:29 +00002543 * @shift - timeout shift to wait for multiple packets
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002544 * add a retry timer on the QP
2545 */
David Brazdil0f672f62019-12-10 10:32:29 +00002546void rvt_add_retry_timer_ext(struct rvt_qp *qp, u8 shift)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002547{
2548 struct ib_qp *ibqp = &qp->ibqp;
2549 struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
2550
2551 lockdep_assert_held(&qp->s_lock);
2552 qp->s_flags |= RVT_S_TIMER;
2553 /* 4.096 usec. * (1 << qp->timeout) */
David Brazdil0f672f62019-12-10 10:32:29 +00002554 qp->s_timer.expires = jiffies + rdi->busy_jiffies +
2555 (qp->timeout_jiffies << shift);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002556 add_timer(&qp->s_timer);
2557}
David Brazdil0f672f62019-12-10 10:32:29 +00002558EXPORT_SYMBOL(rvt_add_retry_timer_ext);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002559
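/*
 * Example of the expiry arithmetic above (illustrative values): with
 * qp->timeout = 14 the base interval is 4.096 usec * 2^14, roughly
 * 67 ms, and timeout_jiffies is that interval in jiffies; shift = 1
 * doubles it to ~134 ms to cover a multi-packet window.
 */
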
2560/**
2561 * rvt_add_rnr_timer - add/start an rnr timer
2562 * @qp - the QP
2563 * @aeth - aeth of RNR timeout, simulated aeth for loopback
2564 * add an rnr timer on the QP
2565 */
2566void rvt_add_rnr_timer(struct rvt_qp *qp, u32 aeth)
2567{
2568 u32 to;
2569
2570 lockdep_assert_held(&qp->s_lock);
2571 qp->s_flags |= RVT_S_WAIT_RNR;
2572 to = rvt_aeth_to_usec(aeth);
2573 trace_rvt_rnrnak_add(qp, to);
2574 hrtimer_start(&qp->s_rnr_timer,
2575 ns_to_ktime(1000 * to), HRTIMER_MODE_REL_PINNED);
2576}
2577EXPORT_SYMBOL(rvt_add_rnr_timer);
2578
2579/**
2580 * rvt_stop_rc_timers - stop all timers
2581 * @qp - the QP
2582 * stop any pending timers
2583 */
2584void rvt_stop_rc_timers(struct rvt_qp *qp)
2585{
2586 lockdep_assert_held(&qp->s_lock);
2587 /* Remove QP from all timers */
2588 if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
2589 qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
2590 del_timer(&qp->s_timer);
2591 hrtimer_try_to_cancel(&qp->s_rnr_timer);
2592 }
2593}
2594EXPORT_SYMBOL(rvt_stop_rc_timers);
2595
2596/**
2597 * rvt_stop_rnr_timer - stop an rnr timer
2598 * @qp - the QP
2599 *
 2600 * stop an rnr timer if it is
 2601 * pending.
2602 */
2603static void rvt_stop_rnr_timer(struct rvt_qp *qp)
2604{
2605 lockdep_assert_held(&qp->s_lock);
2606 /* Remove QP from rnr timer */
2607 if (qp->s_flags & RVT_S_WAIT_RNR) {
2608 qp->s_flags &= ~RVT_S_WAIT_RNR;
2609 trace_rvt_rnrnak_stop(qp, 0);
2610 }
2611}
2612
2613/**
2614 * rvt_del_timers_sync - wait for any timeout routines to exit
2615 * @qp - the QP
2616 */
2617void rvt_del_timers_sync(struct rvt_qp *qp)
2618{
2619 del_timer_sync(&qp->s_timer);
2620 hrtimer_cancel(&qp->s_rnr_timer);
2621}
2622EXPORT_SYMBOL(rvt_del_timers_sync);
2623
 2624/*
2625 * This is called from s_timer for missing responses.
2626 */
2627static void rvt_rc_timeout(struct timer_list *t)
2628{
2629 struct rvt_qp *qp = from_timer(qp, t, s_timer);
2630 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
2631 unsigned long flags;
2632
2633 spin_lock_irqsave(&qp->r_lock, flags);
2634 spin_lock(&qp->s_lock);
2635 if (qp->s_flags & RVT_S_TIMER) {
2636 struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
2637
2638 qp->s_flags &= ~RVT_S_TIMER;
2639 rvp->n_rc_timeouts++;
2640 del_timer(&qp->s_timer);
2641 trace_rvt_rc_timeout(qp, qp->s_last_psn + 1);
2642 if (rdi->driver_f.notify_restart_rc)
2643 rdi->driver_f.notify_restart_rc(qp,
2644 qp->s_last_psn + 1,
2645 1);
2646 rdi->driver_f.schedule_send(qp);
2647 }
2648 spin_unlock(&qp->s_lock);
2649 spin_unlock_irqrestore(&qp->r_lock, flags);
2650}
2651
2652/*
2653 * This is called from s_timer for RNR timeouts.
2654 */
2655enum hrtimer_restart rvt_rc_rnr_retry(struct hrtimer *t)
2656{
2657 struct rvt_qp *qp = container_of(t, struct rvt_qp, s_rnr_timer);
2658 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
2659 unsigned long flags;
2660
2661 spin_lock_irqsave(&qp->s_lock, flags);
2662 rvt_stop_rnr_timer(qp);
2663 trace_rvt_rnrnak_timeout(qp, 0);
2664 rdi->driver_f.schedule_send(qp);
2665 spin_unlock_irqrestore(&qp->s_lock, flags);
2666 return HRTIMER_NORESTART;
2667}
2668EXPORT_SYMBOL(rvt_rc_rnr_retry);
2669
2670/**
2671 * rvt_qp_iter_init - initial for QP iteration
2672 * @rdi: rvt devinfo
2673 * @v: u64 value
2674 *
2675 * This returns an iterator suitable for iterating QPs
2676 * in the system.
2677 *
2678 * The @cb is a user defined callback and @v is a 64
2679 * bit value passed to and relevant for processing in the
2680 * @cb. An example use case would be to alter QP processing
2681 * based on criteria not part of the rvt_qp.
2682 *
2683 * Use cases that require memory allocation to succeed
2684 * must preallocate appropriately.
2685 *
2686 * Return: a pointer to an rvt_qp_iter or NULL
2687 */
2688struct rvt_qp_iter *rvt_qp_iter_init(struct rvt_dev_info *rdi,
2689 u64 v,
2690 void (*cb)(struct rvt_qp *qp, u64 v))
2691{
2692 struct rvt_qp_iter *i;
2693
2694 i = kzalloc(sizeof(*i), GFP_KERNEL);
2695 if (!i)
2696 return NULL;
2697
2698 i->rdi = rdi;
2699 /* number of special QPs (SMI/GSI) for device */
2700 i->specials = rdi->ibdev.phys_port_cnt * 2;
2701 i->v = v;
2702 i->cb = cb;
2703
2704 return i;
2705}
2706EXPORT_SYMBOL(rvt_qp_iter_init);
2707
2708/**
2709 * rvt_qp_iter_next - return the next QP in iter
2710 * @iter - the iterator
2711 *
2712 * Fine grained QP iterator suitable for use
2713 * with debugfs seq_file mechanisms.
2714 *
2715 * Updates iter->qp with the current QP when the return
2716 * value is 0.
2717 *
2718 * Return: 0 - iter->qp is valid 1 - no more QPs
2719 */
2720int rvt_qp_iter_next(struct rvt_qp_iter *iter)
2721 __must_hold(RCU)
2722{
2723 int n = iter->n;
2724 int ret = 1;
2725 struct rvt_qp *pqp = iter->qp;
2726 struct rvt_qp *qp;
2727 struct rvt_dev_info *rdi = iter->rdi;
2728
2729 /*
2730 * The approach is to consider the special qps
2731 * as additional table entries before the
2732 * real hash table. Since the qp code sets
2733 * the qp->next hash link to NULL, this works just fine.
2734 *
2735 * iter->specials is 2 * # ports
2736 *
2737 * n = 0..iter->specials is the special qp indices
2738 *
2739 * n = iter->specials..rdi->qp_dev->qp_table_size+iter->specials are
2740 * the potential hash bucket entries
2741 *
2742 */
2743 for (; n < rdi->qp_dev->qp_table_size + iter->specials; n++) {
2744 if (pqp) {
2745 qp = rcu_dereference(pqp->next);
2746 } else {
2747 if (n < iter->specials) {
2748 struct rvt_ibport *rvp;
2749 int pidx;
2750
2751 pidx = n % rdi->ibdev.phys_port_cnt;
2752 rvp = rdi->ports[pidx];
2753 qp = rcu_dereference(rvp->qp[n & 1]);
2754 } else {
2755 qp = rcu_dereference(
2756 rdi->qp_dev->qp_table[
2757 (n - iter->specials)]);
2758 }
2759 }
2760 pqp = qp;
2761 if (qp) {
2762 iter->qp = qp;
2763 iter->n = n;
2764 return 0;
2765 }
2766 }
2767 return ret;
2768}
2769EXPORT_SYMBOL(rvt_qp_iter_next);
2770
2771/**
2772 * rvt_qp_iter - iterate all QPs
2773 * @rdi - rvt devinfo
2774 * @v - a 64 bit value
2775 * @cb - a callback
2776 *
2777 * This provides a way for iterating all QPs.
2778 *
2779 * The @cb is a user defined callback and @v is a 64
2780 * bit value passed to and relevant for processing in the
2781 * cb. An example use case would be to alter QP processing
2782 * based on criteria not part of the rvt_qp.
2783 *
2784 * The code has an internal iterator to simplify
2785 * non seq_file use cases.
2786 */
2787void rvt_qp_iter(struct rvt_dev_info *rdi,
2788 u64 v,
2789 void (*cb)(struct rvt_qp *qp, u64 v))
2790{
2791 int ret;
2792 struct rvt_qp_iter i = {
2793 .rdi = rdi,
2794 .specials = rdi->ibdev.phys_port_cnt * 2,
2795 .v = v,
2796 .cb = cb
2797 };
2798
2799 rcu_read_lock();
2800 do {
2801 ret = rvt_qp_iter_next(&i);
2802 if (!ret) {
2803 rvt_get_qp(i.qp);
2804 rcu_read_unlock();
2805 i.cb(i.qp, i.v);
2806 rcu_read_lock();
2807 rvt_put_qp(i.qp);
2808 }
2809 } while (!ret);
2810 rcu_read_unlock();
2811}
2812EXPORT_SYMBOL(rvt_qp_iter);
David Brazdil0f672f62019-12-10 10:32:29 +00002813
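/*
 * Illustrative sketch of an rvt_qp_iter() consumer (hypothetical
 * callback, not part of the driver): count the RC QPs on a device,
 * smuggling a result pointer through @v:
 *
 *	static void count_rc_qp(struct rvt_qp *qp, u64 v)
 *	{
 *		if (qp->ibqp.qp_type == IB_QPT_RC)
 *			atomic_inc((atomic_t *)(uintptr_t)v);
 *	}
 *
 *	atomic_t n = ATOMIC_INIT(0);
 *
 *	rvt_qp_iter(rdi, (u64)(uintptr_t)&n, count_rc_qp);
 *
 * The iterator takes a reference around each callback, so the QP
 * cannot be freed while count_rc_qp() runs.
 */
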
2814/*
2815 * This should be called with s_lock held.
2816 */
2817void rvt_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
2818 enum ib_wc_status status)
2819{
2820 u32 old_last, last;
2821 struct rvt_dev_info *rdi;
2822
2823 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND))
2824 return;
2825 rdi = ib_to_rvt(qp->ibqp.device);
2826
2827 old_last = qp->s_last;
2828 trace_rvt_qp_send_completion(qp, wqe, old_last);
2829 last = rvt_qp_complete_swqe(qp, wqe, rdi->wc_opcode[wqe->wr.opcode],
2830 status);
2831 if (qp->s_acked == old_last)
2832 qp->s_acked = last;
2833 if (qp->s_cur == old_last)
2834 qp->s_cur = last;
2835 if (qp->s_tail == old_last)
2836 qp->s_tail = last;
2837 if (qp->state == IB_QPS_SQD && last == qp->s_cur)
2838 qp->s_draining = 0;
2839}
2840EXPORT_SYMBOL(rvt_send_complete);
2841
2842/**
2843 * rvt_copy_sge - copy data to SGE memory
2844 * @qp: associated QP
2845 * @ss: the SGE state
2846 * @data: the data to copy
2847 * @length: the length of the data
2848 * @release: boolean to release MR
2849 * @copy_last: do a separate copy of the last 8 bytes
2850 */
2851void rvt_copy_sge(struct rvt_qp *qp, struct rvt_sge_state *ss,
2852 void *data, u32 length,
2853 bool release, bool copy_last)
2854{
2855 struct rvt_sge *sge = &ss->sge;
2856 int i;
2857 bool in_last = false;
2858 bool cacheless_copy = false;
2859 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
2860 struct rvt_wss *wss = rdi->wss;
2861 unsigned int sge_copy_mode = rdi->dparms.sge_copy_mode;
2862
2863 if (sge_copy_mode == RVT_SGE_COPY_CACHELESS) {
2864 cacheless_copy = length >= PAGE_SIZE;
2865 } else if (sge_copy_mode == RVT_SGE_COPY_ADAPTIVE) {
2866 if (length >= PAGE_SIZE) {
2867 /*
2868 * NOTE: this *assumes*:
2869 * o The first vaddr is the dest.
2870 * o If multiple pages, then vaddr is sequential.
2871 */
2872 wss_insert(wss, sge->vaddr);
2873 if (length >= (2 * PAGE_SIZE))
2874 wss_insert(wss, (sge->vaddr + PAGE_SIZE));
2875
2876 cacheless_copy = wss_exceeds_threshold(wss);
2877 } else {
2878 wss_advance_clean_counter(wss);
2879 }
2880 }
2881
2882 if (copy_last) {
2883 if (length > 8) {
2884 length -= 8;
2885 } else {
2886 copy_last = false;
2887 in_last = true;
2888 }
2889 }
2890
2891again:
2892 while (length) {
2893 u32 len = rvt_get_sge_length(sge, length);
2894
2895 WARN_ON_ONCE(len == 0);
2896 if (unlikely(in_last)) {
2897 /* enforce byte transfer ordering */
2898 for (i = 0; i < len; i++)
2899 ((u8 *)sge->vaddr)[i] = ((u8 *)data)[i];
2900 } else if (cacheless_copy) {
2901 cacheless_memcpy(sge->vaddr, data, len);
2902 } else {
2903 memcpy(sge->vaddr, data, len);
2904 }
2905 rvt_update_sge(ss, len, release);
2906 data += len;
2907 length -= len;
2908 }
2909
2910 if (copy_last) {
2911 copy_last = false;
2912 in_last = true;
2913 length = 8;
2914 goto again;
2915 }
2916}
2917EXPORT_SYMBOL(rvt_copy_sge);
2918
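/*
 * Worked example of the copy_last split above (illustrative): a
 * 4096-byte payload with copy_last set is copied as 4088 bytes in the
 * main loop, after which the trailing 8 bytes are copied one byte at
 * a time so the final qword is observably written last.
 */
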
2919static enum ib_wc_status loopback_qp_drop(struct rvt_ibport *rvp,
2920 struct rvt_qp *sqp)
2921{
2922 rvp->n_pkt_drops++;
2923 /*
2924 * For RC, the requester would timeout and retry so
2925 * shortcut the timeouts and just signal too many retries.
2926 */
2927 return sqp->ibqp.qp_type == IB_QPT_RC ?
2928 IB_WC_RETRY_EXC_ERR : IB_WC_SUCCESS;
2929}
2930
2931/**
2932 * ruc_loopback - handle UC and RC loopback requests
2933 * @sqp: the sending QP
2934 *
 2935 * This is called from rvt_do_send() to forward a WQE addressed to the same HFI.
2936 * Note that although we are single threaded due to the send engine, we still
2937 * have to protect against post_send(). We don't have to worry about
2938 * receive interrupts since this is a connected protocol and all packets
2939 * will pass through here.
2940 */
2941void rvt_ruc_loopback(struct rvt_qp *sqp)
2942{
2943 struct rvt_ibport *rvp = NULL;
2944 struct rvt_dev_info *rdi = ib_to_rvt(sqp->ibqp.device);
2945 struct rvt_qp *qp;
2946 struct rvt_swqe *wqe;
2947 struct rvt_sge *sge;
2948 unsigned long flags;
2949 struct ib_wc wc;
2950 u64 sdata;
2951 atomic64_t *maddr;
2952 enum ib_wc_status send_status;
2953 bool release;
2954 int ret;
2955 bool copy_last = false;
2956 int local_ops = 0;
2957
2958 rcu_read_lock();
2959 rvp = rdi->ports[sqp->port_num - 1];
2960
2961 /*
2962 * Note that we check the responder QP state after
2963 * checking the requester's state.
2964 */
2965
2966 qp = rvt_lookup_qpn(ib_to_rvt(sqp->ibqp.device), rvp,
2967 sqp->remote_qpn);
2968
2969 spin_lock_irqsave(&sqp->s_lock, flags);
2970
2971 /* Return if we are already busy processing a work request. */
2972 if ((sqp->s_flags & (RVT_S_BUSY | RVT_S_ANY_WAIT)) ||
2973 !(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_OR_FLUSH_SEND))
2974 goto unlock;
2975
2976 sqp->s_flags |= RVT_S_BUSY;
2977
2978again:
2979 if (sqp->s_last == READ_ONCE(sqp->s_head))
2980 goto clr_busy;
2981 wqe = rvt_get_swqe_ptr(sqp, sqp->s_last);
2982
2983 /* Return if it is not OK to start a new work request. */
2984 if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_NEXT_SEND_OK)) {
2985 if (!(ib_rvt_state_ops[sqp->state] & RVT_FLUSH_SEND))
2986 goto clr_busy;
2987 /* We are in the error state, flush the work request. */
2988 send_status = IB_WC_WR_FLUSH_ERR;
2989 goto flush_send;
2990 }
2991
2992 /*
2993 * We can rely on the entry not changing without the s_lock
2994 * being held until we update s_last.
2995 * We increment s_cur to indicate s_last is in progress.
2996 */
2997 if (sqp->s_last == sqp->s_cur) {
2998 if (++sqp->s_cur >= sqp->s_size)
2999 sqp->s_cur = 0;
3000 }
3001 spin_unlock_irqrestore(&sqp->s_lock, flags);
3002
3003 if (!qp) {
3004 send_status = loopback_qp_drop(rvp, sqp);
3005 goto serr_no_r_lock;
3006 }
3007 spin_lock_irqsave(&qp->r_lock, flags);
3008 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) ||
3009 qp->ibqp.qp_type != sqp->ibqp.qp_type) {
3010 send_status = loopback_qp_drop(rvp, sqp);
3011 goto serr;
3012 }
3013
3014 memset(&wc, 0, sizeof(wc));
3015 send_status = IB_WC_SUCCESS;
3016
3017 release = true;
3018 sqp->s_sge.sge = wqe->sg_list[0];
3019 sqp->s_sge.sg_list = wqe->sg_list + 1;
3020 sqp->s_sge.num_sge = wqe->wr.num_sge;
3021 sqp->s_len = wqe->length;
3022 switch (wqe->wr.opcode) {
3023 case IB_WR_REG_MR:
3024 goto send_comp;
3025
3026 case IB_WR_LOCAL_INV:
3027 if (!(wqe->wr.send_flags & RVT_SEND_COMPLETION_ONLY)) {
3028 if (rvt_invalidate_rkey(sqp,
3029 wqe->wr.ex.invalidate_rkey))
3030 send_status = IB_WC_LOC_PROT_ERR;
3031 local_ops = 1;
3032 }
3033 goto send_comp;
3034
3035 case IB_WR_SEND_WITH_INV:
3036 case IB_WR_SEND_WITH_IMM:
3037 case IB_WR_SEND:
3038 ret = rvt_get_rwqe(qp, false);
3039 if (ret < 0)
3040 goto op_err;
3041 if (!ret)
3042 goto rnr_nak;
3043 if (wqe->length > qp->r_len)
3044 goto inv_err;
3045 switch (wqe->wr.opcode) {
3046 case IB_WR_SEND_WITH_INV:
3047 if (!rvt_invalidate_rkey(qp,
3048 wqe->wr.ex.invalidate_rkey)) {
3049 wc.wc_flags = IB_WC_WITH_INVALIDATE;
3050 wc.ex.invalidate_rkey =
3051 wqe->wr.ex.invalidate_rkey;
3052 }
3053 break;
3054 case IB_WR_SEND_WITH_IMM:
3055 wc.wc_flags = IB_WC_WITH_IMM;
3056 wc.ex.imm_data = wqe->wr.ex.imm_data;
3057 break;
3058 default:
3059 break;
3060 }
3061 break;
3062
3063 case IB_WR_RDMA_WRITE_WITH_IMM:
3064 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
3065 goto inv_err;
3066 wc.wc_flags = IB_WC_WITH_IMM;
3067 wc.ex.imm_data = wqe->wr.ex.imm_data;
3068 ret = rvt_get_rwqe(qp, true);
3069 if (ret < 0)
3070 goto op_err;
3071 if (!ret)
3072 goto rnr_nak;
3073 /* skip copy_last set and qp_access_flags recheck */
3074 goto do_write;
3075 case IB_WR_RDMA_WRITE:
3076 copy_last = rvt_is_user_qp(qp);
3077 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
3078 goto inv_err;
3079do_write:
3080 if (wqe->length == 0)
3081 break;
3082 if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, wqe->length,
3083 wqe->rdma_wr.remote_addr,
3084 wqe->rdma_wr.rkey,
3085 IB_ACCESS_REMOTE_WRITE)))
3086 goto acc_err;
3087 qp->r_sge.sg_list = NULL;
3088 qp->r_sge.num_sge = 1;
3089 qp->r_sge.total_len = wqe->length;
3090 break;
3091
3092 case IB_WR_RDMA_READ:
3093 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
3094 goto inv_err;
3095 if (unlikely(!rvt_rkey_ok(qp, &sqp->s_sge.sge, wqe->length,
3096 wqe->rdma_wr.remote_addr,
3097 wqe->rdma_wr.rkey,
3098 IB_ACCESS_REMOTE_READ)))
3099 goto acc_err;
3100 release = false;
3101 sqp->s_sge.sg_list = NULL;
3102 sqp->s_sge.num_sge = 1;
3103 qp->r_sge.sge = wqe->sg_list[0];
3104 qp->r_sge.sg_list = wqe->sg_list + 1;
3105 qp->r_sge.num_sge = wqe->wr.num_sge;
3106 qp->r_sge.total_len = wqe->length;
3107 break;
3108
3109 case IB_WR_ATOMIC_CMP_AND_SWP:
3110 case IB_WR_ATOMIC_FETCH_AND_ADD:
3111 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
3112 goto inv_err;
3113 if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
3114 wqe->atomic_wr.remote_addr,
3115 wqe->atomic_wr.rkey,
3116 IB_ACCESS_REMOTE_ATOMIC)))
3117 goto acc_err;
3118 /* Perform atomic OP and save result. */
3119 maddr = (atomic64_t *)qp->r_sge.sge.vaddr;
3120 sdata = wqe->atomic_wr.compare_add;
3121 *(u64 *)sqp->s_sge.sge.vaddr =
3122 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
3123 (u64)atomic64_add_return(sdata, maddr) - sdata :
3124 (u64)cmpxchg((u64 *)qp->r_sge.sge.vaddr,
3125 sdata, wqe->atomic_wr.swap);
3126 rvt_put_mr(qp->r_sge.sge.mr);
3127 qp->r_sge.num_sge = 0;
3128 goto send_comp;
3129
3130 default:
3131 send_status = IB_WC_LOC_QP_OP_ERR;
3132 goto serr;
3133 }
3134
3135 sge = &sqp->s_sge.sge;
3136 while (sqp->s_len) {
3137 u32 len = rvt_get_sge_length(sge, sqp->s_len);
3138
3139 WARN_ON_ONCE(len == 0);
3140 rvt_copy_sge(qp, &qp->r_sge, sge->vaddr,
3141 len, release, copy_last);
3142 rvt_update_sge(&sqp->s_sge, len, !release);
3143 sqp->s_len -= len;
3144 }
3145 if (release)
3146 rvt_put_ss(&qp->r_sge);
3147
3148 if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
3149 goto send_comp;
3150
3151 if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
3152 wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
3153 else
3154 wc.opcode = IB_WC_RECV;
3155 wc.wr_id = qp->r_wr_id;
3156 wc.status = IB_WC_SUCCESS;
3157 wc.byte_len = wqe->length;
3158 wc.qp = &qp->ibqp;
3159 wc.src_qp = qp->remote_qpn;
3160 wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr) & U16_MAX;
3161 wc.sl = rdma_ah_get_sl(&qp->remote_ah_attr);
3162 wc.port_num = 1;
3163 /* Signal completion event if the solicited bit is set. */
3164 rvt_recv_cq(qp, &wc, wqe->wr.send_flags & IB_SEND_SOLICITED);
3165
3166send_comp:
3167 spin_unlock_irqrestore(&qp->r_lock, flags);
3168 spin_lock_irqsave(&sqp->s_lock, flags);
3169 rvp->n_loop_pkts++;
3170flush_send:
3171 sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
3172 rvt_send_complete(sqp, wqe, send_status);
3173 if (local_ops) {
3174 atomic_dec(&sqp->local_ops_pending);
3175 local_ops = 0;
3176 }
3177 goto again;
3178
3179rnr_nak:
3180 /* Handle RNR NAK */
3181 if (qp->ibqp.qp_type == IB_QPT_UC)
3182 goto send_comp;
3183 rvp->n_rnr_naks++;
3184 /*
3185 * Note: we don't need the s_lock held since the BUSY flag
3186 * makes this single threaded.
3187 */
3188 if (sqp->s_rnr_retry == 0) {
3189 send_status = IB_WC_RNR_RETRY_EXC_ERR;
3190 goto serr;
3191 }
3192 if (sqp->s_rnr_retry_cnt < 7)
3193 sqp->s_rnr_retry--;
3194 spin_unlock_irqrestore(&qp->r_lock, flags);
3195 spin_lock_irqsave(&sqp->s_lock, flags);
3196 if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_RECV_OK))
3197 goto clr_busy;
3198 rvt_add_rnr_timer(sqp, qp->r_min_rnr_timer <<
3199 IB_AETH_CREDIT_SHIFT);
3200 goto clr_busy;
3201
3202op_err:
3203 send_status = IB_WC_REM_OP_ERR;
3204 wc.status = IB_WC_LOC_QP_OP_ERR;
3205 goto err;
3206
3207inv_err:
3208 send_status =
3209 sqp->ibqp.qp_type == IB_QPT_RC ?
3210 IB_WC_REM_INV_REQ_ERR :
3211 IB_WC_SUCCESS;
3212 wc.status = IB_WC_LOC_QP_OP_ERR;
3213 goto err;
3214
3215acc_err:
3216 send_status = IB_WC_REM_ACCESS_ERR;
3217 wc.status = IB_WC_LOC_PROT_ERR;
3218err:
3219 /* responder goes to error state */
3220 rvt_rc_error(qp, wc.status);
3221
3222serr:
3223 spin_unlock_irqrestore(&qp->r_lock, flags);
3224serr_no_r_lock:
3225 spin_lock_irqsave(&sqp->s_lock, flags);
3226 rvt_send_complete(sqp, wqe, send_status);
3227 if (sqp->ibqp.qp_type == IB_QPT_RC) {
3228 int lastwqe = rvt_error_qp(sqp, IB_WC_WR_FLUSH_ERR);
3229
3230 sqp->s_flags &= ~RVT_S_BUSY;
3231 spin_unlock_irqrestore(&sqp->s_lock, flags);
3232 if (lastwqe) {
3233 struct ib_event ev;
3234
3235 ev.device = sqp->ibqp.device;
3236 ev.element.qp = &sqp->ibqp;
3237 ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
3238 sqp->ibqp.event_handler(&ev, sqp->ibqp.qp_context);
3239 }
3240 goto done;
3241 }
3242clr_busy:
3243 sqp->s_flags &= ~RVT_S_BUSY;
3244unlock:
3245 spin_unlock_irqrestore(&sqp->s_lock, flags);
3246done:
3247 rcu_read_unlock();
3248}
3249EXPORT_SYMBOL(rvt_ruc_loopback);