/*
 * Copyright(c) 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <rdma/uverbs_ioctl.h>

#include "srq.h"
#include "vt.h"
#include "qp.h"

/**
 * rvt_driver_srq_init - init srq resources on a per driver basis
 * @rdi: rvt dev structure
 *
 * Do any initialization needed when a driver registers with rdmavt.
 */
void rvt_driver_srq_init(struct rvt_dev_info *rdi)
{
	spin_lock_init(&rdi->n_srqs_lock);
	rdi->n_srqs_allocated = 0;
}

/**
 * rvt_create_srq - create a shared receive queue
 * @ibsrq: the shared receive queue being created
 * @srq_init_attr: the attributes of the SRQ
 * @udata: data from libibverbs when creating a user SRQ
 *
 * Return: 0 on success
 */
int rvt_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *srq_init_attr,
		   struct ib_udata *udata)
{
	struct rvt_dev_info *dev = ib_to_rvt(ibsrq->device);
	struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);
	u32 sz;
	int ret;

	if (srq_init_attr->srq_type != IB_SRQT_BASIC)
		return -EOPNOTSUPP;

	if (srq_init_attr->attr.max_sge == 0 ||
	    srq_init_attr->attr.max_sge > dev->dparms.props.max_srq_sge ||
	    srq_init_attr->attr.max_wr == 0 ||
	    srq_init_attr->attr.max_wr > dev->dparms.props.max_srq_wr)
		return -EINVAL;

	/*
	 * Need to use vmalloc() if we want to support large #s of entries.
	 */
	srq->rq.size = srq_init_attr->attr.max_wr + 1;
	srq->rq.max_sge = srq_init_attr->attr.max_sge;
	sz = sizeof(struct ib_sge) * srq->rq.max_sge +
		sizeof(struct rvt_rwqe);
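	/*
	 * rvt_alloc_rq() sizes the ring for srq->rq.size WQEs of sz bytes
	 * each and, for a user SRQ (udata != NULL), also allocates the
	 * mmap-able user view of the queue (srq->rq.wq) alongside the
	 * kernel view (srq->rq.kwq).
	 */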
	if (rvt_alloc_rq(&srq->rq, srq->rq.size * sz,
			 dev->dparms.node, udata)) {
		ret = -ENOMEM;
		goto bail_srq;
	}

	/*
	 * Return the address of the RWQ as the offset to mmap.
	 * See rvt_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		u32 s = sizeof(struct rvt_rwq) + srq->rq.size * sz;

		srq->ip = rvt_create_mmap_info(dev, s, udata, srq->rq.wq);
		if (!srq->ip) {
			ret = -ENOMEM;
			goto bail_wq;
		}

		ret = ib_copy_to_udata(udata, &srq->ip->offset,
				       sizeof(srq->ip->offset));
		if (ret)
			goto bail_ip;
	}

	/*
	 * ib_create_srq() will initialize srq->ibsrq.
	 */
	spin_lock_init(&srq->rq.lock);
	srq->limit = srq_init_attr->attr.srq_limit;

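	/* Account this SRQ against the per-device limit. */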
	spin_lock(&dev->n_srqs_lock);
	if (dev->n_srqs_allocated == dev->dparms.props.max_srq) {
		spin_unlock(&dev->n_srqs_lock);
		ret = -ENOMEM;
		goto bail_ip;
	}

	dev->n_srqs_allocated++;
	spin_unlock(&dev->n_srqs_lock);

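	/* Queue the mapping so rvt_mmap() can find it. */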
	if (srq->ip) {
		spin_lock_irq(&dev->pending_lock);
		list_add(&srq->ip->pending_mmaps, &dev->pending_mmaps);
		spin_unlock_irq(&dev->pending_lock);
	}

	return 0;

bail_ip:
	kfree(srq->ip);
bail_wq:
	rvt_free_rq(&srq->rq);
bail_srq:
	return ret;
}

/**
 * rvt_modify_srq - modify a shared receive queue
 * @ibsrq: the SRQ to modify
 * @attr: the new attributes of the SRQ
 * @attr_mask: indicates which attributes to modify
 * @udata: user data for libibverbs.so
 *
 * Return: 0 on success
 */
int rvt_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		   enum ib_srq_attr_mask attr_mask,
		   struct ib_udata *udata)
{
	struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);
	struct rvt_dev_info *dev = ib_to_rvt(ibsrq->device);
	struct rvt_rq tmp_rq = {};
	int ret = 0;

	if (attr_mask & IB_SRQ_MAX_WR) {
		struct rvt_krwq *okwq = NULL;
		struct rvt_rwq *owq = NULL;
		struct rvt_rwqe *p;
		u32 sz, size, n, head, tail;

		/* Check that the requested sizes are below the limits. */
		if ((attr->max_wr > dev->dparms.props.max_srq_wr) ||
		    ((attr_mask & IB_SRQ_LIMIT) ?
		     attr->srq_limit : srq->limit) > attr->max_wr)
			return -EINVAL;
		sz = sizeof(struct rvt_rwqe) +
			srq->rq.max_sge * sizeof(struct ib_sge);
		size = attr->max_wr + 1;
		if (rvt_alloc_rq(&tmp_rq, size * sz, dev->dparms.node,
				 udata))
			return -ENOMEM;
		/*
		 * Check that we can write the offset to mmap: udata's
		 * inbuf carries the user address to which the new queue's
		 * mmap offset will be copied back.
		 */
		if (udata && udata->inlen >= sizeof(__u64)) {
			__u64 offset_addr;
			__u64 offset = 0;

			ret = ib_copy_from_udata(&offset_addr, udata,
						 sizeof(offset_addr));
			if (ret)
				goto bail_free;
			udata->outbuf = (void __user *)
					(unsigned long)offset_addr;
			ret = ib_copy_to_udata(udata, &offset,
					       sizeof(offset));
			if (ret)
				goto bail_free;
		}

		spin_lock_irq(&srq->rq.kwq->c_lock);
		/*
		 * validate head and tail pointer values and compute
		 * the number of remaining WQEs.
		 */
		if (udata) {
			owq = srq->rq.wq;
			head = RDMA_READ_UAPI_ATOMIC(owq->head);
			tail = RDMA_READ_UAPI_ATOMIC(owq->tail);
		} else {
			okwq = srq->rq.kwq;
			head = okwq->head;
			tail = okwq->tail;
		}
		if (head >= srq->rq.size || tail >= srq->rq.size) {
			ret = -EINVAL;
			goto bail_unlock;
		}
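		/*
		 * n = number of WQEs currently queued (distance from tail
		 * to head, modulo the ring size); the new ring must have
		 * room for all of them plus one unused separator slot.
		 */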
		n = head;
		if (n < tail)
			n += srq->rq.size - tail;
		else
			n -= tail;
		if (size <= n) {
			ret = -EINVAL;
			goto bail_unlock;
		}
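		/*
		 * Copy the queued WQEs from the old ring, starting at the
		 * old tail, into the start of the new ring.
		 */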
		n = 0;
		p = tmp_rq.kwq->curr_wq;
		while (tail != head) {
			struct rvt_rwqe *wqe;
			int i;

			wqe = rvt_get_rwqe_ptr(&srq->rq, tail);
			p->wr_id = wqe->wr_id;
			p->num_sge = wqe->num_sge;
			for (i = 0; i < wqe->num_sge; i++)
				p->sg_list[i] = wqe->sg_list[i];
			n++;
			p = (struct rvt_rwqe *)((char *)p + sz);
			if (++tail >= srq->rq.size)
				tail = 0;
		}
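		/* Publish the new ring; the n copied WQEs occupy [0, n). */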
		srq->rq.kwq = tmp_rq.kwq;
		if (udata) {
			srq->rq.wq = tmp_rq.wq;
			RDMA_WRITE_UAPI_ATOMIC(tmp_rq.wq->head, n);
			RDMA_WRITE_UAPI_ATOMIC(tmp_rq.wq->tail, 0);
		} else {
			tmp_rq.kwq->head = n;
			tmp_rq.kwq->tail = 0;
		}
		srq->rq.size = size;
		if (attr_mask & IB_SRQ_LIMIT)
			srq->limit = attr->srq_limit;
		spin_unlock_irq(&srq->rq.kwq->c_lock);

		vfree(owq);
		kvfree(okwq);

		if (srq->ip) {
			struct rvt_mmap_info *ip = srq->ip;
			struct rvt_dev_info *dev = ib_to_rvt(srq->ibsrq.device);
			u32 s = sizeof(struct rvt_rwq) + size * sz;

			rvt_update_mmap_info(dev, ip, s, tmp_rq.wq);

			/*
			 * Return the offset to mmap.
			 * See rvt_mmap() for details.
			 */
			if (udata && udata->inlen >= sizeof(__u64)) {
				ret = ib_copy_to_udata(udata, &ip->offset,
						       sizeof(ip->offset));
				if (ret)
					return ret;
			}

			/*
			 * Put user mapping info onto the pending list
			 * unless it already is on the list.
			 */
			spin_lock_irq(&dev->pending_lock);
			if (list_empty(&ip->pending_mmaps))
				list_add(&ip->pending_mmaps,
					 &dev->pending_mmaps);
			spin_unlock_irq(&dev->pending_lock);
		}
	} else if (attr_mask & IB_SRQ_LIMIT) {
		spin_lock_irq(&srq->rq.kwq->c_lock);
		if (attr->srq_limit >= srq->rq.size)
			ret = -EINVAL;
		else
			srq->limit = attr->srq_limit;
		spin_unlock_irq(&srq->rq.kwq->c_lock);
	}
	return ret;

bail_unlock:
	spin_unlock_irq(&srq->rq.kwq->c_lock);
bail_free:
	rvt_free_rq(&tmp_rq);
	return ret;
}

/**
 * rvt_query_srq - query srq data
 * @ibsrq: srq to query
 * @attr: return info in attr
 *
 * Return: always 0
 */
int rvt_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
{
	struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);

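	/* One ring slot stays unused so that head == tail means empty. */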
	attr->max_wr = srq->rq.size - 1;
	attr->max_sge = srq->rq.max_sge;
	attr->srq_limit = srq->limit;
	return 0;
}

/**
 * rvt_destroy_srq - destroy an srq
 * @ibsrq: srq object to destroy
 * @udata: user data for libibverbs.so
 */
void rvt_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
{
	struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);
	struct rvt_dev_info *dev = ib_to_rvt(ibsrq->device);

	spin_lock(&dev->n_srqs_lock);
	dev->n_srqs_allocated--;
	spin_unlock(&dev->n_srqs_lock);
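	/* Drop our mmap info reference; the final put frees the user ring. */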
	if (srq->ip)
		kref_put(&srq->ip->ref, rvt_release_mmap_info);
	kvfree(srq->rq.kwq);
}
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000346}