/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <rdma/ib_umem.h>
#include <linux/atomic.h>
#include <rdma/ib_user_verbs.h>

#include "iw_cxgb4.h"

int use_dsgl = 1;
module_param(use_dsgl, int, 0644);
MODULE_PARM_DESC(use_dsgl, "Use DSGL for PBL/FastReg (default=1) (DEPRECATED)");
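
/*
 * use_dsgl and inline_threshold (below) are 0644 module parameters, so they
 * can be set at load time or changed through sysfs at runtime.  Illustrative
 * usage, assuming the usual iw_cxgb4 module name:
 *
 *   modprobe iw_cxgb4 inline_threshold=256
 *   echo 0 > /sys/module/iw_cxgb4/parameters/use_dsgl
 */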

#define T4_ULPTX_MIN_IO 32
#define C4IW_MAX_INLINE_SIZE 96
#define T4_ULPTX_MAX_DMA 1024
#define C4IW_INLINE_THRESHOLD 128

static int inline_threshold = C4IW_INLINE_THRESHOLD;
module_param(inline_threshold, int, 0644);
MODULE_PARM_DESC(inline_threshold, "inline vs dsgl threshold (default=128)");

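/*
 * T4 and T5 hardware apparently cannot handle memory regions of 8GB or
 * larger, so such lengths are rejected at registration time; later chips
 * are not subject to this check.
 */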
static int mr_exceeds_hw_limits(struct c4iw_dev *dev, u64 length)
{
	return (is_t4(dev->rdev.lldi.adapter_type) ||
		is_t5(dev->rdev.lldi.adapter_type)) &&
		length >= 8*1024*1024*1024ULL;
}

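/*
 * Write a 32B-aligned, DMA-mapped buffer into adapter memory using a single
 * ULP_TX_MEM_WRITE work request that carries one DSGL entry.  If @wr_waitp
 * is set, the WR requests a completion and is sent via c4iw_ref_send_wait();
 * otherwise it is posted fire-and-forget through c4iw_ofld_send().
 */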
static int _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr,
				       u32 len, dma_addr_t data,
				       struct sk_buff *skb,
				       struct c4iw_wr_wait *wr_waitp)
{
	struct ulp_mem_io *req;
	struct ulptx_sgl *sgl;
	u8 wr_len;
	int ret = 0;

	addr &= 0x7FFFFFF;

	if (wr_waitp)
		c4iw_init_wr_wait(wr_waitp);
	wr_len = roundup(sizeof(*req) + sizeof(*sgl), 16);

	if (!skb) {
		skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
		if (!skb)
			return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

	req = __skb_put_zero(skb, wr_len);
	INIT_ULPTX_WR(req, wr_len, 0, 0);
	req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR) |
			(wr_waitp ? FW_WR_COMPL_F : 0));
	req->wr.wr_lo = wr_waitp ? (__force __be64)(unsigned long)wr_waitp : 0L;
	req->wr.wr_mid = cpu_to_be32(FW_WR_LEN16_V(DIV_ROUND_UP(wr_len, 16)));
	req->cmd = cpu_to_be32(ULPTX_CMD_V(ULP_TX_MEM_WRITE) |
			       T5_ULP_MEMIO_ORDER_V(1) |
			       T5_ULP_MEMIO_FID_V(rdev->lldi.rxq_ids[0]));
	req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN_V(len>>5));
	req->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len-sizeof(req->wr), 16));
	req->lock_addr = cpu_to_be32(ULP_MEMIO_ADDR_V(addr));

	sgl = (struct ulptx_sgl *)(req + 1);
	sgl->cmd_nsge = cpu_to_be32(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
				    ULPTX_NSGE_V(1));
	sgl->len0 = cpu_to_be32(len);
	sgl->addr0 = cpu_to_be64(data);

	if (wr_waitp)
		ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, 0, __func__);
	else
		ret = c4iw_ofld_send(rdev, skb);
	return ret;
}

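/*
 * Write @len bytes into adapter memory as immediate data, splitting the
 * buffer into C4IW_MAX_INLINE_SIZE (96-byte) chunks with one
 * ULP_TX_MEM_WRITE work request per chunk.  @addr is in 32-byte units, so
 * each 96-byte chunk advances it by 3.  Chunks are zero-padded up to the
 * 32-byte T4_ULPTX_MIN_IO unit, only the final WR requests a completion,
 * and a NULL @data zeroes the target memory instead of copying.
 */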
static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len,
				  void *data, struct sk_buff *skb,
				  struct c4iw_wr_wait *wr_waitp)
{
	struct ulp_mem_io *req;
	struct ulptx_idata *sc;
	u8 wr_len, *to_dp, *from_dp;
	int copy_len, num_wqe, i, ret = 0;
	__be32 cmd = cpu_to_be32(ULPTX_CMD_V(ULP_TX_MEM_WRITE));

	if (is_t4(rdev->lldi.adapter_type))
		cmd |= cpu_to_be32(ULP_MEMIO_ORDER_F);
	else
		cmd |= cpu_to_be32(T5_ULP_MEMIO_IMM_F);

	addr &= 0x7FFFFFF;
	pr_debug("addr 0x%x len %u\n", addr, len);
	num_wqe = DIV_ROUND_UP(len, C4IW_MAX_INLINE_SIZE);
	c4iw_init_wr_wait(wr_waitp);
	for (i = 0; i < num_wqe; i++) {

		copy_len = len > C4IW_MAX_INLINE_SIZE ? C4IW_MAX_INLINE_SIZE :
			   len;
		wr_len = roundup(sizeof(*req) + sizeof(*sc) +
				 roundup(copy_len, T4_ULPTX_MIN_IO),
				 16);

		if (!skb) {
			skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
			if (!skb)
				return -ENOMEM;
		}
		set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

		req = __skb_put_zero(skb, wr_len);
		INIT_ULPTX_WR(req, wr_len, 0, 0);

		if (i == (num_wqe-1)) {
			req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR) |
						    FW_WR_COMPL_F);
			req->wr.wr_lo = (__force __be64)(unsigned long)wr_waitp;
		} else
			req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR));
		req->wr.wr_mid = cpu_to_be32(
				       FW_WR_LEN16_V(DIV_ROUND_UP(wr_len, 16)));

		req->cmd = cmd;
		req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN_V(
				DIV_ROUND_UP(copy_len, T4_ULPTX_MIN_IO)));
		req->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len-sizeof(req->wr),
						      16));
		req->lock_addr = cpu_to_be32(ULP_MEMIO_ADDR_V(addr + i * 3));

		sc = (struct ulptx_idata *)(req + 1);
		sc->cmd_more = cpu_to_be32(ULPTX_CMD_V(ULP_TX_SC_IMM));
		sc->len = cpu_to_be32(roundup(copy_len, T4_ULPTX_MIN_IO));

		to_dp = (u8 *)(sc + 1);
		from_dp = (u8 *)data + i * C4IW_MAX_INLINE_SIZE;
		if (data)
			memcpy(to_dp, from_dp, copy_len);
		else
			memset(to_dp, 0, copy_len);
		if (copy_len % T4_ULPTX_MIN_IO)
			memset(to_dp + copy_len, 0, T4_ULPTX_MIN_IO -
			       (copy_len % T4_ULPTX_MIN_IO));
		if (i == (num_wqe-1))
			ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, 0,
						 __func__);
		else
			ret = c4iw_ofld_send(rdev, skb);
		if (ret)
			break;
		skb = NULL;
		len -= C4IW_MAX_INLINE_SIZE;
	}

	return ret;
}

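/*
 * DMA-map @data once and stream it into adapter memory in DSGL chunks of
 * up to T4_ULPTX_MAX_DMA (1024) bytes, each rounded down to a 32-byte
 * multiple.  Any remainder at or below inline_threshold is sent as an
 * inline (immediate-data) write instead.  @addr is in 32-byte units.
 */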
static int _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len,
			       void *data, struct sk_buff *skb,
			       struct c4iw_wr_wait *wr_waitp)
{
	u32 remain = len;
	u32 dmalen;
	int ret = 0;
	dma_addr_t daddr;
	dma_addr_t save;

	daddr = dma_map_single(&rdev->lldi.pdev->dev, data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(&rdev->lldi.pdev->dev, daddr))
		return -1;
	save = daddr;

	while (remain > inline_threshold) {
		if (remain < T4_ULPTX_MAX_DMA) {
			if (remain & ~T4_ULPTX_MIN_IO)
				dmalen = remain & ~(T4_ULPTX_MIN_IO-1);
			else
				dmalen = remain;
		} else
			dmalen = T4_ULPTX_MAX_DMA;
		remain -= dmalen;
		ret = _c4iw_write_mem_dma_aligned(rdev, addr, dmalen, daddr,
						  skb, remain ? NULL : wr_waitp);
		if (ret)
			goto out;
		addr += dmalen >> 5;
		data += dmalen;
		daddr += dmalen;
	}
	if (remain)
		ret = _c4iw_write_mem_inline(rdev, addr, remain, data, skb,
					     wr_waitp);
out:
	dma_unmap_single(&rdev->lldi.pdev->dev, save, len, DMA_TO_DEVICE);
	return ret;
}

/*
 * Write len bytes of data into addr (32B aligned address).
 * If data is NULL, clear len bytes of memory to zero.
 */
static int write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len,
			     void *data, struct sk_buff *skb,
			     struct c4iw_wr_wait *wr_waitp)
{
	int ret;

	if (!rdev->lldi.ulptx_memwrite_dsgl || !use_dsgl ||
	    len <= inline_threshold) {
		ret = _c4iw_write_mem_inline(rdev, addr, len, data, skb,
					     wr_waitp);
		goto out;
	}

	ret = _c4iw_write_mem_dma(rdev, addr, len, data, skb, wr_waitp);
	if (ret) {
		pr_warn_ratelimited("%s: dma map failure (non fatal)\n",
				    pci_name(rdev->lldi.pdev));
		ret = _c4iw_write_mem_inline(rdev, addr, len, data, skb,
					     wr_waitp);
	}
out:
	return ret;
}

/*
 * Build and write a TPT entry.
 * IN: stag key, pdid, perm, bind_enabled, zbva, to, len, page_size,
 *     pbl_size and pbl_addr
 * OUT: stag index
 */
static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
			   u32 *stag, u8 stag_state, u32 pdid,
			   enum fw_ri_stag_type type, enum fw_ri_mem_perms perm,
			   int bind_enabled, u32 zbva, u64 to,
			   u64 len, u8 page_size, u32 pbl_size, u32 pbl_addr,
			   struct sk_buff *skb, struct c4iw_wr_wait *wr_waitp)
{
	int err;
	struct fw_ri_tpte *tpt;
	u32 stag_idx;
	static atomic_t key;

	if (c4iw_fatal_error(rdev))
		return -EIO;

	tpt = kmalloc(sizeof(*tpt), GFP_KERNEL);
	if (!tpt)
		return -ENOMEM;

	stag_state = stag_state > 0;
	stag_idx = (*stag) >> 8;

	if ((!reset_tpt_entry) && (*stag == T4_STAG_UNSET)) {
		stag_idx = c4iw_get_resource(&rdev->resource.tpt_table);
		if (!stag_idx) {
			mutex_lock(&rdev->stats.lock);
			rdev->stats.stag.fail++;
			mutex_unlock(&rdev->stats.lock);
			kfree(tpt);
			return -ENOMEM;
		}
		mutex_lock(&rdev->stats.lock);
		rdev->stats.stag.cur += 32;
		if (rdev->stats.stag.cur > rdev->stats.stag.max)
			rdev->stats.stag.max = rdev->stats.stag.cur;
		mutex_unlock(&rdev->stats.lock);
		*stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
	}
	pr_debug("stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
		 stag_state, type, pdid, stag_idx);

	/* write TPT entry */
	if (reset_tpt_entry)
		memset(tpt, 0, sizeof(*tpt));
	else {
		tpt->valid_to_pdid = cpu_to_be32(FW_RI_TPTE_VALID_F |
			FW_RI_TPTE_STAGKEY_V((*stag & FW_RI_TPTE_STAGKEY_M)) |
			FW_RI_TPTE_STAGSTATE_V(stag_state) |
			FW_RI_TPTE_STAGTYPE_V(type) | FW_RI_TPTE_PDID_V(pdid));
		tpt->locread_to_qpid = cpu_to_be32(FW_RI_TPTE_PERM_V(perm) |
			(bind_enabled ? FW_RI_TPTE_MWBINDEN_F : 0) |
			FW_RI_TPTE_ADDRTYPE_V((zbva ? FW_RI_ZERO_BASED_TO :
						      FW_RI_VA_BASED_TO))|
			FW_RI_TPTE_PS_V(page_size));
		tpt->nosnoop_pbladdr = !pbl_size ? 0 : cpu_to_be32(
			FW_RI_TPTE_PBLADDR_V(PBL_OFF(rdev, pbl_addr)>>3));
		tpt->len_lo = cpu_to_be32((u32)(len & 0xffffffffUL));
		tpt->va_hi = cpu_to_be32((u32)(to >> 32));
		tpt->va_lo_fbo = cpu_to_be32((u32)(to & 0xffffffffUL));
		tpt->dca_mwbcnt_pstag = cpu_to_be32(0);
		tpt->len_hi = cpu_to_be32((u32)(len >> 32));
	}
	err = write_adapter_mem(rdev, stag_idx +
				(rdev->lldi.vr->stag.start >> 5),
				sizeof(*tpt), tpt, skb, wr_waitp);

	if (reset_tpt_entry) {
		c4iw_put_resource(&rdev->resource.tpt_table, stag_idx);
		mutex_lock(&rdev->stats.lock);
		rdev->stats.stag.cur -= 32;
		mutex_unlock(&rdev->stats.lock);
	}
	kfree(tpt);
	return err;
}

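/*
 * Write @pbl_size 64-bit page-list entries at @pbl_addr in adapter memory.
 * @pbl_addr is a byte address within the PBL region; write_adapter_mem()
 * takes 32-byte units, hence the >> 5, and each entry is 8 bytes, hence
 * the << 3 on the length.
 */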
static int write_pbl(struct c4iw_rdev *rdev, __be64 *pbl,
		     u32 pbl_addr, u32 pbl_size, struct c4iw_wr_wait *wr_waitp)
{
	int err;

	pr_debug("*pbl_addr 0x%x, pbl_base 0x%x, pbl_size %d\n",
		 pbl_addr, rdev->lldi.vr->pbl.start,
		 pbl_size);

	err = write_adapter_mem(rdev, pbl_addr >> 5, pbl_size << 3, pbl, NULL,
				wr_waitp);
	return err;
}

static int dereg_mem(struct c4iw_rdev *rdev, u32 stag, u32 pbl_size,
		     u32 pbl_addr, struct sk_buff *skb,
		     struct c4iw_wr_wait *wr_waitp)
{
	return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0,
			       pbl_size, pbl_addr, skb, wr_waitp);
}

static int allocate_window(struct c4iw_rdev *rdev, u32 *stag, u32 pdid,
			   struct c4iw_wr_wait *wr_waitp)
{
	*stag = T4_STAG_UNSET;
	return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_MW, 0, 0, 0,
			       0UL, 0, 0, 0, 0, NULL, wr_waitp);
}

static int deallocate_window(struct c4iw_rdev *rdev, u32 stag,
			     struct sk_buff *skb,
			     struct c4iw_wr_wait *wr_waitp)
{
	return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0, 0,
			       0, skb, wr_waitp);
}

static int allocate_stag(struct c4iw_rdev *rdev, u32 *stag, u32 pdid,
			 u32 pbl_size, u32 pbl_addr,
			 struct c4iw_wr_wait *wr_waitp)
{
	*stag = T4_STAG_UNSET;
	return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_NSMR, 0, 0, 0,
			       0UL, 0, 0, pbl_size, pbl_addr, NULL, wr_waitp);
}

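/*
 * Finalize a successful registration: mark the MR valid, derive the mmid
 * (stag >> 8), and publish the MR in the device's mrs xarray so it can be
 * looked up later.
 */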
static int finish_mem_reg(struct c4iw_mr *mhp, u32 stag)
{
	u32 mmid;

	mhp->attr.state = 1;
	mhp->attr.stag = stag;
	mmid = stag >> 8;
	mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
	mhp->ibmr.length = mhp->attr.len;
	mhp->ibmr.page_size = 1U << (mhp->attr.page_size + 12);
	pr_debug("mmid 0x%x mhp %p\n", mmid, mhp);
	return xa_insert_irq(&mhp->rhp->mrs, mmid, mhp, GFP_KERNEL);
}

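/*
 * Write the TPT entry for a populated MR and publish it; if publishing
 * fails, the TPT entry is torn down again via dereg_mem().
 */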
static int register_mem(struct c4iw_dev *rhp, struct c4iw_pd *php,
			struct c4iw_mr *mhp, int shift)
{
	u32 stag = T4_STAG_UNSET;
	int ret;

	ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, mhp->attr.pdid,
			      FW_RI_STAG_NSMR, mhp->attr.len ?
			      mhp->attr.perms : 0,
			      mhp->attr.mw_bind_enable, mhp->attr.zbva,
			      mhp->attr.va_fbo, mhp->attr.len ?
			      mhp->attr.len : -1, shift - 12,
			      mhp->attr.pbl_size, mhp->attr.pbl_addr, NULL,
			      mhp->wr_waitp);
	if (ret)
		return ret;

	ret = finish_mem_reg(mhp, stag);
	if (ret) {
		dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
			  mhp->attr.pbl_addr, mhp->dereg_skb, mhp->wr_waitp);
		mhp->dereg_skb = NULL;
	}
	return ret;
}

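/*
 * Reserve a physical buffer list of @npages 64-bit entries (npages << 3
 * bytes) from the adapter's PBL pool.
 */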
static int alloc_pbl(struct c4iw_mr *mhp, int npages)
{
	mhp->attr.pbl_addr = c4iw_pblpool_alloc(&mhp->rhp->rdev,
						npages << 3);

	if (!mhp->attr.pbl_addr)
		return -ENOMEM;

	mhp->attr.pbl_size = npages;

	return 0;
}

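/*
 * Register a DMA MR: a TPT entry with no PBL and length ~0ULL, i.e. one
 * that spans all of memory (presumably backing the ib_get_dma_mr() verb).
 */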
struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mr *mhp;
	int ret;
	u32 stag = T4_STAG_UNSET;

	pr_debug("ib_pd %p\n", pd);
	php = to_c4iw_pd(pd);
	rhp = php->rhp;

	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);
	mhp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
	if (!mhp->wr_waitp) {
		ret = -ENOMEM;
		goto err_free_mhp;
	}
	c4iw_init_wr_wait(mhp->wr_waitp);

	mhp->dereg_skb = alloc_skb(SGE_MAX_WR_LEN, GFP_KERNEL);
	if (!mhp->dereg_skb) {
		ret = -ENOMEM;
		goto err_free_wr_wait;
	}

	mhp->rhp = rhp;
	mhp->attr.pdid = php->pdid;
	mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
	mhp->attr.mw_bind_enable = (acc&IB_ACCESS_MW_BIND) == IB_ACCESS_MW_BIND;
	mhp->attr.zbva = 0;
	mhp->attr.va_fbo = 0;
	mhp->attr.page_size = 0;
	mhp->attr.len = ~0ULL;
	mhp->attr.pbl_size = 0;

	ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, php->pdid,
			      FW_RI_STAG_NSMR, mhp->attr.perms,
			      mhp->attr.mw_bind_enable, 0, 0, ~0ULL, 0, 0, 0,
			      NULL, mhp->wr_waitp);
	if (ret)
		goto err_free_skb;

	ret = finish_mem_reg(mhp, stag);
	if (ret)
		goto err_dereg_mem;
	return &mhp->ibmr;
err_dereg_mem:
	dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
		  mhp->attr.pbl_addr, mhp->dereg_skb, mhp->wr_waitp);
err_free_skb:
	kfree_skb(mhp->dereg_skb);
err_free_wr_wait:
	c4iw_put_wr_wait(mhp->wr_waitp);
err_free_mhp:
	kfree(mhp);
	return ERR_PTR(ret);
}

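/*
 * Register a user memory region: pin the pages with ib_umem_get(), copy
 * their DMA addresses into the PBL one page-sized batch at a time via
 * write_pbl(), then write the TPT entry through register_mem().
 */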
struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
			       u64 virt, int acc, struct ib_udata *udata)
{
	__be64 *pages;
	int shift, n, i;
	int err = -ENOMEM;
	struct ib_block_iter biter;
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mr *mhp;

	pr_debug("ib_pd %p\n", pd);

	if (length == ~0ULL)
		return ERR_PTR(-EINVAL);

	if ((length + start) < start)
		return ERR_PTR(-EINVAL);

	php = to_c4iw_pd(pd);
	rhp = php->rhp;

	if (mr_exceeds_hw_limits(rhp, length))
		return ERR_PTR(-EINVAL);

	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);
	mhp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
	if (!mhp->wr_waitp)
		goto err_free_mhp;

	mhp->dereg_skb = alloc_skb(SGE_MAX_WR_LEN, GFP_KERNEL);
	if (!mhp->dereg_skb)
		goto err_free_wr_wait;

	mhp->rhp = rhp;

	mhp->umem = ib_umem_get(pd->device, start, length, acc);
	if (IS_ERR(mhp->umem))
		goto err_free_skb;

	shift = PAGE_SHIFT;

	n = ib_umem_num_dma_blocks(mhp->umem, 1 << shift);
	err = alloc_pbl(mhp, n);
	if (err)
		goto err_umem_release;

	pages = (__be64 *) __get_free_page(GFP_KERNEL);
	if (!pages) {
		err = -ENOMEM;
		goto err_pbl_free;
	}

	i = n = 0;

	rdma_umem_for_each_dma_block(mhp->umem, &biter, 1 << shift) {
		pages[i++] = cpu_to_be64(rdma_block_iter_dma_address(&biter));
		if (i == PAGE_SIZE / sizeof(*pages)) {
			err = write_pbl(&mhp->rhp->rdev, pages,
					mhp->attr.pbl_addr + (n << 3), i,
					mhp->wr_waitp);
			if (err)
				goto pbl_done;
			n += i;
			i = 0;
		}
	}

	if (i)
		err = write_pbl(&mhp->rhp->rdev, pages,
				mhp->attr.pbl_addr + (n << 3), i,
				mhp->wr_waitp);

pbl_done:
	free_page((unsigned long) pages);
	if (err)
		goto err_pbl_free;

	mhp->attr.pdid = php->pdid;
	mhp->attr.zbva = 0;
	mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
	mhp->attr.va_fbo = virt;
	mhp->attr.page_size = shift - 12;
	mhp->attr.len = length;

	err = register_mem(rhp, php, mhp, shift);
	if (err)
		goto err_pbl_free;

	return &mhp->ibmr;

err_pbl_free:
	c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
			  mhp->attr.pbl_size << 3);
err_umem_release:
	ib_umem_release(mhp->umem);
err_free_skb:
	kfree_skb(mhp->dereg_skb);
err_free_wr_wait:
	c4iw_put_wr_wait(mhp->wr_waitp);
err_free_mhp:
	kfree(mhp);
	return ERR_PTR(err);
}

int c4iw_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
{
	struct c4iw_mw *mhp = to_c4iw_mw(ibmw);
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	u32 mmid;
	u32 stag = 0;
	int ret;

	if (ibmw->type != IB_MW_TYPE_1)
		return -EINVAL;

	php = to_c4iw_pd(ibmw->pd);
	rhp = php->rhp;
	mhp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
	if (!mhp->wr_waitp)
		return -ENOMEM;

	mhp->dereg_skb = alloc_skb(SGE_MAX_WR_LEN, GFP_KERNEL);
	if (!mhp->dereg_skb) {
		ret = -ENOMEM;
		goto free_wr_wait;
	}

	ret = allocate_window(&rhp->rdev, &stag, php->pdid, mhp->wr_waitp);
	if (ret)
		goto free_skb;

	mhp->rhp = rhp;
	mhp->attr.pdid = php->pdid;
	mhp->attr.type = FW_RI_STAG_MW;
	mhp->attr.stag = stag;
	mmid = (stag) >> 8;
	ibmw->rkey = stag;
	if (xa_insert_irq(&rhp->mrs, mmid, mhp, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto dealloc_win;
	}
	pr_debug("mmid 0x%x mhp %p stag 0x%x\n", mmid, mhp, stag);
	return 0;

dealloc_win:
	deallocate_window(&rhp->rdev, mhp->attr.stag, mhp->dereg_skb,
			  mhp->wr_waitp);
free_skb:
	kfree_skb(mhp->dereg_skb);
free_wr_wait:
	c4iw_put_wr_wait(mhp->wr_waitp);
	return ret;
}

int c4iw_dealloc_mw(struct ib_mw *mw)
{
	struct c4iw_dev *rhp;
	struct c4iw_mw *mhp;
	u32 mmid;

	mhp = to_c4iw_mw(mw);
	rhp = mhp->rhp;
	mmid = (mw->rkey) >> 8;
	xa_erase_irq(&rhp->mrs, mmid);
	deallocate_window(&rhp->rdev, mhp->attr.stag, mhp->dereg_skb,
			  mhp->wr_waitp);
	kfree_skb(mhp->dereg_skb);
	c4iw_put_wr_wait(mhp->wr_waitp);
	return 0;
}

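/*
 * Allocate a fast-register MR (FW_RI_STAG_NSMR): reserve a PBL and a
 * DMA-coherent page-list buffer, then allocate the stag.  The MR starts
 * out invalid (state 0) until a fast-register WR populates it.
 */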
struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
			    u32 max_num_sg)
{
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mr *mhp;
	u32 mmid;
	u32 stag = 0;
	int ret = 0;
	int length = roundup(max_num_sg * sizeof(u64), 32);

	php = to_c4iw_pd(pd);
	rhp = php->rhp;

	if (mr_type != IB_MR_TYPE_MEM_REG ||
	    max_num_sg > t4_max_fr_depth(rhp->rdev.lldi.ulptx_memwrite_dsgl &&
					 use_dsgl))
		return ERR_PTR(-EINVAL);

	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp) {
		ret = -ENOMEM;
		goto err;
	}

	mhp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
	if (!mhp->wr_waitp) {
		ret = -ENOMEM;
		goto err_free_mhp;
	}
	c4iw_init_wr_wait(mhp->wr_waitp);

	mhp->mpl = dma_alloc_coherent(&rhp->rdev.lldi.pdev->dev,
				      length, &mhp->mpl_addr, GFP_KERNEL);
	if (!mhp->mpl) {
		ret = -ENOMEM;
		goto err_free_wr_wait;
	}
	mhp->max_mpl_len = length;

	mhp->rhp = rhp;
	ret = alloc_pbl(mhp, max_num_sg);
	if (ret)
		goto err_free_dma;
	mhp->attr.pbl_size = max_num_sg;
	ret = allocate_stag(&rhp->rdev, &stag, php->pdid,
			    mhp->attr.pbl_size, mhp->attr.pbl_addr,
			    mhp->wr_waitp);
	if (ret)
		goto err_free_pbl;
	mhp->attr.pdid = php->pdid;
	mhp->attr.type = FW_RI_STAG_NSMR;
	mhp->attr.stag = stag;
	mhp->attr.state = 0;
	mmid = (stag) >> 8;
	mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
	if (xa_insert_irq(&rhp->mrs, mmid, mhp, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto err_dereg;
	}

	pr_debug("mmid 0x%x mhp %p stag 0x%x\n", mmid, mhp, stag);
	return &(mhp->ibmr);
err_dereg:
	dereg_mem(&rhp->rdev, stag, mhp->attr.pbl_size,
		  mhp->attr.pbl_addr, mhp->dereg_skb, mhp->wr_waitp);
err_free_pbl:
	c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
			  mhp->attr.pbl_size << 3);
err_free_dma:
	dma_free_coherent(&mhp->rhp->rdev.lldi.pdev->dev,
			  mhp->max_mpl_len, mhp->mpl, mhp->mpl_addr);
err_free_wr_wait:
	c4iw_put_wr_wait(mhp->wr_waitp);
err_free_mhp:
	kfree(mhp);
err:
	return ERR_PTR(ret);
}

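/*
 * ib_map_mr_sg() support: ib_sg_to_pages() walks the scatterlist and calls
 * c4iw_set_page() once per page to fill the MR's page list, bounded by the
 * PBL size reserved at allocation time.
 */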
static int c4iw_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct c4iw_mr *mhp = to_c4iw_mr(ibmr);

	if (unlikely(mhp->mpl_len == mhp->attr.pbl_size))
		return -ENOMEM;

	mhp->mpl[mhp->mpl_len++] = addr;

	return 0;
}

int c4iw_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		   unsigned int *sg_offset)
{
	struct c4iw_mr *mhp = to_c4iw_mr(ibmr);

	mhp->mpl_len = 0;

	return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, c4iw_set_page);
}

int c4iw_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
{
	struct c4iw_dev *rhp;
	struct c4iw_mr *mhp;
	u32 mmid;

	pr_debug("ib_mr %p\n", ib_mr);

	mhp = to_c4iw_mr(ib_mr);
	rhp = mhp->rhp;
	mmid = mhp->attr.stag >> 8;
	xa_erase_irq(&rhp->mrs, mmid);
	if (mhp->mpl)
		dma_free_coherent(&mhp->rhp->rdev.lldi.pdev->dev,
				  mhp->max_mpl_len, mhp->mpl, mhp->mpl_addr);
	dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
		  mhp->attr.pbl_addr, mhp->dereg_skb, mhp->wr_waitp);
	if (mhp->attr.pbl_size)
		c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
				  mhp->attr.pbl_size << 3);
	if (mhp->kva)
		kfree((void *) (unsigned long) mhp->kva);
	ib_umem_release(mhp->umem);
	pr_debug("mmid 0x%x ptr %p\n", mmid, mhp);
	c4iw_put_wr_wait(mhp->wr_waitp);
	kfree(mhp);
	return 0;
}

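/*
 * Look up the MR for @rkey under the xarray lock and mark it invalid
 * (attr.state = 0), presumably in response to an invalidate operation.
 */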
void c4iw_invalidate_mr(struct c4iw_dev *rhp, u32 rkey)
{
	struct c4iw_mr *mhp;
	unsigned long flags;

	xa_lock_irqsave(&rhp->mrs, flags);
	mhp = xa_load(&rhp->mrs, rkey >> 8);
	if (mhp)
		mhp->attr.state = 0;
	xa_unlock_irqrestore(&rhp->mrs, flags);
}