Update Linux to v5.4.2
Change-Id: Idf6911045d9d382da2cfe01b1edff026404ac8fd
diff --git a/drivers/infiniband/sw/rdmavt/mr.c b/drivers/infiniband/sw/rdmavt/mr.c
index 49c9541..a6a39f0 100644
--- a/drivers/infiniband/sw/rdmavt/mr.c
+++ b/drivers/infiniband/sw/rdmavt/mr.c
@@ -96,6 +96,8 @@
for (i = 0; i < rdi->lkey_table.max; i++)
RCU_INIT_POINTER(rdi->lkey_table.table[i], NULL);
+ rdi->dparms.props.max_mr = rdi->lkey_table.max;
+ rdi->dparms.props.max_fmr = rdi->lkey_table.max;
return 0;
}
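This first hunk wires the advertised device limits to the lkey table capacity, so queries of max_mr/max_fmr reflect what the table can actually hold. A minimal standalone sketch of that relationship, with stand-in types (struct props and struct lkey_table here are illustrative, not the kernel's rvt structs):

#include <stdio.h>

struct props { unsigned int max_mr, max_fmr; };
struct lkey_table { unsigned int max; };

static void mr_init(struct props *p, const struct lkey_table *t)
{
	/* The same caps the hunk adds: MR counts bounded by table size. */
	p->max_mr = t->max;
	p->max_fmr = t->max;
}

int main(void)
{
	struct lkey_table t = { .max = 1u << 16 }; /* example table size */
	struct props p;

	mr_init(&p, &t);
	printf("max_mr=%u max_fmr=%u\n", p.max_mr, p.max_fmr);
	return 0;
}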
@@ -381,19 +383,18 @@
{
struct rvt_mr *mr;
struct ib_umem *umem;
- struct scatterlist *sg;
- int n, m, entry;
+ struct sg_page_iter sg_iter;
+ int n, m;
struct ib_mr *ret;
if (length == 0)
return ERR_PTR(-EINVAL);
- umem = ib_umem_get(pd->uobject->context, start, length,
- mr_access_flags, 0);
+ umem = ib_umem_get(udata, start, length, mr_access_flags, 0);
if (IS_ERR(umem))
return (void *)umem;
- n = umem->nmap;
+ n = ib_umem_num_pages(umem);
mr = __rvt_alloc_mr(n, pd);
if (IS_ERR(mr)) {
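Two related changes in this hunk: ib_umem_get() takes the udata pointer rather than pd->uobject->context in this kernel version, and the segment count now comes from ib_umem_num_pages() instead of the DMA-mapped entry count umem->nmap, because the mapping loop below walks individual pages. A standalone sketch of the page-count arithmetic, assuming 4 KiB pages (num_pages is a local stand-in, not the kernel helper):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12 /* assume 4 KiB pages for the example */

/* How many pages the span [addr, addr + length) touches; the caller
 * guarantees length > 0, mirroring the length == 0 check above. */
static size_t num_pages(uint64_t addr, size_t length)
{
	uint64_t first = addr >> PAGE_SHIFT;
	uint64_t last = (addr + length - 1) >> PAGE_SHIFT;

	return (size_t)(last - first + 1);
}

int main(void)
{
	/* A 10 KiB buffer starting mid-page touches 3 pages. */
	printf("%zu\n", num_pages(0x1800, 10 * 1024));
	return 0;
}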
@@ -408,23 +409,21 @@
mr->mr.access_flags = mr_access_flags;
mr->umem = umem;
- mr->mr.page_shift = umem->page_shift;
+ mr->mr.page_shift = PAGE_SHIFT;
m = 0;
n = 0;
- for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
+ for_each_sg_page (umem->sg_head.sgl, &sg_iter, umem->nmap, 0) {
void *vaddr;
- vaddr = page_address(sg_page(sg));
+ vaddr = page_address(sg_page_iter_page(&sg_iter));
if (!vaddr) {
ret = ERR_PTR(-EINVAL);
goto bail_inval;
}
mr->mr.map[m]->segs[n].vaddr = vaddr;
- mr->mr.map[m]->segs[n].length = BIT(umem->page_shift);
- trace_rvt_mr_user_seg(&mr->mr, m, n, vaddr,
- BIT(umem->page_shift));
- n++;
- if (n == RVT_SEGSZ) {
+ mr->mr.map[m]->segs[n].length = PAGE_SIZE;
+ trace_rvt_mr_user_seg(&mr->mr, m, n, vaddr, PAGE_SIZE);
+ if (++n == RVT_SEGSZ) {
m++;
n = 0;
}
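With for_each_sg_page() the loop body runs once per page rather than once per scatterlist entry, so every segment is exactly PAGE_SIZE and the increment folds into the roll-over test. A standalone sketch of the two-level (map, segment) indexing the loop maintains (the RVT_SEGSZ value is illustrative, not the kernel's):

#include <stdio.h>

#define RVT_SEGSZ 8 /* illustrative; the kernel defines its own value */

int main(void)
{
	int m = 0, n = 0;

	/* One iteration per page, as for_each_sg_page() now provides. */
	for (int page = 0; page < 20; page++) {
		printf("page %2d -> map %d seg %d\n", page, m, n);
		if (++n == RVT_SEGSZ) { /* same roll-over as the hunk */
			m++;
			n = 0;
		}
	}
	return 0;
}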
@@ -551,7 +550,7 @@
*
* Returns 0 on success.
*/
-int rvt_dereg_mr(struct ib_mr *ibmr)
+int rvt_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
struct rvt_mr *mr = to_imr(ibmr);
int ret;
@@ -563,8 +562,7 @@
if (ret)
goto out;
rvt_deinit_mregion(&mr->mr);
- if (mr->umem)
- ib_umem_release(mr->umem);
+ ib_umem_release(mr->umem);
kfree(mr);
out:
return ret;
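The dropped if (mr->umem) guard is safe because ib_umem_release() accepts a NULL argument in this kernel version, kfree()-style, so the caller-side check is redundant. A standalone model of that contract (struct umem and umem_release are stand-ins, not the kernel API):

#include <stdlib.h>

struct umem { void *pages; };

static void umem_release(struct umem *um)
{
	if (!um)	/* tolerate NULL, like kfree() */
		return;
	free(um->pages);
	free(um);
}

int main(void)
{
	struct umem *um;

	umem_release(NULL); /* safe no-op: caller needs no guard */

	um = calloc(1, sizeof(*um));
	umem_release(um);
	return 0;
}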
@@ -578,9 +576,8 @@
*
* Return: the memory region on success, otherwise return an errno.
*/
-struct ib_mr *rvt_alloc_mr(struct ib_pd *pd,
- enum ib_mr_type mr_type,
- u32 max_num_sg)
+struct ib_mr *rvt_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
+ u32 max_num_sg, struct ib_udata *udata)
{
struct rvt_mr *mr;
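rvt_alloc_mr() only changes shape here: like rvt_dereg_mr() above, it gains a struct ib_udata * so the core can pass user-space context through every MR entry point uniformly. A standalone sketch of the general pattern, where a callback grows a parameter and its ops-table slot and implementations must move together (the names are illustrative, not the ib_device ops layout):

#include <stddef.h>
#include <stdio.h>

struct udata { size_t inlen; };

struct mr_ops {
	int (*alloc_mr)(int mr_type, unsigned int max_num_sg,
			struct udata *udata);
};

static int my_alloc_mr(int mr_type, unsigned int max_num_sg,
		       struct udata *udata)
{
	(void)udata; /* kernel-verbs callers pass NULL; may be ignored */
	printf("alloc type=%d max_sg=%u\n", mr_type, max_num_sg);
	return 0;
}

int main(void)
{
	struct mr_ops ops = { .alloc_mr = my_alloc_mr };

	return ops.alloc_mr(0, 32, NULL);
}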
@@ -611,17 +608,12 @@
if (unlikely(mapped_segs == mr->mr.max_segs))
return -ENOMEM;
- if (mr->mr.length == 0) {
- mr->mr.user_base = addr;
- mr->mr.iova = addr;
- }
-
m = mapped_segs / RVT_SEGSZ;
n = mapped_segs % RVT_SEGSZ;
mr->mr.map[m]->segs[n].vaddr = (void *)addr;
mr->mr.map[m]->segs[n].length = ps;
- trace_rvt_mr_page_seg(&mr->mr, m, n, (void *)addr, ps);
mr->mr.length += ps;
+ trace_rvt_mr_page_seg(&mr->mr, m, n, (void *)addr, ps);
return 0;
}
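rvt_set_page() no longer seeds user_base/iova from the first page; that bookkeeping moves to rvt_map_mr_sg() (next hunk), where the final ibmr->iova is actually known. The trace point also moves below the length update so it records the region with the just-added segment included. A standalone model of the per-page accumulation, with stand-in types and an illustrative segment size:

#include <stdio.h>
#include <stdint.h>

#define SEGSZ 8 /* illustrative segments per map block */

struct seg { void *vaddr; uint32_t length; };
struct mr {
	struct seg segs[4][SEGSZ];
	size_t length;   /* bytes mapped so far */
	size_t max_segs;
};

static int set_page(struct mr *mr, uint64_t addr, uint32_t ps)
{
	/* Segments filled so far, derived from the running length
	 * (the kernel uses length >> page_shift); every segment is
	 * ps bytes. */
	size_t mapped = mr->length / ps;
	size_t m, n;

	if (mapped == mr->max_segs)
		return -1; /* -ENOMEM in the kernel */

	m = mapped / SEGSZ;
	n = mapped % SEGSZ;
	mr->segs[m][n].vaddr = (void *)(uintptr_t)addr;
	mr->segs[m][n].length = ps;
	mr->length += ps;
	/* the trace would fire here, after the update, as in the hunk */
	return 0;
}

int main(void)
{
	struct mr mr = { .max_segs = 4 * SEGSZ };

	for (uint64_t a = 0x10000; a < 0x15000; a += 0x1000)
		set_page(&mr, a, 0x1000);
	printf("length=%zu bytes\n", mr.length);
	return 0;
}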
@@ -633,17 +625,25 @@
* @sg_nents: number of entries in sg
* @sg_offset: offset in bytes into sg
*
+ * Overwrite rvt_mr length with mr length calculated by ib_sg_to_pages.
+ *
* Return: number of sg elements mapped to the memory region
*/
int rvt_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
int sg_nents, unsigned int *sg_offset)
{
struct rvt_mr *mr = to_imr(ibmr);
+ int ret;
mr->mr.length = 0;
mr->mr.page_shift = PAGE_SHIFT;
- return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset,
- rvt_set_page);
+ ret = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rvt_set_page);
+ mr->mr.user_base = ibmr->iova;
+ mr->mr.iova = ibmr->iova;
+ mr->mr.offset = ibmr->iova - (u64)mr->mr.map[0]->segs[0].vaddr;
+ mr->mr.length = (size_t)ibmr->length;
+ trace_rvt_map_mr_sg(ibmr, sg_nents, sg_offset);
+ return ret;
}
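By the time ib_sg_to_pages() returns it has called rvt_set_page() for each page and set ibmr->iova and ibmr->length, so this hunk copies those into the rvt-level MR and derives the byte offset of the iova into the first mapped page. A standalone sketch of that offset arithmetic, with made-up example values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* The first segment holds the page-aligned start of the
	 * region; the iova may begin mid-page. */
	uint64_t first_seg_vaddr = 0x7f0000001000;
	uint64_t iova = 0x7f0000001234;

	uint64_t offset = iova - first_seg_vaddr;

	printf("offset into first page: 0x%llx\n",
	       (unsigned long long)offset); /* prints 0x234 */
	return 0;
}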
/**
@@ -674,6 +674,7 @@
ibmr->rkey = key;
mr->mr.lkey = key;
mr->mr.access_flags = access;
+ mr->mr.iova = ibmr->iova;
atomic_set(&mr->mr.lkey_invalid, 0);
return 0;
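The added assignment keeps the driver-private copy of the iova in sync with the core-layer one when a work request re-registers the region; the keys and access flags were already being refreshed. A standalone model of that duplicated-field pattern (both structs are stand-ins):

#include <stdio.h>
#include <stdint.h>

struct core_mr { uint64_t iova; uint32_t rkey; };
struct drv_mr { struct core_mr core; uint64_t iova; uint32_t lkey; };

static void fast_reg(struct drv_mr *mr, uint32_t key, uint64_t iova)
{
	mr->core.iova = iova;
	mr->core.rkey = key;
	mr->lkey = key;
	mr->iova = mr->core.iova; /* the line this hunk adds */
}

int main(void)
{
	struct drv_mr mr = { {0, 0}, 0, 0 };

	fast_reg(&mr, 0x1234, 0xdeadbeef000ULL);
	printf("lkey=0x%x iova=0x%llx\n", (unsigned int)mr.lkey,
	       (unsigned long long)mr.iova);
	return 0;
}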