Update Linux to v5.10.109
Sourced from [1]
[1] https://cdn.kernel.org/pub/linux/kernel/v5.x/linux-5.10.109.tar.xz
Change-Id: I19bca9fc6762d4e63bcf3e4cba88bbe560d9c76c
Signed-off-by: Olivier Deprez <olivier.deprez@arm.com>
diff --git a/drivers/misc/genwqe/card_utils.c b/drivers/misc/genwqe/card_utils.c
index 2e1c4d2..039b923 100644
--- a/drivers/misc/genwqe/card_utils.c
+++ b/drivers/misc/genwqe/card_utils.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* IBM Accelerator Family 'GenWQE'
*
* (C) Copyright IBM Corp. 2013
@@ -27,7 +27,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
#include "genwqe_driver.h"
#include "card_base.h"
@@ -129,6 +129,9 @@
/**
* genwqe_read_app_id() - Extract app_id
+ * @cd: genwqe device descriptor
+ * @app_name: carrier used to pass-back name
+ * @len: length of data for name
*
* app_unitcfg need to be filled with valid data first
*/
@@ -183,7 +186,7 @@
* @init: initial crc (0xffffffff at start)
*
* polynomial = x^32 * + x^29 + x^18 + x^14 + x^3 + 1 (0x20044009)
-
+ *
* Example: 4 bytes 0x01 0x02 0x03 0x04 with init=0xffffffff should
* result in a crc32 of 0xf33cb7d3.
*
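The polynomial and the worked example in the comment above are quoted verbatim from the kernel source. As a sanity check, a minimal user-space sketch of the bit-wise equivalent of the driver's table-driven genwqe_crc32() (MSB-first, no bit reflection, no final XOR) is shown below; the function name crc32_genwqe and the standalone main() are illustrative only and not part of the driver:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Bit-wise CRC over polynomial 0x20044009, MSB first, no final XOR;
     * equivalent to the driver's table-driven implementation. */
    static uint32_t crc32_genwqe(const uint8_t *buf, size_t len, uint32_t crc)
    {
            while (len--) {
                    crc ^= (uint32_t)*buf++ << 24;
                    for (int bit = 0; bit < 8; bit++)
                            crc = (crc & 0x80000000u) ? (crc << 1) ^ 0x20044009u
                                                      : (crc << 1);
            }
            return crc;
    }

    int main(void)
    {
            const uint8_t data[] = { 0x01, 0x02, 0x03, 0x04 };

            /* Expected value per the kernel comment: 0xf33cb7d3 */
            printf("crc32 = 0x%08x\n",
                   crc32_genwqe(data, sizeof(data), 0xffffffffu));
            return 0;
    }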
@@ -277,7 +280,7 @@
return roundup(len, PAGE_SIZE);
}
-/**
+/*
* genwqe_alloc_sync_sgl() - Allocate memory for sgl and overlapping pages
*
* Allocates memory for sgl and overlapping pages. Pages which might
@@ -460,6 +463,8 @@
/**
* genwqe_free_sync_sgl() - Free memory for sgl and overlapping pages
+ * @cd: genwqe device descriptor
+ * @sgl: scatter gather list describing user-space memory
*
* After the DMA transfer has been completed we free the memory for
* the sgl and the cached pages. Data is being transferred from cached
@@ -515,30 +520,6 @@
}
/**
- * genwqe_free_user_pages() - Give pinned pages back
- *
- * Documentation of get_user_pages is in mm/gup.c:
- *
- * If the page is written to, set_page_dirty (or set_page_dirty_lock,
- * as appropriate) must be called after the page is finished with, and
- * before put_page is called.
- */
-static int genwqe_free_user_pages(struct page **page_list,
- unsigned int nr_pages, int dirty)
-{
- unsigned int i;
-
- for (i = 0; i < nr_pages; i++) {
- if (page_list[i] != NULL) {
- if (dirty)
- set_page_dirty_lock(page_list[i]);
- put_page(page_list[i]);
- }
- }
- return 0;
-}
-
-/**
* genwqe_user_vmap() - Map user-space memory to virtual kernel memory
* @cd: pointer to genwqe device
* @m: mapping params
@@ -597,18 +578,18 @@
m->dma_list = (dma_addr_t *)(m->page_list + m->nr_pages);
/* pin user pages in memory */
- rc = get_user_pages_fast(data & PAGE_MASK, /* page aligned addr */
+ rc = pin_user_pages_fast(data & PAGE_MASK, /* page aligned addr */
m->nr_pages,
m->write ? FOLL_WRITE : 0, /* readable/writable */
m->page_list); /* ptrs to pages */
if (rc < 0)
- goto fail_get_user_pages;
+ goto fail_pin_user_pages;
- /* assumption: get_user_pages can be killed by signals. */
+ /* assumption: pin_user_pages can be killed by signals. */
if (rc < m->nr_pages) {
- genwqe_free_user_pages(m->page_list, rc, m->write);
+ unpin_user_pages_dirty_lock(m->page_list, rc, m->write);
rc = -EFAULT;
- goto fail_get_user_pages;
+ goto fail_pin_user_pages;
}
rc = genwqe_map_pages(cd, m->page_list, m->nr_pages, m->dma_list);
@@ -618,9 +599,9 @@
return 0;
fail_free_user_pages:
- genwqe_free_user_pages(m->page_list, m->nr_pages, m->write);
+ unpin_user_pages_dirty_lock(m->page_list, m->nr_pages, m->write);
- fail_get_user_pages:
+ fail_pin_user_pages:
kfree(m->page_list);
m->page_list = NULL;
m->dma_list = NULL;
@@ -650,8 +631,8 @@
genwqe_unmap_pages(cd, m->dma_list, m->nr_pages);
if (m->page_list) {
- genwqe_free_user_pages(m->page_list, m->nr_pages, m->write);
-
+ unpin_user_pages_dirty_lock(m->page_list, m->nr_pages,
+ m->write);
kfree(m->page_list);
m->page_list = NULL;
m->dma_list = NULL;
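The hunks above convert the driver from get_user_pages_fast() plus a private genwqe_free_user_pages() helper to the pinned-pages API, where pin_user_pages_fast() takes the reference and unpin_user_pages_dirty_lock() combines dirty marking and release in one call. A minimal sketch of the resulting pattern follows; example_pin()/example_unpin() and their parameters are hypothetical stand-ins for the driver's dma_mapping handling, while pin_user_pages_fast() and unpin_user_pages_dirty_lock() are the real kernel interfaces:

    #include <linux/errno.h>
    #include <linux/mm.h>

    /* Sketch only: illustrates the pin/unpin pairing this patch introduces.
     * 'pages', 'nr_pages' and 'writable' stand in for the driver's
     * dma_mapping fields; error handling is reduced to the essentials. */
    static int example_pin(unsigned long uaddr, struct page **pages,
                           int nr_pages, bool writable)
    {
            int rc;

            rc = pin_user_pages_fast(uaddr & PAGE_MASK, nr_pages,
                                     writable ? FOLL_WRITE : 0, pages);
            if (rc < 0)
                    return rc;

            if (rc < nr_pages) {
                    /* Partial pin: release what was pinned, marking pages
                     * dirty only if they could have been written to. */
                    unpin_user_pages_dirty_lock(pages, rc, writable);
                    return -EFAULT;
            }
            return 0;
    }

    static void example_unpin(struct page **pages, int nr_pages, bool writable)
    {
            unpin_user_pages_dirty_lock(pages, nr_pages, writable);
    }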
@@ -734,6 +715,7 @@
/**
* genwqe_set_interrupt_capability() - Configure MSI capability structure
* @cd: pointer to the device
+ * @count: number of vectors to allocate
* Return: 0 if no error
*/
int genwqe_set_interrupt_capability(struct genwqe_dev *cd, int count)
@@ -762,7 +744,7 @@
* @i: index to desired entry
* @m: maximum possible entries
* @addr: addr which is read
- * @index: index in debug array
+ * @idx: index in debug array
* @val: read value
*/
static int set_reg_idx(struct genwqe_dev *cd, struct genwqe_reg *r,
@@ -842,6 +824,8 @@
/**
* genwqe_ffdc_buff_size() - Calculates the number of dump registers
+ * @cd: genwqe device descriptor
+ * @uid: unit ID
*/
int genwqe_ffdc_buff_size(struct genwqe_dev *cd, int uid)
{
@@ -895,6 +879,10 @@
/**
* genwqe_ffdc_buff_read() - Implements LogoutExtendedErrorRegisters procedure
+ * @cd: genwqe device descriptor
+ * @uid: unit ID
+ * @regs: register information
+ * @max_regs: number of register entries
*/
int genwqe_ffdc_buff_read(struct genwqe_dev *cd, int uid,
struct genwqe_reg *regs, unsigned int max_regs)
@@ -980,6 +968,10 @@
/**
* genwqe_write_vreg() - Write register in virtual window
+ * @cd: genwqe device descriptor
+ * @reg: register (byte) offset within BAR
+ * @val: value to write
+ * @func: PCI virtual function
*
* Note, these registers are only accessible to the PF through the
* VF-window. It is not intended for the VF to access.
@@ -993,6 +985,9 @@
/**
* genwqe_read_vreg() - Read register in virtual window
+ * @cd: genwqe device descriptor
+ * @reg: register (byte) offset within BAR
+ * @func: PCI virtual function
*
* Note, these registers are only accessible to the PF through the
* VF-window. It is not intended for the VF to access.
@@ -1005,6 +1000,7 @@
/**
* genwqe_base_clock_frequency() - Deteremine base clock frequency of the card
+ * @cd: genwqe device descriptor
*
* Note: From a design perspective it turned out to be a bad idea to
* use codes here to specifiy the frequency/speed values. An old
@@ -1029,6 +1025,7 @@
/**
* genwqe_stop_traps() - Stop traps
+ * @cd: genwqe device descriptor
*
* Before reading out the analysis data, we need to stop the traps.
*/
@@ -1039,6 +1036,7 @@
/**
* genwqe_start_traps() - Start traps
+ * @cd: genwqe device descriptor
*
* After having read the data, we can/must enable the traps again.
*/