Update Linux to v5.10.109
Sourced from [1]
[1] https://cdn.kernel.org/pub/linux/kernel/v5.x/linux-5.10.109.tar.xz
Change-Id: I19bca9fc6762d4e63bcf3e4cba88bbe560d9c76c
Signed-off-by: Olivier Deprez <olivier.deprez@arm.com>
diff --git a/fs/dax.c b/fs/dax.c
index 12953e8..d5d7b93 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -144,6 +144,16 @@
struct exceptional_entry_key key;
};
+/**
+ * enum dax_wake_mode: waitqueue wakeup behaviour
+ * @WAKE_ALL: wake all waiters in the waitqueue
+ * @WAKE_NEXT: wake only the first waiter in the waitqueue
+ */
+enum dax_wake_mode {
+ WAKE_ALL,
+ WAKE_NEXT,
+};
+
static wait_queue_head_t *dax_entry_waitqueue(struct xa_state *xas,
void *entry, struct exceptional_entry_key *key)
{
@@ -182,7 +192,8 @@
* The important information it's conveying is whether the entry at
* this index used to be a PMD entry.
*/
-static void dax_wake_entry(struct xa_state *xas, void *entry, bool wake_all)
+static void dax_wake_entry(struct xa_state *xas, void *entry,
+ enum dax_wake_mode mode)
{
struct exceptional_entry_key key;
wait_queue_head_t *wq;
@@ -196,7 +207,7 @@
* must be in the waitqueue and the following check will see them.
*/
if (waitqueue_active(wq))
- __wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
+ __wake_up(wq, TASK_NORMAL, mode == WAKE_ALL ? 0 : 1, &key);
}
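For context, and not part of this diff: what makes the two wake modes differ is the waiter side of this handshake. get_unlocked_entry() queues each waiter exclusively, so passing nr_exclusive == 1 to __wake_up() (WAKE_NEXT) wakes exactly one sleeper, while 0 (WAKE_ALL) wakes every waiter. A condensed, approximate fragment of that waiter path (declarations omitted; see get_unlocked_entry() in fs/dax.c):

	wq = dax_entry_waitqueue(xas, entry, &ewait.key);
	/* Queue as an exclusive waiter: WAKE_NEXT wakes only one of these. */
	prepare_to_wait_exclusive(wq, &ewait.wait, TASK_UNINTERRUPTIBLE);
	xas_unlock_irq(xas);
	xas_reset(xas);
	schedule();
	finish_wait(wq, &ewait.wait);
	xas_lock_irq(xas);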
/*
@@ -264,11 +275,11 @@
finish_wait(wq, &ewait.wait);
}
-static void put_unlocked_entry(struct xa_state *xas, void *entry)
+static void put_unlocked_entry(struct xa_state *xas, void *entry,
+ enum dax_wake_mode mode)
{
- /* If we were the only waiter woken, wake the next one */
if (entry && !dax_is_conflict(entry))
- dax_wake_entry(xas, entry, false);
+ dax_wake_entry(xas, entry, mode);
}
/*
@@ -286,7 +297,7 @@
old = xas_store(xas, entry);
xas_unlock_irq(xas);
BUG_ON(!dax_is_locked(old));
- dax_wake_entry(xas, entry, false);
+ dax_wake_entry(xas, entry, WAKE_NEXT);
}
/*
@@ -489,7 +500,7 @@
if (dax_is_conflict(entry))
goto fallback;
if (!xa_is_value(entry)) {
- xas_set_err(xas, EIO);
+ xas_set_err(xas, -EIO);
goto out_unlock;
}
@@ -525,7 +536,7 @@
dax_disassociate_entry(entry, mapping, false);
xas_store(xas, NULL); /* undo the PMD join */
- dax_wake_entry(xas, entry, true);
+ dax_wake_entry(xas, entry, WAKE_ALL);
mapping->nrexceptional--;
entry = NULL;
xas_set(xas, index);
@@ -560,8 +571,11 @@
}
/**
- * dax_layout_busy_page - find first pinned page in @mapping
+ * dax_layout_busy_page_range - find first pinned page in @mapping
* @mapping: address space to scan for a page with ref count > 1
+ * @start: Starting offset. Page containing 'start' is included.
+ * @end: End offset. Page containing 'end' is included. If 'end' is LLONG_MAX,
+ * pages from 'start' till the end of file are included.
*
* DAX requires ZONE_DEVICE mapped pages. These pages are never
* 'onlined' to the page allocator so they are considered idle when
@@ -574,12 +588,15 @@
* to be able to run unmap_mapping_range() and subsequently not race
* mapping_mapped() becoming true.
*/
-struct page *dax_layout_busy_page(struct address_space *mapping)
+struct page *dax_layout_busy_page_range(struct address_space *mapping,
+ loff_t start, loff_t end)
{
- XA_STATE(xas, &mapping->i_pages, 0);
void *entry;
unsigned int scanned = 0;
struct page *page = NULL;
+ pgoff_t start_idx = start >> PAGE_SHIFT;
+ pgoff_t end_idx;
+ XA_STATE(xas, &mapping->i_pages, start_idx);
/*
* In the 'limited' case get_user_pages() for dax is disabled.
@@ -590,6 +607,11 @@
if (!dax_mapping(mapping) || !mapping_mapped(mapping))
return NULL;
+ /* If end == LLONG_MAX, all pages from start till end of file */
+ if (end == LLONG_MAX)
+ end_idx = ULONG_MAX;
+ else
+ end_idx = end >> PAGE_SHIFT;
/*
* If we race get_user_pages_fast() here either we'll see the
* elevated page count in the iteration and wait, or
@@ -597,22 +619,22 @@
* against is no longer mapped in the page tables and bail to the
* get_user_pages() slow path. The slow path is protected by
* pte_lock() and pmd_lock(). New references are not taken without
- * holding those locks, and unmap_mapping_range() will not zero the
+ * holding those locks, and unmap_mapping_pages() will not zero the
* pte or pmd without holding the respective lock, so we are
* guaranteed to either see new references or prevent new
* references from being established.
*/
- unmap_mapping_range(mapping, 0, 0, 0);
+ unmap_mapping_pages(mapping, start_idx, end_idx - start_idx + 1, 0);
xas_lock_irq(&xas);
- xas_for_each(&xas, entry, ULONG_MAX) {
+ xas_for_each(&xas, entry, end_idx) {
if (WARN_ON_ONCE(!xa_is_value(entry)))
continue;
if (unlikely(dax_is_locked(entry)))
entry = get_unlocked_entry(&xas, 0);
if (entry)
page = dax_busy_page(entry);
- put_unlocked_entry(&xas, entry);
+ put_unlocked_entry(&xas, entry, WAKE_NEXT);
if (page)
break;
if (++scanned % XA_CHECK_SCHED)
@@ -626,6 +648,12 @@
xas_unlock_irq(&xas);
return page;
}
+EXPORT_SYMBOL_GPL(dax_layout_busy_page_range);
+
+struct page *dax_layout_busy_page(struct address_space *mapping)
+{
+ return dax_layout_busy_page_range(mapping, 0, LLONG_MAX);
+}
EXPORT_SYMBOL_GPL(dax_layout_busy_page);
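As a caller-side illustration of the new dax_layout_busy_page_range() export (not part of this patch), a filesystem can scan only the range it is about to truncate and wait for pinned pages before freeing blocks. This sketch is loosely modelled on how ext4/fuse break layouts; break_dax_layouts() and the bare wait_var_event() wait are assumptions for illustration (real callers drop filesystem locks around the wait and retry):

#include <linux/dax.h>
#include <linux/mm.h>
#include <linux/wait_bit.h>

/* Hypothetical helper: wait until no page in [start, end] is pinned. */
static int break_dax_layouts(struct address_space *mapping,
			     loff_t start, loff_t end)
{
	struct page *page;

	for (;;) {
		page = dax_layout_busy_page_range(mapping, start, end);
		if (!page)
			return 0;
		/*
		 * The returned page still carries references taken via
		 * get_user_pages(); sleep until they are dropped, then
		 * rescan the range.
		 */
		wait_var_event(&page->_refcount,
			       atomic_read(&page->_refcount) == 1);
	}
}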
static int __dax_invalidate_entry(struct address_space *mapping,
@@ -648,7 +676,7 @@
mapping->nrexceptional--;
ret = 1;
out:
- put_unlocked_entry(&xas, entry);
+ put_unlocked_entry(&xas, entry, WAKE_ALL);
xas_unlock_irq(&xas);
return ret;
}
@@ -681,21 +709,20 @@
return __dax_invalidate_entry(mapping, index, false);
}
-static int copy_user_dax(struct block_device *bdev, struct dax_device *dax_dev,
- sector_t sector, size_t size, struct page *to,
- unsigned long vaddr)
+static int copy_cow_page_dax(struct block_device *bdev, struct dax_device *dax_dev,
+ sector_t sector, struct page *to, unsigned long vaddr)
{
void *vto, *kaddr;
pgoff_t pgoff;
long rc;
int id;
- rc = bdev_dax_pgoff(bdev, sector, size, &pgoff);
+ rc = bdev_dax_pgoff(bdev, sector, PAGE_SIZE, &pgoff);
if (rc)
return rc;
id = dax_read_lock();
- rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, NULL);
+ rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(PAGE_SIZE), &kaddr, NULL);
if (rc < 0) {
dax_read_unlock(id);
return rc;
@@ -922,13 +949,13 @@
xas_lock_irq(xas);
xas_store(xas, entry);
xas_clear_mark(xas, PAGECACHE_TAG_DIRTY);
- dax_wake_entry(xas, entry, false);
+ dax_wake_entry(xas, entry, WAKE_NEXT);
trace_dax_writeback_one(mapping->host, index, count);
return ret;
put_unlocked:
- put_unlocked_entry(xas, entry);
+ put_unlocked_entry(xas, entry, WAKE_NEXT);
return ret;
}
@@ -938,12 +965,11 @@
* on persistent storage prior to completion of the operation.
*/
int dax_writeback_mapping_range(struct address_space *mapping,
- struct block_device *bdev, struct writeback_control *wbc)
+ struct dax_device *dax_dev, struct writeback_control *wbc)
{
XA_STATE(xas, &mapping->i_pages, wbc->range_start >> PAGE_SHIFT);
struct inode *inode = mapping->host;
pgoff_t end_index = wbc->range_end >> PAGE_SHIFT;
- struct dax_device *dax_dev;
void *entry;
int ret = 0;
unsigned int scanned = 0;
@@ -954,10 +980,6 @@
if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
return 0;
- dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
- if (!dax_dev)
- return -EIO;
-
trace_dax_writeback_range(inode, xas.xa_index, end_index);
tag_pages_for_writeback(mapping, xas.xa_index, end_index);
@@ -978,7 +1000,6 @@
xas_lock_irq(&xas);
}
xas_unlock_irq(&xas);
- put_dax(dax_dev);
trace_dax_writeback_range_done(inode, xas.xa_index, end_index);
return ret;
}
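With the signature change above, a filesystem's ->writepages implementation now hands in the dax_device it already holds instead of a block_device. A hedged sketch of such a caller (myfs_sb_info, MYFS_SB and s_daxdev are assumed names, not from this patch):

static int myfs_dax_writepages(struct address_space *mapping,
			       struct writeback_control *wbc)
{
	/* The filesystem is assumed to cache its dax_device at mount time. */
	struct myfs_sb_info *sbi = MYFS_SB(mapping->host->i_sb);

	return dax_writeback_mapping_range(mapping, sbi->s_daxdev, wbc);
}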
@@ -1045,54 +1066,46 @@
return ret;
}
-static bool dax_range_is_aligned(struct block_device *bdev,
- unsigned int offset, unsigned int length)
+s64 dax_iomap_zero(loff_t pos, u64 length, struct iomap *iomap)
{
- unsigned short sector_size = bdev_logical_block_size(bdev);
+ sector_t sector = iomap_sector(iomap, pos & PAGE_MASK);
+ pgoff_t pgoff;
+ long rc, id;
+ void *kaddr;
+ bool page_aligned = false;
+ unsigned offset = offset_in_page(pos);
+ unsigned size = min_t(u64, PAGE_SIZE - offset, length);
- if (!IS_ALIGNED(offset, sector_size))
- return false;
- if (!IS_ALIGNED(length, sector_size))
- return false;
+ if (IS_ALIGNED(sector << SECTOR_SHIFT, PAGE_SIZE) &&
+ (size == PAGE_SIZE))
+ page_aligned = true;
- return true;
-}
+ rc = bdev_dax_pgoff(iomap->bdev, sector, PAGE_SIZE, &pgoff);
+ if (rc)
+ return rc;
-int __dax_zero_page_range(struct block_device *bdev,
- struct dax_device *dax_dev, sector_t sector,
- unsigned int offset, unsigned int size)
-{
- if (dax_range_is_aligned(bdev, offset, size)) {
- sector_t start_sector = sector + (offset >> 9);
+ id = dax_read_lock();
- return blkdev_issue_zeroout(bdev, start_sector,
- size >> 9, GFP_NOFS, 0);
- } else {
- pgoff_t pgoff;
- long rc, id;
- void *kaddr;
-
- rc = bdev_dax_pgoff(bdev, sector, PAGE_SIZE, &pgoff);
- if (rc)
- return rc;
-
- id = dax_read_lock();
- rc = dax_direct_access(dax_dev, pgoff, 1, &kaddr, NULL);
- if (rc < 0) {
- dax_read_unlock(id);
- return rc;
- }
- memset(kaddr + offset, 0, size);
- dax_flush(dax_dev, kaddr + offset, size);
+ if (page_aligned)
+ rc = dax_zero_page_range(iomap->dax_dev, pgoff, 1);
+ else
+ rc = dax_direct_access(iomap->dax_dev, pgoff, 1, &kaddr, NULL);
+ if (rc < 0) {
dax_read_unlock(id);
+ return rc;
}
- return 0;
+
+ if (!page_aligned) {
+ memset(kaddr + offset, 0, size);
+ dax_flush(iomap->dax_dev, kaddr + offset, size);
+ }
+ dax_read_unlock(id);
+ return size;
}
-EXPORT_SYMBOL_GPL(__dax_zero_page_range);
static loff_t
dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
- struct iomap *iomap)
+ struct iomap *iomap, struct iomap *srcmap)
{
struct block_device *bdev = iomap->bdev;
struct dax_device *dax_dev = iomap->dax_dev;
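Note that dax_iomap_zero() above replaces __dax_zero_page_range() and returns the number of bytes zeroed (or a negative errno) instead of 0/-errno for a caller-supplied length. Roughly how the iomap zeroing path consumes that return convention, condensed and approximate rather than quoted from this patch:

	do {
		s64 bytes;

		if (IS_DAX(inode))
			bytes = dax_iomap_zero(pos, length, iomap);
		else
			bytes = iomap_zero(inode, pos, length, iomap, srcmap);
		if (bytes < 0)
			return bytes;

		pos += bytes;
		length -= bytes;
		written += bytes;
	} while (length > 0);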
@@ -1252,7 +1265,8 @@
struct inode *inode = mapping->host;
unsigned long vaddr = vmf->address;
loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT;
- struct iomap iomap = { 0 };
+ struct iomap iomap = { .type = IOMAP_HOLE };
+ struct iomap srcmap = { .type = IOMAP_HOLE };
unsigned flags = IOMAP_FAULT;
int error, major = 0;
bool write = vmf->flags & FAULT_FLAG_WRITE;
@@ -1297,7 +1311,7 @@
* the file system block size to be equal the page size, which means
* that we never have to deal with more than a single extent here.
*/
- error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap);
+ error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap, &srcmap);
if (iomap_errp)
*iomap_errp = error;
if (error) {
@@ -1318,8 +1332,8 @@
clear_user_highpage(vmf->cow_page, vaddr);
break;
case IOMAP_MAPPED:
- error = copy_user_dax(iomap.bdev, iomap.dax_dev,
- sector, PAGE_SIZE, vmf->cow_page, vaddr);
+ error = copy_cow_page_dax(iomap.bdev, iomap.dax_dev,
+ sector, vmf->cow_page, vaddr);
break;
default:
WARN_ON_ONCE(1);
@@ -1381,7 +1395,7 @@
ret = dax_load_hole(&xas, mapping, &entry, vmf);
goto finish_iomap;
}
- /*FALLTHRU*/
+ fallthrough;
default:
WARN_ON_ONCE(1);
error = -EIO;
@@ -1476,7 +1490,8 @@
unsigned int iomap_flags = (write ? IOMAP_WRITE : 0) | IOMAP_FAULT;
struct inode *inode = mapping->host;
vm_fault_t result = VM_FAULT_FALLBACK;
- struct iomap iomap = { 0 };
+ struct iomap iomap = { .type = IOMAP_HOLE };
+ struct iomap srcmap = { .type = IOMAP_HOLE };
pgoff_t max_pgoff;
void *entry;
loff_t pos;
@@ -1551,7 +1566,8 @@
* to look up our filesystem block.
*/
pos = (loff_t)xas.xa_index << PAGE_SHIFT;
- error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap);
+ error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap,
+ &srcmap);
if (error)
goto unlock_entry;
@@ -1680,7 +1696,7 @@
/* Did we race with someone splitting entry or so? */
if (!entry || dax_is_conflict(entry) ||
(order == 0 && !dax_is_pte_entry(entry))) {
- put_unlocked_entry(&xas, entry);
+ put_unlocked_entry(&xas, entry, WAKE_NEXT);
xas_unlock_irq(&xas);
trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf,
VM_FAULT_NOPAGE);