Update Linux to v5.10.109
Sourced from [1]
[1] https://cdn.kernel.org/pub/linux/kernel/v5.x/linux-5.10.109.tar.xz
Change-Id: I19bca9fc6762d4e63bcf3e4cba88bbe560d9c76c
Signed-off-by: Olivier Deprez <olivier.deprez@arm.com>
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 3526bb1..95ca4f9 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -212,7 +212,7 @@
mm = alloc->vma_vm_mm;
if (mm) {
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
vma = alloc->vma;
}
@@ -268,10 +268,9 @@
alloc->pages_high = index + 1;
trace_binder_alloc_page_end(alloc, index);
- /* vm_insert_page does not seem to increment the refcount */
}
if (mm) {
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
mmput(mm);
}
return 0;
@@ -304,7 +303,7 @@
}
err_no_vma:
if (mm) {
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
mmput(mm);
}
return vma ? -ENOMEM : -ESRCH;
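
For reference, the hunks above and the shrinker hunks further down convert binder from direct mmap_sem operations to the mmap locking API introduced in v5.8. A minimal sketch of the read-side helpers, roughly as they appear in include/linux/mmap_lock.h (the field mm->mmap_sem was renamed to mm->mmap_lock in the same series; the rwsem semantics are unchanged):

        /* Sketch of what the new helpers expand to; see include/linux/mmap_lock.h. */
        static inline void mmap_read_lock(struct mm_struct *mm)
        {
                down_read(&mm->mmap_lock);
        }

        static inline bool mmap_read_trylock(struct mm_struct *mm)
        {
                return down_read_trylock(&mm->mmap_lock) != 0;
        }

        static inline void mmap_read_unlock(struct mm_struct *mm)
        {
                up_read(&mm->mmap_lock);
        }
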
@@ -339,12 +338,50 @@
return vma;
}
+static void debug_low_async_space_locked(struct binder_alloc *alloc, int pid)
+{
+ /*
+ * Find the amount and size of buffers allocated by the current caller;
+ * The idea is that once we cross the threshold, whoever is responsible
+ * for the low async space is likely to try to send another async txn,
+ * and at some point we'll catch them in the act. This is more efficient
+ * than keeping a map per pid.
+ */
+ struct rb_node *n;
+ struct binder_buffer *buffer;
+ size_t total_alloc_size = 0;
+ size_t num_buffers = 0;
+
+ for (n = rb_first(&alloc->allocated_buffers); n != NULL;
+ n = rb_next(n)) {
+ buffer = rb_entry(n, struct binder_buffer, rb_node);
+ if (buffer->pid != pid)
+ continue;
+ if (!buffer->async_transaction)
+ continue;
+ total_alloc_size += binder_alloc_buffer_size(alloc, buffer)
+ + sizeof(struct binder_buffer);
+ num_buffers++;
+ }
+
+ /*
+ * Warn if this pid has more than 50 transactions, or more than 50% of
+ * async space (which is 25% of total buffer size).
+ */
+ if (num_buffers > 50 || total_alloc_size > alloc->buffer_size / 4) {
+ binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
+ "%d: pid %d spamming oneway? %zd buffers allocated for a total size of %zd\n",
+ alloc->pid, pid, num_buffers, total_alloc_size);
+ }
+}
+
static struct binder_buffer *binder_alloc_new_buf_locked(
struct binder_alloc *alloc,
size_t data_size,
size_t offsets_size,
size_t extra_buffers_size,
- int is_async)
+ int is_async,
+ int pid)
{
struct rb_node *n = alloc->free_buffers.rb_node;
struct binder_buffer *buffer;
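
The thresholds in debug_low_async_space_locked() follow from binder sizing the async pool at half of the mmap'ed buffer, as the in-code comment notes (50% of async space == 25% of total buffer size). A small standalone sketch of the arithmetic; looks_like_oneway_spam() and its parameters are illustrative only, not part of the patch:

        #include <stdbool.h>
        #include <stddef.h>

        /* Restates the patch's heuristic: flag a pid once it holds more than 50
         * outstanding async buffers, or more than half of the async pool. With
         * the async pool at buffer_size / 2, half of it is buffer_size / 4,
         * matching the check in debug_low_async_space_locked().
         */
        static bool looks_like_oneway_spam(size_t buffer_size, size_t num_buffers,
                                           size_t total_alloc_size)
        {
                size_t async_space = buffer_size / 2;   /* 50% of the mapping */
                size_t async_half  = async_space / 2;   /* == buffer_size / 4 */

                return num_buffers > 50 || total_alloc_size > async_half;
        }
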
@@ -487,11 +524,20 @@
buffer->offsets_size = offsets_size;
buffer->async_transaction = is_async;
buffer->extra_buffers_size = extra_buffers_size;
+ buffer->pid = pid;
if (is_async) {
alloc->free_async_space -= size + sizeof(struct binder_buffer);
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
"%d: binder_alloc_buf size %zd async free %zd\n",
alloc->pid, size, alloc->free_async_space);
+ if (alloc->free_async_space < alloc->buffer_size / 10) {
+ /*
+ * Start detecting spammers once we have less than 20%
+ * of async space left (which is less than 10% of total
+ * buffer size).
+ */
+ debug_low_async_space_locked(alloc, pid);
+ }
}
return buffer;
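
The trigger added above is expressed against the total buffer size rather than the async pool. A short sketch of why free_async_space < buffer_size / 10 is the same condition as the "less than 20% of async space left" figure in the comment (the helper name is illustrative only):

        #include <stdbool.h>
        #include <stddef.h>

        /* With the async pool fixed at buffer_size / 2, less than one fifth of
         * the pool remaining is exactly free_async_space < buffer_size / 10.
         */
        static bool should_check_for_spam(size_t buffer_size, size_t free_async_space)
        {
                size_t async_space = buffer_size / 2;

                return free_async_space < async_space / 5;  /* == buffer_size / 10 */
        }
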
@@ -509,6 +555,7 @@
* @offsets_size: user specified buffer offset
* @extra_buffers_size: size of extra space for meta-data (eg, security context)
* @is_async: buffer for async transaction
+ * @pid: pid to attribute allocation to (used for debugging)
*
* Allocate a new buffer given the requested sizes. Returns
* the kernel version of the buffer pointer. The size allocated
@@ -521,13 +568,14 @@
size_t data_size,
size_t offsets_size,
size_t extra_buffers_size,
- int is_async)
+ int is_async,
+ int pid)
{
struct binder_buffer *buffer;
mutex_lock(&alloc->mutex);
buffer = binder_alloc_new_buf_locked(alloc, data_size, offsets_size,
- extra_buffers_size, is_async);
+ extra_buffers_size, is_async, pid);
mutex_unlock(&alloc->mutex);
return buffer;
}
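
With the extended signature, callers attribute each allocation to the sending task so the spam heuristic can group buffers by pid. A hypothetical illustration of such a call (alloc_for_sender() is not from this patch, and attributing via current->tgid is an assumption about the caller, not shown in these hunks):

        /* Hypothetical wrapper: request a buffer from the target's allocator
         * and attribute it to the sending task for the oneway-spam heuristic.
         */
        static struct binder_buffer *
        alloc_for_sender(struct binder_alloc *target_alloc, size_t data_size,
                         size_t offsets_size, size_t extra_size, int is_oneway)
        {
                return binder_alloc_new_buf(target_alloc, data_size, offsets_size,
                                            extra_size, is_oneway, current->tgid);
        }
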
@@ -548,6 +596,7 @@
{
struct binder_buffer *prev, *next = NULL;
bool to_free = true;
+
BUG_ON(alloc->buffers.next == &buffer->entry);
prev = binder_buffer_prev(buffer);
BUG_ON(!prev->free);
@@ -613,7 +662,7 @@
BUG_ON(buffer->user_data > alloc->buffer + alloc->buffer_size);
if (buffer->async_transaction) {
- alloc->free_async_space += size + sizeof(struct binder_buffer);
+ alloc->free_async_space += buffer_size + sizeof(struct binder_buffer);
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
"%d: binder_free_buf size %zd async free %zd\n",
@@ -654,7 +703,7 @@
* @alloc: binder_alloc for this proc
* @buffer: kernel pointer to buffer
*
- * Free the buffer allocated via binder_alloc_new_buffer()
+ * Free the buffer allocated via binder_alloc_new_buf()
*/
void binder_alloc_free_buf(struct binder_alloc *alloc,
struct binder_buffer *buffer)
@@ -951,8 +1000,8 @@
mm = alloc->vma_vm_mm;
if (!mmget_not_zero(mm))
goto err_mmget;
- if (!down_read_trylock(&mm->mmap_sem))
- goto err_down_read_mmap_sem_failed;
+ if (!mmap_read_trylock(mm))
+ goto err_mmap_read_lock_failed;
vma = binder_alloc_get_vma(alloc);
list_lru_isolate(lru, item);
@@ -965,7 +1014,7 @@
trace_binder_unmap_user_end(alloc, index);
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
mmput_async(mm);
trace_binder_unmap_kernel_start(alloc, index);
@@ -979,7 +1028,7 @@
mutex_unlock(&alloc->mutex);
return LRU_REMOVED_RETRY;
-err_down_read_mmap_sem_failed:
+err_mmap_read_lock_failed:
mmput_async(mm);
err_mmget:
err_page_already_freed: