Update Linux to v5.4.148
Sourced from [1]
[1] https://cdn.kernel.org/pub/linux/kernel/v5.x/linux-5.4.148.tar.gz
Change-Id: Ib3d26c5ba9b022e2e03533005c4fed4d7c30b61b
Signed-off-by: Olivier Deprez <olivier.deprez@arm.com>
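
Note: the largest functional change in this update is the async-work cancellation path (ctx->task_list, REQ_F_CANCEL, io_cancel_async_work() and the new io_uring_flush hook). Below is a minimal userspace sketch of the flag/task handshake those hunks rely on, using C11 atomics in place of the kernel's smp_mb()/smp_store_mb(); all names in it are illustrative, not kernel symbols.

    /* Sketch only: C11 atomics stand in for smp_mb()/smp_store_mb(). */
    #include <stdatomic.h>
    #include <stdbool.h>

    #define SKETCH_REQ_F_CANCEL 0x1u

    struct sketch_req {
    	atomic_uint flags;            /* carries the cancel bit */
    	_Atomic(void *) work_task;    /* worker publishes itself here */
    };

    /* Worker side, as in io_sq_wq_submit_work(): publish, fence (A), re-check. */
    static bool sketch_worker_may_issue(struct sketch_req *req, void *self)
    {
    	atomic_store(&req->work_task, self);
    	atomic_thread_fence(memory_order_seq_cst);               /* A */
    	return !(atomic_load(&req->flags) & SKETCH_REQ_F_CANCEL);
    }

    /*
     * Canceller side, as in io_cancel_async_work(): set the cancel bit with a
     * full barrier (B), then report the worker only if it is already visible.
     */
    static void *sketch_cancel(struct sketch_req *req)
    {
    	atomic_fetch_or(&req->flags, SKETCH_REQ_F_CANCEL);
    	atomic_thread_fence(memory_order_seq_cst);               /* B */
    	return atomic_load(&req->work_task);  /* non-NULL: signal it */
    }

With the two fences paired, either the worker observes the cancel bit and bails out with -ECANCELED, or the canceller observes a published work_task and interrupts it with SIGINT; the window where both sides miss each other is ruled out.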
diff --git a/fs/io_uring.c b/fs/io_uring.c
index cbe8dab..478df7e 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -70,6 +70,8 @@
#include <linux/nospec.h>
#include <linux/sizes.h>
#include <linux/hugetlb.h>
+#include <linux/highmem.h>
+#include <linux/fs_struct.h>
#include <uapi/linux/io_uring.h>
@@ -238,7 +240,7 @@
struct user_struct *user;
- struct cred *creds;
+ const struct cred *creds;
struct completion ctx_done;
@@ -265,6 +267,9 @@
#if defined(CONFIG_UNIX)
struct socket *ring_sock;
#endif
+
+ struct list_head task_list;
+ spinlock_t task_lock;
};
struct sqe_submit {
@@ -274,6 +279,7 @@
bool has_user;
bool needs_lock;
bool needs_fixed_file;
+ u8 opcode;
};
/*
@@ -329,11 +335,18 @@
#define REQ_F_ISREG 2048 /* regular file */
#define REQ_F_MUST_PUNT 4096 /* must be punted even for NONBLOCK */
#define REQ_F_TIMEOUT_NOSEQ 8192 /* no timeout sequence */
+#define REQ_F_CANCEL 16384 /* cancel request */
+ unsigned long fsize;
u64 user_data;
u32 result;
u32 sequence;
+ struct files_struct *files;
+
+ struct fs_struct *fs;
struct work_struct work;
+ struct task_struct *work_task;
+ struct list_head task_list;
};
#define IO_PLUG_THRESHOLD 2
@@ -404,6 +417,7 @@
}
ctx->flags = p->flags;
+ init_waitqueue_head(&ctx->sqo_wait);
init_waitqueue_head(&ctx->cq_wait);
init_completion(&ctx->ctx_done);
init_completion(&ctx->sqo_thread_started);
@@ -419,6 +433,8 @@
INIT_LIST_HEAD(&ctx->cancel_list);
INIT_LIST_HEAD(&ctx->defer_list);
INIT_LIST_HEAD(&ctx->timeout_list);
+ INIT_LIST_HEAD(&ctx->task_list);
+ spin_lock_init(&ctx->task_lock);
return ctx;
}
@@ -486,10 +502,11 @@
static inline void io_queue_async_work(struct io_ring_ctx *ctx,
struct io_kiocb *req)
{
+ unsigned long flags;
int rw = 0;
if (req->submit.sqe) {
- switch (req->submit.sqe->opcode) {
+ switch (req->submit.opcode) {
case IORING_OP_WRITEV:
case IORING_OP_WRITE_FIXED:
rw = !(req->rw.ki_flags & IOCB_DIRECT);
@@ -497,6 +514,15 @@
}
}
+ if (req->work.func == io_sq_wq_submit_work) {
+ req->files = current->files;
+
+ spin_lock_irqsave(&ctx->task_lock, flags);
+ list_add(&req->task_list, &ctx->task_list);
+ req->work_task = NULL;
+ spin_unlock_irqrestore(&ctx->task_lock, flags);
+ }
+
queue_work(ctx->sqo_wq[rw], &req->work);
}
@@ -644,12 +670,14 @@
state->cur_req++;
}
+ INIT_LIST_HEAD(&req->task_list);
req->file = NULL;
req->ctx = ctx;
req->flags = 0;
/* one is dropped after submission, the other at completion */
refcount_set(&req->refs, 2);
req->result = 0;
+ req->fs = NULL;
return req;
out:
percpu_ref_put(&ctx->refs);
@@ -881,11 +909,17 @@
mutex_unlock(&ctx->uring_lock);
}
-static int __io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
- long min)
+static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
+ long min)
{
int iters = 0, ret = 0;
+ /*
+ * We disallow the app entering submit/complete with polling, but we
+ * still need to lock the ring to prevent racing with polled issue
+ * that got punted to a workqueue.
+ */
+ mutex_lock(&ctx->uring_lock);
do {
int tmin = 0;
@@ -921,21 +955,6 @@
ret = 0;
} while (min && !*nr_events && !need_resched());
- return ret;
-}
-
-static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
- long min)
-{
- int ret;
-
- /*
- * We disallow the app entering submit/complete with polling, but we
- * still need to lock the ring to prevent racing with polled issue
- * that got punted to a workqueue.
- */
- mutex_lock(&ctx->uring_lock);
- ret = __io_iopoll_check(ctx, nr_events, min);
mutex_unlock(&ctx->uring_lock);
return ret;
}
@@ -1089,6 +1108,9 @@
if (S_ISREG(file_inode(req->file)->i_mode))
req->flags |= REQ_F_ISREG;
+ if (force_nonblock)
+ req->fsize = rlimit(RLIMIT_FSIZE);
+
/*
* If the file doesn't support async, mark it as REQ_F_MUST_PUNT so
* we know to async punt it even if it was opened O_NONBLOCK
@@ -1236,23 +1258,15 @@
}
static ssize_t io_import_iovec(struct io_ring_ctx *ctx, int rw,
- const struct sqe_submit *s, struct iovec **iovec,
+ struct io_kiocb *req, struct iovec **iovec,
struct iov_iter *iter)
{
- const struct io_uring_sqe *sqe = s->sqe;
+ const struct io_uring_sqe *sqe = req->submit.sqe;
void __user *buf = u64_to_user_ptr(READ_ONCE(sqe->addr));
size_t sqe_len = READ_ONCE(sqe->len);
u8 opcode;
- /*
- * We're reading ->opcode for the second time, but the first read
- * doesn't care whether it's _FIXED or not, so it doesn't matter
- * whether ->opcode changes concurrently. The first read does care
- * about whether it is a READ or a WRITE, so we don't trust this read
- * for that purpose and instead let the caller pass in the read/write
- * flag.
- */
- opcode = READ_ONCE(sqe->opcode);
+ opcode = req->submit.opcode;
if (opcode == IORING_OP_READ_FIXED ||
opcode == IORING_OP_WRITE_FIXED) {
ssize_t ret = io_import_fixed(ctx, rw, sqe, iter);
@@ -1260,7 +1274,7 @@
return ret;
}
- if (!s->has_user)
+ if (!req->submit.has_user)
return -EFAULT;
#ifdef CONFIG_COMPAT
@@ -1351,9 +1365,19 @@
return -EAGAIN;
while (iov_iter_count(iter)) {
- struct iovec iovec = iov_iter_iovec(iter);
+ struct iovec iovec;
ssize_t nr;
+ if (!iov_iter_is_bvec(iter)) {
+ iovec = iov_iter_iovec(iter);
+ } else {
+ /* fixed buffers import bvec */
+ iovec.iov_base = kmap(iter->bvec->bv_page)
+ + iter->iov_offset;
+ iovec.iov_len = min(iter->count,
+ iter->bvec->bv_len - iter->iov_offset);
+ }
+
if (rw == READ) {
nr = file->f_op->read(file, iovec.iov_base,
iovec.iov_len, &kiocb->ki_pos);
@@ -1362,6 +1386,9 @@
iovec.iov_len, &kiocb->ki_pos);
}
+ if (iov_iter_is_bvec(iter))
+ kunmap(iter->bvec->bv_page);
+
if (nr < 0) {
if (!ret)
ret = nr;
@@ -1394,7 +1421,7 @@
if (unlikely(!(file->f_mode & FMODE_READ)))
return -EBADF;
- ret = io_import_iovec(req->ctx, READ, s, &iovec, &iter);
+ ret = io_import_iovec(req->ctx, READ, req, &iovec, &iter);
if (ret < 0)
return ret;
@@ -1409,8 +1436,10 @@
if (file->f_op->read_iter)
ret2 = call_read_iter(file, kiocb, &iter);
- else
+ else if (req->file->f_op->read)
ret2 = loop_rw_iter(READ, file, kiocb, &iter);
+ else
+ ret2 = -EINVAL;
/*
* In case of a short read, punt to async. This can happen
@@ -1459,7 +1488,7 @@
if (unlikely(!(file->f_mode & FMODE_WRITE)))
return -EBADF;
- ret = io_import_iovec(req->ctx, WRITE, s, &iovec, &iter);
+ ret = io_import_iovec(req->ctx, WRITE, req, &iovec, &iter);
if (ret < 0)
return ret;
@@ -1495,10 +1524,19 @@
}
kiocb->ki_flags |= IOCB_WRITE;
+ if (!force_nonblock)
+ current->signal->rlim[RLIMIT_FSIZE].rlim_cur = req->fsize;
+
if (file->f_op->write_iter)
ret2 = call_write_iter(file, kiocb, &iter);
- else
+ else if (req->file->f_op->write)
ret2 = loop_rw_iter(WRITE, file, kiocb, &iter);
+ else
+ ret2 = -EINVAL;
+
+ if (!force_nonblock)
+ current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
+
if (!force_nonblock || ret2 != -EAGAIN) {
io_rw_done(kiocb, ret2);
} else {
@@ -1648,14 +1686,31 @@
else if (force_nonblock)
flags |= MSG_DONTWAIT;
+#ifdef CONFIG_COMPAT
+ if (req->ctx->compat)
+ flags |= MSG_CMSG_COMPAT;
+#endif
+
msg = (struct user_msghdr __user *) (unsigned long)
READ_ONCE(sqe->addr);
ret = fn(sock, msg, flags);
if (force_nonblock && ret == -EAGAIN)
return ret;
+ if (ret == -ERESTARTSYS)
+ ret = -EINTR;
}
+ if (req->fs) {
+ struct fs_struct *fs = req->fs;
+
+ spin_lock(&req->fs->lock);
+ if (--fs->users)
+ fs = NULL;
+ spin_unlock(&req->fs->lock);
+ if (fs)
+ free_fs_struct(fs);
+ }
io_cqring_add_event(req->ctx, sqe->user_data, ret);
io_put_req(req);
return 0;
@@ -2023,7 +2078,7 @@
}
static int io_req_defer(struct io_ring_ctx *ctx, struct io_kiocb *req,
- const struct io_uring_sqe *sqe)
+ struct sqe_submit *s)
{
struct io_uring_sqe *sqe_copy;
@@ -2041,7 +2096,8 @@
return 0;
}
- memcpy(sqe_copy, sqe, sizeof(*sqe_copy));
+ memcpy(&req->submit, s, sizeof(*s));
+ memcpy(sqe_copy, s->sqe, sizeof(*sqe_copy));
req->submit.sqe = sqe_copy;
INIT_WORK(&req->work, io_sq_wq_submit_work);
@@ -2053,15 +2109,14 @@
static int __io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
const struct sqe_submit *s, bool force_nonblock)
{
- int ret, opcode;
+ int ret;
req->user_data = READ_ONCE(s->sqe->user_data);
if (unlikely(s->index >= ctx->sq_entries))
return -EINVAL;
- opcode = READ_ONCE(s->sqe->opcode);
- switch (opcode) {
+ switch (req->submit.opcode) {
case IORING_OP_NOP:
ret = io_nop(req, req->user_data);
break;
@@ -2125,10 +2180,10 @@
return 0;
}
-static struct async_list *io_async_list_from_sqe(struct io_ring_ctx *ctx,
- const struct io_uring_sqe *sqe)
+static struct async_list *io_async_list_from_req(struct io_ring_ctx *ctx,
+ struct io_kiocb *req)
{
- switch (sqe->opcode) {
+ switch (req->submit.opcode) {
case IORING_OP_READV:
case IORING_OP_READ_FIXED:
return &ctx->pending_async[READ];
@@ -2140,17 +2195,16 @@
}
}
-static inline bool io_sqe_needs_user(const struct io_uring_sqe *sqe)
+static inline bool io_req_needs_user(struct io_kiocb *req)
{
- u8 opcode = READ_ONCE(sqe->opcode);
-
- return !(opcode == IORING_OP_READ_FIXED ||
- opcode == IORING_OP_WRITE_FIXED);
+ return !(req->submit.opcode == IORING_OP_READ_FIXED ||
+ req->submit.opcode == IORING_OP_WRITE_FIXED);
}
static void io_sq_wq_submit_work(struct work_struct *work)
{
struct io_kiocb *req = container_of(work, struct io_kiocb, work);
+ struct fs_struct *old_fs_struct = current->fs;
struct io_ring_ctx *ctx = req->ctx;
struct mm_struct *cur_mm = NULL;
struct async_list *async_list;
@@ -2160,7 +2214,9 @@
int ret;
old_cred = override_creds(ctx->creds);
- async_list = io_async_list_from_sqe(ctx, req->submit.sqe);
+ async_list = io_async_list_from_req(ctx, req);
+
+ allow_kernel_signal(SIGINT);
restart:
do {
struct sqe_submit *s = &req->submit;
@@ -2170,10 +2226,21 @@
/* Ensure we clear previously set non-block flag */
req->rw.ki_flags &= ~IOCB_NOWAIT;
+ if ((req->fs && req->fs != current->fs) ||
+ (!req->fs && current->fs != old_fs_struct)) {
+ task_lock(current);
+ if (req->fs)
+ current->fs = req->fs;
+ else
+ current->fs = old_fs_struct;
+ task_unlock(current);
+ }
+
ret = 0;
- if (io_sqe_needs_user(sqe) && !cur_mm) {
+ if (io_req_needs_user(req) && !cur_mm) {
if (!mmget_not_zero(ctx->sqo_mm)) {
ret = -EFAULT;
+ goto end_req;
} else {
cur_mm = ctx->sqo_mm;
use_mm(cur_mm);
@@ -2183,6 +2250,18 @@
}
if (!ret) {
+ req->work_task = current;
+
+ /*
+ * Pairs with the smp_store_mb() (B) in
+ * io_cancel_async_work().
+ */
+ smp_mb(); /* A */
+ if (req->flags & REQ_F_CANCEL) {
+ ret = -ECANCELED;
+ goto end_req;
+ }
+
s->has_user = cur_mm != NULL;
s->needs_lock = true;
do {
@@ -2198,6 +2277,10 @@
cond_resched();
} while (1);
}
+end_req:
+ spin_lock_irq(&ctx->task_lock);
+ list_del_init(&req->task_list);
+ spin_unlock_irq(&ctx->task_lock);
/* drop submission reference */
io_put_req(req);
@@ -2262,12 +2345,18 @@
}
out:
+ disallow_signal(SIGINT);
if (cur_mm) {
set_fs(old_fs);
unuse_mm(cur_mm);
mmput(cur_mm);
}
revert_creds(old_cred);
+ if (old_fs_struct != current->fs) {
+ task_lock(current);
+ current->fs = old_fs_struct;
+ task_unlock(current);
+ }
}
/*
@@ -2297,15 +2386,24 @@
list_del_init(&req->list);
ret = false;
}
+
+ if (ret) {
+ struct io_ring_ctx *ctx = req->ctx;
+
+ req->files = current->files;
+
+ spin_lock_irq(&ctx->task_lock);
+ list_add(&req->task_list, &ctx->task_list);
+ req->work_task = NULL;
+ spin_unlock_irq(&ctx->task_lock);
+ }
spin_unlock(&list->lock);
return ret;
}
-static bool io_op_needs_file(const struct io_uring_sqe *sqe)
+static bool io_op_needs_file(struct io_kiocb *req)
{
- int op = READ_ONCE(sqe->opcode);
-
- switch (op) {
+ switch (req->submit.opcode) {
case IORING_OP_NOP:
case IORING_OP_POLL_REMOVE:
case IORING_OP_TIMEOUT:
@@ -2333,7 +2431,7 @@
*/
req->sequence = s->sequence;
- if (!io_op_needs_file(s->sqe))
+ if (!io_op_needs_file(req))
return 0;
if (flags & IOSQE_FIXED_FILE) {
@@ -2374,7 +2472,7 @@
s->sqe = sqe_copy;
memcpy(&req->submit, s, sizeof(*s));
- list = io_async_list_from_sqe(ctx, s->sqe);
+ list = io_async_list_from_req(ctx, req);
if (!io_add_to_prev_work(list, req)) {
if (list)
atomic_inc(&list->cnt);
@@ -2409,7 +2507,7 @@
{
int ret;
- ret = io_req_defer(ctx, req, s->sqe);
+ ret = io_req_defer(ctx, req, s);
if (ret) {
if (ret != -EIOCBQUEUED) {
io_free_req(req);
@@ -2436,7 +2534,7 @@
* list.
*/
req->flags |= REQ_F_IO_DRAIN;
- ret = io_req_defer(ctx, req, s->sqe);
+ ret = io_req_defer(ctx, req, s);
if (ret) {
if (ret != -EIOCBQUEUED) {
io_free_req(req);
@@ -2484,6 +2582,7 @@
goto err;
}
+ memcpy(&req->submit, s, sizeof(*s));
ret = io_req_set_file(ctx, s, state, req);
if (unlikely(ret)) {
err_req:
@@ -2495,6 +2594,23 @@
req->user_data = s->sqe->user_data;
+#if defined(CONFIG_NET)
+ switch (req->submit.opcode) {
+ case IORING_OP_SENDMSG:
+ case IORING_OP_RECVMSG:
+ spin_lock(&current->fs->lock);
+ if (!current->fs->in_exec) {
+ req->fs = current->fs;
+ req->fs->users++;
+ }
+ spin_unlock(&current->fs->lock);
+ if (!req->fs) {
+ ret = -EAGAIN;
+ goto err_req;
+ }
+ }
+#endif
+
/*
* If we already have a head request, queue this one for async
* submittal once the head completes. If we don't have a head but
@@ -2594,6 +2710,7 @@
if (head < ctx->sq_entries) {
s->index = head;
s->sqe = &ctx->sq_sqes[head];
+ s->opcode = READ_ONCE(s->sqe->opcode);
s->sequence = ctx->cached_sq_head;
ctx->cached_sq_head++;
return true;
@@ -2704,7 +2821,7 @@
*/
mutex_lock(&ctx->uring_lock);
if (!list_empty(&ctx->poll_list))
- __io_iopoll_check(ctx, &nr_events, 0);
+ io_iopoll_getevents(ctx, &nr_events, 0);
else
inflight = 0;
mutex_unlock(&ctx->uring_lock);
@@ -2724,16 +2841,6 @@
to_submit = io_sqring_entries(ctx);
if (!to_submit) {
/*
- * We're polling. If we're within the defined idle
- * period, then let us spin without work before going
- * to sleep.
- */
- if (inflight || !time_after(jiffies, timeout)) {
- cond_resched();
- continue;
- }
-
- /*
* Drop cur_mm before scheduling, we can't hold it for
* long periods (or over schedule()). Do this before
* adding ourselves to the waitqueue, as the unuse/drop
@@ -2745,6 +2852,16 @@
cur_mm = NULL;
}
+ /*
+ * We're polling. If we're within the defined idle
+ * period, then let us spin without work before going
+ * to sleep.
+ */
+ if (inflight || !time_after(jiffies, timeout)) {
+ cond_resched();
+ continue;
+ }
+
prepare_to_wait(&ctx->sqo_wait, &wait,
TASK_INTERRUPTIBLE);
@@ -3033,13 +3150,6 @@
struct sk_buff *skb;
int i;
- if (!capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN)) {
- unsigned long inflight = ctx->user->unix_inflight + nr;
-
- if (inflight > task_rlimit(current, RLIMIT_NOFILE))
- return -EMFILE;
- }
-
fpl = kzalloc(sizeof(*fpl), GFP_KERNEL);
if (!fpl)
return -ENOMEM;
@@ -3174,7 +3284,6 @@
{
int ret;
- init_waitqueue_head(&ctx->sqo_wait);
mmgrab(current->mm);
ctx->sqo_mm = current->mm;
@@ -3305,6 +3414,9 @@
return SIZE_MAX;
#endif
+ if (sq_offset)
+ *sq_offset = off;
+
sq_array_size = array_size(sizeof(u32), sq_entries);
if (sq_array_size == SIZE_MAX)
return SIZE_MAX;
@@ -3312,9 +3424,6 @@
if (check_add_overflow(off, sq_array_size, &off))
return SIZE_MAX;
- if (sq_offset)
- *sq_offset = off;
-
return off;
}
@@ -3435,8 +3544,8 @@
ret = 0;
if (!pages || nr_pages > got_pages) {
- kfree(vmas);
- kfree(pages);
+ kvfree(vmas);
+ kvfree(pages);
pages = kvmalloc_array(nr_pages, sizeof(struct page *),
GFP_KERNEL);
vmas = kvmalloc_array(nr_pages,
@@ -3612,12 +3721,41 @@
return fasync_helper(fd, file, on, &ctx->cq_fasync);
}
+static void io_cancel_async_work(struct io_ring_ctx *ctx,
+ struct files_struct *files)
+{
+ struct io_kiocb *req;
+
+ if (list_empty(&ctx->task_list))
+ return;
+
+ spin_lock_irq(&ctx->task_lock);
+
+ list_for_each_entry(req, &ctx->task_list, task_list) {
+ if (files && req->files != files)
+ continue;
+
+ /*
+ * The below executes an smp_mb(), which matches with the
+ * smp_mb() (A) in io_sq_wq_submit_work() such that either
+ * we store the REQ_F_CANCEL flag to req->flags or we see the
+ * req->work_task set in io_sq_wq_submit_work().
+ */
+ smp_store_mb(req->flags, req->flags | REQ_F_CANCEL); /* B */
+
+ if (req->work_task)
+ send_sig(SIGINT, req->work_task, 1);
+ }
+ spin_unlock_irq(&ctx->task_lock);
+}
+
static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
{
mutex_lock(&ctx->uring_lock);
percpu_ref_kill(&ctx->refs);
mutex_unlock(&ctx->uring_lock);
+ io_cancel_async_work(ctx, NULL);
io_kill_timeouts(ctx);
io_poll_remove_all(ctx);
io_iopoll_reap_events(ctx);
@@ -3625,6 +3763,16 @@
io_ring_ctx_free(ctx);
}
+static int io_uring_flush(struct file *file, void *data)
+{
+ struct io_ring_ctx *ctx = file->private_data;
+
+ if (fatal_signal_pending(current) || (current->flags & PF_EXITING))
+ io_cancel_async_work(ctx, data);
+
+ return 0;
+}
+
static int io_uring_release(struct inode *inode, struct file *file)
{
struct io_ring_ctx *ctx = file->private_data;
@@ -3704,6 +3852,9 @@
mutex_lock(&ctx->uring_lock);
submitted = io_ring_submit(ctx, to_submit);
mutex_unlock(&ctx->uring_lock);
+
+ if (submitted != to_submit)
+ goto out;
}
if (flags & IORING_ENTER_GETEVENTS) {
unsigned nr_events = 0;
@@ -3717,6 +3868,7 @@
}
}
+out:
percpu_ref_put(&ctx->refs);
out_fput:
fdput(f);
@@ -3725,6 +3877,7 @@
static const struct file_operations io_uring_fops = {
.release = io_uring_release,
+ .flush = io_uring_flush,
.mmap = io_uring_mmap,
.poll = io_uring_poll,
.fasync = io_uring_fasync,
@@ -3736,6 +3889,10 @@
struct io_rings *rings;
size_t size, sq_array_offset;
+ /* make sure these are sane, as we already accounted them */
+ ctx->sq_entries = p->sq_entries;
+ ctx->cq_entries = p->cq_entries;
+
size = rings_size(p->sq_entries, p->cq_entries, &sq_array_offset);
if (size == SIZE_MAX)
return -EOVERFLOW;
@@ -3752,16 +3909,20 @@
rings->cq_ring_entries = p->cq_entries;
ctx->sq_mask = rings->sq_ring_mask;
ctx->cq_mask = rings->cq_ring_mask;
- ctx->sq_entries = rings->sq_ring_entries;
- ctx->cq_entries = rings->cq_ring_entries;
size = array_size(sizeof(struct io_uring_sqe), p->sq_entries);
- if (size == SIZE_MAX)
+ if (size == SIZE_MAX) {
+ io_mem_free(ctx->rings);
+ ctx->rings = NULL;
return -EOVERFLOW;
+ }
ctx->sq_sqes = io_mem_alloc(size);
- if (!ctx->sq_sqes)
+ if (!ctx->sq_sqes) {
+ io_mem_free(ctx->rings);
+ ctx->rings = NULL;
return -ENOMEM;
+ }
return 0;
}
@@ -3853,7 +4014,7 @@
ctx->account_mem = account_mem;
ctx->user = user;
- ctx->creds = prepare_creds();
+ ctx->creds = get_current_cred();
if (!ctx->creds) {
ret = -ENOMEM;
goto err;