// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains the procedures for the handling of select and poll
 *
 * Created for Linux based loosely upon Mathius Lattner's minix
 * patches by Peter MacDonald. Heavily edited by Linus.
 *
 *  4 February 1994
 *     COFF/ELF binary emulation. If the process has the STICKY_TIMEOUTS
 *     flag set in its personality we do *not* modify the given timeout
 *     parameter to reflect time remaining.
 *
 *  24 January 2000
 *     Changed sys_poll()/do_poll() to use PAGE_SIZE chunk-based allocation
 *     of fds to overcome nfds < 16390 descriptors limit (Tigran Aivazian).
 */

#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/sched/rt.h>
#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/personality.h> /* for STICKY_TIMEOUTS */
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/fs.h>
#include <linux/rcupdate.h>
#include <linux/hrtimer.h>
#include <linux/freezer.h>
#include <net/busy_poll.h>
#include <linux/vmalloc.h>

#include <linux/uaccess.h>

/*
 * Estimate expected accuracy in ns from a timespec64.
 *
 * After quite a bit of churning around, we've settled on
 * a simple thing of taking 0.1% of the timeout as the
 * slack, with a cap of 100 msec.
 * "nice" tasks get a 0.5% slack instead.
 *
 * Consider this comment an open invitation to come up with even
 * better solutions.
 */

#define MAX_SLACK	(100 * NSEC_PER_MSEC)

static long __estimate_accuracy(struct timespec64 *tv)
{
	long slack;
	int divfactor = 1000;

	if (tv->tv_sec < 0)
		return 0;

	if (task_nice(current) > 0)
		divfactor = divfactor / 5;

	if (tv->tv_sec > MAX_SLACK / (NSEC_PER_SEC/divfactor))
		return MAX_SLACK;

	slack = tv->tv_nsec / divfactor;
	slack += tv->tv_sec * (NSEC_PER_SEC/divfactor);

	if (slack > MAX_SLACK)
		return MAX_SLACK;

	return slack;
}

u64 select_estimate_accuracy(struct timespec64 *tv)
{
	u64 ret;
	struct timespec64 now;

	/*
	 * Realtime tasks get a slack of 0 for obvious reasons.
	 */

	if (rt_task(current))
		return 0;

	ktime_get_ts64(&now);
	now = timespec64_sub(*tv, now);
	ret = __estimate_accuracy(&now);
	if (ret < current->timer_slack_ns)
		return current->timer_slack_ns;
	return ret;
}
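
/*
 * Worked example (illustrative, not part of the original source): with the
 * default divfactor of 1000, a 2 s relative timeout yields 2 ms of slack,
 * and a "nice" task (divfactor 200) gets 10 ms for the same timeout. A
 * 200 s timeout would compute to 200 ms, which the MAX_SLACK cap trims
 * back to 100 ms.
 */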

struct poll_table_page {
	struct poll_table_page * next;
	struct poll_table_entry * entry;
	struct poll_table_entry entries[0];
};

#define POLL_TABLE_FULL(table) \
	((unsigned long)((table)->entry+1) > PAGE_SIZE + (unsigned long)(table))
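
/*
 * Illustrative note (not from the original source): each table page holds
 * one struct poll_table_page header followed by as many poll_table_entry
 * slots as fit in the remainder of the page, so POLL_TABLE_FULL() simply
 * asks whether one more entry would run past the PAGE_SIZE boundary.
 */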

/*
 * Ok, Peter made a complicated, but straightforward multiple_wait() function.
 * I have rewritten this, taking some shortcuts: This code may not be easy to
 * follow, but it should be free of race-conditions, and it's practical. If you
 * understand what I'm doing here, then you understand how the Linux
 * sleep/wakeup mechanism works.
 *
 * Two very simple procedures, poll_wait() and poll_freewait(), do all the
 * work. poll_wait() is an inline-function defined in <linux/poll.h>,
 * as all select/poll functions have to call it to add an entry to the
 * poll table.
 */
static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
		       poll_table *p);

void poll_initwait(struct poll_wqueues *pwq)
{
	init_poll_funcptr(&pwq->pt, __pollwait);
	pwq->polling_task = current;
	pwq->triggered = 0;
	pwq->error = 0;
	pwq->table = NULL;
	pwq->inline_index = 0;
}
EXPORT_SYMBOL(poll_initwait);

static void free_poll_entry(struct poll_table_entry *entry)
{
	remove_wait_queue(entry->wait_address, &entry->wait);
	fput(entry->filp);
}

void poll_freewait(struct poll_wqueues *pwq)
{
	struct poll_table_page * p = pwq->table;
	int i;
	for (i = 0; i < pwq->inline_index; i++)
		free_poll_entry(pwq->inline_entries + i);
	while (p) {
		struct poll_table_entry * entry;
		struct poll_table_page *old;

		entry = p->entry;
		do {
			entry--;
			free_poll_entry(entry);
		} while (entry > p->entries);
		old = p;
		p = p->next;
		free_page((unsigned long) old);
	}
}
EXPORT_SYMBOL(poll_freewait);

static struct poll_table_entry *poll_get_entry(struct poll_wqueues *p)
{
	struct poll_table_page *table = p->table;

	if (p->inline_index < N_INLINE_POLL_ENTRIES)
		return p->inline_entries + p->inline_index++;

	if (!table || POLL_TABLE_FULL(table)) {
		struct poll_table_page *new_table;

		new_table = (struct poll_table_page *) __get_free_page(GFP_KERNEL);
		if (!new_table) {
			p->error = -ENOMEM;
			return NULL;
		}
		new_table->entry = new_table->entries;
		new_table->next = table;
		p->table = new_table;
		table = new_table;
	}

	return table->entry++;
}

static int __pollwake(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
{
	struct poll_wqueues *pwq = wait->private;
	DECLARE_WAITQUEUE(dummy_wait, pwq->polling_task);

	/*
	 * Although this function is called under waitqueue lock, LOCK
	 * doesn't imply write barrier and the users expect write
	 * barrier semantics on wakeup functions.  The following
	 * smp_wmb() is equivalent to smp_wmb() in try_to_wake_up()
	 * and is paired with smp_store_mb() in poll_schedule_timeout.
	 */
	smp_wmb();
	pwq->triggered = 1;

	/*
	 * Perform the default wake up operation using a dummy
	 * waitqueue.
	 *
	 * TODO: This is hacky but there currently is no interface to
	 * pass in @sync.  @sync is scheduled to be removed and once
	 * that happens, wake_up_process() can be used directly.
	 */
	return default_wake_function(&dummy_wait, mode, sync, key);
}

static int pollwake(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
{
	struct poll_table_entry *entry;

	entry = container_of(wait, struct poll_table_entry, wait);
	if (key && !(key_to_poll(key) & entry->key))
		return 0;
	return __pollwake(wait, mode, sync, key);
}

/* Add a new entry */
static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
		       poll_table *p)
{
	struct poll_wqueues *pwq = container_of(p, struct poll_wqueues, pt);
	struct poll_table_entry *entry = poll_get_entry(pwq);
	if (!entry)
		return;
	entry->filp = get_file(filp);
	entry->wait_address = wait_address;
	entry->key = p->_key;
	init_waitqueue_func_entry(&entry->wait, pollwake);
	entry->wait.private = pwq;
	add_wait_queue(wait_address, &entry->wait);
}

static int poll_schedule_timeout(struct poll_wqueues *pwq, int state,
				 ktime_t *expires, unsigned long slack)
{
	int rc = -EINTR;

	set_current_state(state);
	if (!pwq->triggered)
		rc = schedule_hrtimeout_range(expires, slack, HRTIMER_MODE_ABS);
	__set_current_state(TASK_RUNNING);

	/*
	 * Prepare for the next iteration.
	 *
	 * The following smp_store_mb() serves two purposes.  First, it's
	 * the counterpart rmb of the wmb in pollwake() such that data
	 * written before wake up is always visible after wake up.
	 * Second, the full barrier guarantees that triggered clearing
	 * doesn't pass event check of the next iteration.  Note that
	 * this problem doesn't exist for the first iteration as
	 * add_wait_queue() has full barrier semantics.
	 */
	smp_store_mb(pwq->triggered, 0);

	return rc;
}

/**
 * poll_select_set_timeout - helper function to set up the timeout value
 * @to: pointer to timespec64 variable for the final timeout
 * @sec: seconds (from user space)
 * @nsec: nanoseconds (from user space)
 *
 * Note, we do not use a timespec for the user space value here. That
 * way we can use the function for timeval and compat interfaces as well.
 *
 * Returns -EINVAL if sec/nsec are not normalized. Otherwise 0.
 */
int poll_select_set_timeout(struct timespec64 *to, time64_t sec, long nsec)
{
	struct timespec64 ts = {.tv_sec = sec, .tv_nsec = nsec};

	if (!timespec64_valid(&ts))
		return -EINVAL;

	/* Optimize for the zero timeout value here */
	if (!sec && !nsec) {
		to->tv_sec = to->tv_nsec = 0;
	} else {
		ktime_get_ts64(to);
		*to = timespec64_add_safe(*to, ts);
	}
	return 0;
}
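
/*
 * Worked example (illustrative only): with the monotonic clock currently at
 * 100.0 s, poll_select_set_timeout(&to, 1, 500000000) stores the absolute
 * deadline 101.5 s in *to, while a 0/0 input stores 0/0, which the callers
 * below treat as "poll once and return immediately".
 */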

enum poll_time_type {
	PT_TIMEVAL = 0,
	PT_OLD_TIMEVAL = 1,
	PT_TIMESPEC = 2,
	PT_OLD_TIMESPEC = 3,
};

static int poll_select_finish(struct timespec64 *end_time,
			      void __user *p,
			      enum poll_time_type pt_type, int ret)
{
	struct timespec64 rts;

	restore_saved_sigmask_unless(ret == -ERESTARTNOHAND);

	if (!p)
		return ret;

	if (current->personality & STICKY_TIMEOUTS)
		goto sticky;

	/* No update for zero timeout */
	if (!end_time->tv_sec && !end_time->tv_nsec)
		return ret;

	ktime_get_ts64(&rts);
	rts = timespec64_sub(*end_time, rts);
	if (rts.tv_sec < 0)
		rts.tv_sec = rts.tv_nsec = 0;

	switch (pt_type) {
	case PT_TIMEVAL:
		{
			struct timeval rtv;

			if (sizeof(rtv) > sizeof(rtv.tv_sec) + sizeof(rtv.tv_usec))
				memset(&rtv, 0, sizeof(rtv));
			rtv.tv_sec = rts.tv_sec;
			rtv.tv_usec = rts.tv_nsec / NSEC_PER_USEC;
			if (!copy_to_user(p, &rtv, sizeof(rtv)))
				return ret;
		}
		break;
	case PT_OLD_TIMEVAL:
		{
			struct old_timeval32 rtv;

			rtv.tv_sec = rts.tv_sec;
			rtv.tv_usec = rts.tv_nsec / NSEC_PER_USEC;
			if (!copy_to_user(p, &rtv, sizeof(rtv)))
				return ret;
		}
		break;
	case PT_TIMESPEC:
		if (!put_timespec64(&rts, p))
			return ret;
		break;
	case PT_OLD_TIMESPEC:
		if (!put_old_timespec32(&rts, p))
			return ret;
		break;
	default:
		BUG();
	}
	/*
	 * If an application puts its timeval in read-only memory, we
	 * don't want the Linux-specific update to the timeval to
	 * cause a fault after the select has completed
	 * successfully. However, because we're not updating the
	 * timeval, we can't restart the system call.
	 */

sticky:
	if (ret == -ERESTARTNOHAND)
		ret = -EINTR;
	return ret;
}

/*
 * Scalable version of the fd_set.
 */

typedef struct {
	unsigned long *in, *out, *ex;
	unsigned long *res_in, *res_out, *res_ex;
} fd_set_bits;

/*
 * How many longwords for "nr" bits?
 */
#define FDS_BITPERLONG	(8*sizeof(long))
#define FDS_LONGS(nr)	(((nr)+FDS_BITPERLONG-1)/FDS_BITPERLONG)
#define FDS_BYTES(nr)	(FDS_LONGS(nr)*sizeof(long))
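
/*
 * Worked example (illustrative only): on a 64-bit build, nr = 1024
 * descriptors gives FDS_LONGS(1024) = 16 longwords and FDS_BYTES(1024) =
 * 128 bytes per bitmap; even nr = 1 rounds up to a full longword.
 */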

/*
 * Use "unsigned long" accesses to let user-mode fd_set's be long-aligned.
 */
static inline
int get_fd_set(unsigned long nr, void __user *ufdset, unsigned long *fdset)
{
	nr = FDS_BYTES(nr);
	if (ufdset)
		return copy_from_user(fdset, ufdset, nr) ? -EFAULT : 0;

	memset(fdset, 0, nr);
	return 0;
}

static inline unsigned long __must_check
set_fd_set(unsigned long nr, void __user *ufdset, unsigned long *fdset)
{
	if (ufdset)
		return __copy_to_user(ufdset, fdset, FDS_BYTES(nr));
	return 0;
}

static inline
void zero_fd_set(unsigned long nr, unsigned long *fdset)
{
	memset(fdset, 0, FDS_BYTES(nr));
}

#define FDS_IN(fds, n)		(fds->in + n)
#define FDS_OUT(fds, n)		(fds->out + n)
#define FDS_EX(fds, n)		(fds->ex + n)

#define BITS(fds, n)	(*FDS_IN(fds, n)|*FDS_OUT(fds, n)|*FDS_EX(fds, n))

static int max_select_fd(unsigned long n, fd_set_bits *fds)
{
	unsigned long *open_fds;
	unsigned long set;
	int max;
	struct fdtable *fdt;

	/* handle last incomplete long-word first */
	set = ~(~0UL << (n & (BITS_PER_LONG-1)));
	n /= BITS_PER_LONG;
	fdt = files_fdtable(current->files);
	open_fds = fdt->open_fds + n;
	max = 0;
	if (set) {
		set &= BITS(fds, n);
		if (set) {
			if (!(set & ~*open_fds))
				goto get_max;
			return -EBADF;
		}
	}
	while (n) {
		open_fds--;
		n--;
		set = BITS(fds, n);
		if (!set)
			continue;
		if (set & ~*open_fds)
			return -EBADF;
		if (max)
			continue;
get_max:
		do {
			max++;
			set >>= 1;
		} while (set);
		max += n * BITS_PER_LONG;
	}

	return max;
}

#define POLLIN_SET (EPOLLRDNORM | EPOLLRDBAND | EPOLLIN | EPOLLHUP | EPOLLERR)
#define POLLOUT_SET (EPOLLWRBAND | EPOLLWRNORM | EPOLLOUT | EPOLLERR)
#define POLLEX_SET (EPOLLPRI)
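
/*
 * Illustrative note (not from the original source): these masks translate
 * select()'s three fd_sets into poll event bits. EPOLLERR appears in both
 * POLLIN_SET and POLLOUT_SET, so an error condition reports the descriptor
 * as both readable and writable, matching traditional select() semantics.
 */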

static inline void wait_key_set(poll_table *wait, unsigned long in,
				unsigned long out, unsigned long bit,
				__poll_t ll_flag)
{
	wait->_key = POLLEX_SET | ll_flag;
	if (in & bit)
		wait->_key |= POLLIN_SET;
	if (out & bit)
		wait->_key |= POLLOUT_SET;
}

static int do_select(int n, fd_set_bits *fds, struct timespec64 *end_time)
{
	ktime_t expire, *to = NULL;
	struct poll_wqueues table;
	poll_table *wait;
	int retval, i, timed_out = 0;
	u64 slack = 0;
	__poll_t busy_flag = net_busy_loop_on() ? POLL_BUSY_LOOP : 0;
	unsigned long busy_start = 0;

	rcu_read_lock();
	retval = max_select_fd(n, fds);
	rcu_read_unlock();

	if (retval < 0)
		return retval;
	n = retval;

	poll_initwait(&table);
	wait = &table.pt;
	if (end_time && !end_time->tv_sec && !end_time->tv_nsec) {
		wait->_qproc = NULL;
		timed_out = 1;
	}

	if (end_time && !timed_out)
		slack = select_estimate_accuracy(end_time);

	retval = 0;
	for (;;) {
		unsigned long *rinp, *routp, *rexp, *inp, *outp, *exp;
		bool can_busy_loop = false;

		inp = fds->in; outp = fds->out; exp = fds->ex;
		rinp = fds->res_in; routp = fds->res_out; rexp = fds->res_ex;

		for (i = 0; i < n; ++rinp, ++routp, ++rexp) {
			unsigned long in, out, ex, all_bits, bit = 1, j;
			unsigned long res_in = 0, res_out = 0, res_ex = 0;
			__poll_t mask;

			in = *inp++; out = *outp++; ex = *exp++;
			all_bits = in | out | ex;
			if (all_bits == 0) {
				i += BITS_PER_LONG;
				continue;
			}

			for (j = 0; j < BITS_PER_LONG; ++j, ++i, bit <<= 1) {
				struct fd f;
				if (i >= n)
					break;
				if (!(bit & all_bits))
					continue;
				f = fdget(i);
				if (f.file) {
					wait_key_set(wait, in, out, bit,
						     busy_flag);
					mask = vfs_poll(f.file, wait);

					fdput(f);
					if ((mask & POLLIN_SET) && (in & bit)) {
						res_in |= bit;
						retval++;
						wait->_qproc = NULL;
					}
					if ((mask & POLLOUT_SET) && (out & bit)) {
						res_out |= bit;
						retval++;
						wait->_qproc = NULL;
					}
					if ((mask & POLLEX_SET) && (ex & bit)) {
						res_ex |= bit;
						retval++;
						wait->_qproc = NULL;
					}
					/* got something, stop busy polling */
					if (retval) {
						can_busy_loop = false;
						busy_flag = 0;

					/*
					 * only remember a returned
					 * POLL_BUSY_LOOP if we asked for it
					 */
					} else if (busy_flag & mask)
						can_busy_loop = true;

				}
			}
			if (res_in)
				*rinp = res_in;
			if (res_out)
				*routp = res_out;
			if (res_ex)
				*rexp = res_ex;
			cond_resched();
		}
		wait->_qproc = NULL;
		if (retval || timed_out || signal_pending(current))
			break;
		if (table.error) {
			retval = table.error;
			break;
		}

		/* only if found POLL_BUSY_LOOP sockets && not out of time */
		if (can_busy_loop && !need_resched()) {
			if (!busy_start) {
				busy_start = busy_loop_current_time();
				continue;
			}
			if (!busy_loop_timeout(busy_start))
				continue;
		}
		busy_flag = 0;

		/*
		 * If this is the first loop and we have a timeout
		 * given, then we convert to ktime_t and set the to
		 * pointer to the expiry value.
		 */
		if (end_time && !to) {
			expire = timespec64_to_ktime(*end_time);
			to = &expire;
		}

		if (!poll_schedule_timeout(&table, TASK_INTERRUPTIBLE,
					   to, slack))
			timed_out = 1;
	}

	poll_freewait(&table);

	return retval;
}

/*
 * We can actually return ERESTARTSYS instead of EINTR, but I'd
 * like to be certain this leads to no problems. So I return
 * EINTR just for safety.
 *
 * Update: ERESTARTSYS breaks at least the xview clock binary, so
 * I'm trying ERESTARTNOHAND which restarts only when you want to.
 */
int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
		    fd_set __user *exp, struct timespec64 *end_time)
{
	fd_set_bits fds;
	void *bits;
	int ret, max_fds;
	size_t size, alloc_size;
	struct fdtable *fdt;
	/* Allocate small arguments on the stack to save memory and be faster */
	long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];

	ret = -EINVAL;
	if (n < 0)
		goto out_nofds;

	/* max_fds can increase, so grab it once to avoid race */
	rcu_read_lock();
	fdt = files_fdtable(current->files);
	max_fds = fdt->max_fds;
	rcu_read_unlock();
	if (n > max_fds)
		n = max_fds;

	/*
	 * We need 6 bitmaps (in/out/ex for both incoming and outgoing);
	 * since we use fd_set we need to allocate memory in units of
	 * long-words.
	 */
	size = FDS_BYTES(n);
	bits = stack_fds;
	if (size > sizeof(stack_fds) / 6) {
		/* Not enough space in on-stack array; must use kmalloc */
		ret = -ENOMEM;
		if (size > (SIZE_MAX / 6))
			goto out_nofds;

		alloc_size = 6 * size;
		bits = kvmalloc(alloc_size, GFP_KERNEL);
		if (!bits)
			goto out_nofds;
	}
	fds.in      = bits;
	fds.out     = bits +   size;
	fds.ex      = bits + 2*size;
	fds.res_in  = bits + 3*size;
	fds.res_out = bits + 4*size;
	fds.res_ex  = bits + 5*size;
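
	/*
	 * Illustrative layout note (not from the original source): the six
	 * equally sized bitmaps share one allocation, packed back to back:
	 *
	 *   bits: [ in | out | ex | res_in | res_out | res_ex ]
	 *           0   size  2*size 3*size  4*size    5*size
	 */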

	if ((ret = get_fd_set(n, inp, fds.in)) ||
	    (ret = get_fd_set(n, outp, fds.out)) ||
	    (ret = get_fd_set(n, exp, fds.ex)))
		goto out;
	zero_fd_set(n, fds.res_in);
	zero_fd_set(n, fds.res_out);
	zero_fd_set(n, fds.res_ex);

	ret = do_select(n, &fds, end_time);

	if (ret < 0)
		goto out;
	if (!ret) {
		ret = -ERESTARTNOHAND;
		if (signal_pending(current))
			goto out;
		ret = 0;
	}

	if (set_fd_set(n, inp, fds.res_in) ||
	    set_fd_set(n, outp, fds.res_out) ||
	    set_fd_set(n, exp, fds.res_ex))
		ret = -EFAULT;

out:
	if (bits != stack_fds)
		kvfree(bits);
out_nofds:
	return ret;
}

static int kern_select(int n, fd_set __user *inp, fd_set __user *outp,
		       fd_set __user *exp, struct timeval __user *tvp)
{
	struct timespec64 end_time, *to = NULL;
	struct timeval tv;
	int ret;

	if (tvp) {
		if (copy_from_user(&tv, tvp, sizeof(tv)))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to,
				tv.tv_sec + (tv.tv_usec / USEC_PER_SEC),
				(tv.tv_usec % USEC_PER_SEC) * NSEC_PER_USEC))
			return -EINVAL;
	}

	ret = core_sys_select(n, inp, outp, exp, to);
	return poll_select_finish(&end_time, tvp, PT_TIMEVAL, ret);
}

SYSCALL_DEFINE5(select, int, n, fd_set __user *, inp, fd_set __user *, outp,
		fd_set __user *, exp, struct timeval __user *, tvp)
{
	return kern_select(n, inp, outp, exp, tvp);
}

static long do_pselect(int n, fd_set __user *inp, fd_set __user *outp,
		       fd_set __user *exp, void __user *tsp,
		       const sigset_t __user *sigmask, size_t sigsetsize,
		       enum poll_time_type type)
{
	struct timespec64 ts, end_time, *to = NULL;
	int ret;

	if (tsp) {
		switch (type) {
		case PT_TIMESPEC:
			if (get_timespec64(&ts, tsp))
				return -EFAULT;
			break;
		case PT_OLD_TIMESPEC:
			if (get_old_timespec32(&ts, tsp))
				return -EFAULT;
			break;
		default:
			BUG();
		}

		to = &end_time;
		if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
			return -EINVAL;
	}

	ret = set_user_sigmask(sigmask, sigsetsize);
	if (ret)
		return ret;

	ret = core_sys_select(n, inp, outp, exp, to);
	return poll_select_finish(&end_time, tsp, type, ret);
}

/*
 * Most architectures can't handle 7-argument syscalls. So we provide a
 * 6-argument version where the sixth argument is a pointer to a structure
 * which has a pointer to the sigset_t itself followed by a size_t containing
 * the sigset size.
 */
SYSCALL_DEFINE6(pselect6, int, n, fd_set __user *, inp, fd_set __user *, outp,
		fd_set __user *, exp, struct __kernel_timespec __user *, tsp,
		void __user *, sig)
{
	size_t sigsetsize = 0;
	sigset_t __user *up = NULL;

	if (sig) {
		if (!access_ok(sig, sizeof(void *)+sizeof(size_t))
		    || __get_user(up, (sigset_t __user * __user *)sig)
		    || __get_user(sigsetsize,
				(size_t __user *)(sig+sizeof(void *))))
			return -EFAULT;
	}

	return do_pselect(n, inp, outp, exp, tsp, up, sigsetsize, PT_TIMESPEC);
}
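
/*
 * Illustrative user-side packing for the sixth argument (a sketch, not part
 * of this file; "mask", "rd" and "ts" are hypothetical variables). The
 * kernel reads a { pointer, size } pair, which glibc normally builds
 * internally on the caller's behalf:
 *
 *	struct { const sigset_t *ss; size_t ss_len; } psig = {
 *		&mask, _NSIG / 8
 *	};
 *	syscall(__NR_pselect6, nfds, &rd, NULL, NULL, &ts, &psig);
 */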

#if defined(CONFIG_COMPAT_32BIT_TIME) && !defined(CONFIG_64BIT)

SYSCALL_DEFINE6(pselect6_time32, int, n, fd_set __user *, inp, fd_set __user *, outp,
		fd_set __user *, exp, struct old_timespec32 __user *, tsp,
		void __user *, sig)
{
	size_t sigsetsize = 0;
	sigset_t __user *up = NULL;

	if (sig) {
		if (!access_ok(sig, sizeof(void *)+sizeof(size_t))
		    || __get_user(up, (sigset_t __user * __user *)sig)
		    || __get_user(sigsetsize,
				(size_t __user *)(sig+sizeof(void *))))
			return -EFAULT;
	}

	return do_pselect(n, inp, outp, exp, tsp, up, sigsetsize, PT_OLD_TIMESPEC);
}

#endif

#ifdef __ARCH_WANT_SYS_OLD_SELECT
struct sel_arg_struct {
	unsigned long n;
	fd_set __user *inp, *outp, *exp;
	struct timeval __user *tvp;
};

SYSCALL_DEFINE1(old_select, struct sel_arg_struct __user *, arg)
{
	struct sel_arg_struct a;

	if (copy_from_user(&a, arg, sizeof(a)))
		return -EFAULT;
	return kern_select(a.n, a.inp, a.outp, a.exp, a.tvp);
}
#endif

struct poll_list {
	struct poll_list *next;
	int len;
	struct pollfd entries[0];
};

#define POLLFD_PER_PAGE  ((PAGE_SIZE-sizeof(struct poll_list)) / sizeof(struct pollfd))
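
/*
 * Worked example (illustrative, assuming a typical 64-bit build with 4 KiB
 * pages): with a 16-byte struct poll_list header and 8-byte pollfds,
 * POLLFD_PER_PAGE is (4096 - 16) / 8 = 510 entries per chained page.
 */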

/*
 * Fish for pollable events on the pollfd->fd file descriptor. We're only
 * interested in events matching the pollfd->events mask, and the result
 * matching that mask is both recorded in pollfd->revents and returned. The
 * pwait poll_table will be used by the fd-provided poll handler for waiting,
 * if pwait->_qproc is non-NULL.
 */
static inline __poll_t do_pollfd(struct pollfd *pollfd, poll_table *pwait,
				 bool *can_busy_poll,
				 __poll_t busy_flag)
{
	int fd = pollfd->fd;
	__poll_t mask = 0, filter;
	struct fd f;

	if (fd < 0)
		goto out;
	mask = EPOLLNVAL;
	f = fdget(fd);
	if (!f.file)
		goto out;

	/* userland u16 ->events contains POLL... bitmap */
	filter = demangle_poll(pollfd->events) | EPOLLERR | EPOLLHUP;
	pwait->_key = filter | busy_flag;
	mask = vfs_poll(f.file, pwait);
	if (mask & busy_flag)
		*can_busy_poll = true;
	mask &= filter;		/* Mask out unneeded events. */
	fdput(f);

out:
	/* ... and so does ->revents */
	pollfd->revents = mangle_poll(mask);
	return mask;
}

static int do_poll(struct poll_list *list, struct poll_wqueues *wait,
		   struct timespec64 *end_time)
{
	poll_table* pt = &wait->pt;
	ktime_t expire, *to = NULL;
	int timed_out = 0, count = 0;
	u64 slack = 0;
	__poll_t busy_flag = net_busy_loop_on() ? POLL_BUSY_LOOP : 0;
	unsigned long busy_start = 0;

	/* Optimise the no-wait case */
	if (end_time && !end_time->tv_sec && !end_time->tv_nsec) {
		pt->_qproc = NULL;
		timed_out = 1;
	}

	if (end_time && !timed_out)
		slack = select_estimate_accuracy(end_time);

	for (;;) {
		struct poll_list *walk;
		bool can_busy_loop = false;

		for (walk = list; walk != NULL; walk = walk->next) {
			struct pollfd * pfd, * pfd_end;

			pfd = walk->entries;
			pfd_end = pfd + walk->len;
			for (; pfd != pfd_end; pfd++) {
				/*
				 * Fish for events. If we found one, record it
				 * and kill poll_table->_qproc, so we don't
				 * needlessly register any other waiters after
				 * this. They'll get immediately deregistered
				 * when we break out and return.
				 */
				if (do_pollfd(pfd, pt, &can_busy_loop,
					      busy_flag)) {
					count++;
					pt->_qproc = NULL;
					/* found something, stop busy polling */
					busy_flag = 0;
					can_busy_loop = false;
				}
			}
		}
		/*
		 * All waiters have already been registered, so don't provide
		 * a poll_table->_qproc to them on the next loop iteration.
		 */
		pt->_qproc = NULL;
		if (!count) {
			count = wait->error;
			if (signal_pending(current))
				count = -ERESTARTNOHAND;
		}
		if (count || timed_out)
			break;

		/* only if found POLL_BUSY_LOOP sockets && not out of time */
		if (can_busy_loop && !need_resched()) {
			if (!busy_start) {
				busy_start = busy_loop_current_time();
				continue;
			}
			if (!busy_loop_timeout(busy_start))
				continue;
		}
		busy_flag = 0;

		/*
		 * If this is the first loop and we have a timeout
		 * given, then we convert to ktime_t and set the to
		 * pointer to the expiry value.
		 */
		if (end_time && !to) {
			expire = timespec64_to_ktime(*end_time);
			to = &expire;
		}

		if (!poll_schedule_timeout(wait, TASK_INTERRUPTIBLE, to, slack))
			timed_out = 1;
	}
	return count;
}

#define N_STACK_PPS ((sizeof(stack_pps) - sizeof(struct poll_list)) / \
			sizeof(struct pollfd))
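
/*
 * Worked example (illustrative, assuming POLL_STACK_ALLOC is 256 as in
 * mainline): (256 - 16) / 8 = 30 pollfds fit in the on-stack chunk before
 * do_sys_poll() falls back to kmalloc'd poll_list pages.
 */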

static int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
		       struct timespec64 *end_time)
{
	struct poll_wqueues table;
	int err = -EFAULT, fdcount, len;
	/* Allocate small arguments on the stack to save memory and be
	   faster - use long to make sure the buffer is aligned properly
	   on 64 bit archs to avoid unaligned access */
	long stack_pps[POLL_STACK_ALLOC/sizeof(long)];
	struct poll_list *const head = (struct poll_list *)stack_pps;
	struct poll_list *walk = head;
	unsigned long todo = nfds;

	if (nfds > rlimit(RLIMIT_NOFILE))
		return -EINVAL;

	len = min_t(unsigned int, nfds, N_STACK_PPS);
	for (;;) {
		walk->next = NULL;
		walk->len = len;
		if (!len)
			break;

		if (copy_from_user(walk->entries, ufds + nfds-todo,
					sizeof(struct pollfd) * walk->len))
			goto out_fds;

		todo -= walk->len;
		if (!todo)
			break;

		len = min(todo, POLLFD_PER_PAGE);
		walk = walk->next = kmalloc(struct_size(walk, entries, len),
					    GFP_KERNEL);
		if (!walk) {
			err = -ENOMEM;
			goto out_fds;
		}
	}

	poll_initwait(&table);
	fdcount = do_poll(head, &table, end_time);
	poll_freewait(&table);

	for (walk = head; walk; walk = walk->next) {
		struct pollfd *fds = walk->entries;
		int j;

		for (j = 0; j < walk->len; j++, ufds++)
			if (__put_user(fds[j].revents, &ufds->revents))
				goto out_fds;
	}

	err = fdcount;
out_fds:
	walk = head->next;
	while (walk) {
		struct poll_list *pos = walk;
		walk = walk->next;
		kfree(pos);
	}

	return err;
}

static long do_restart_poll(struct restart_block *restart_block)
{
	struct pollfd __user *ufds = restart_block->poll.ufds;
	int nfds = restart_block->poll.nfds;
	struct timespec64 *to = NULL, end_time;
	int ret;

	if (restart_block->poll.has_timeout) {
		end_time.tv_sec = restart_block->poll.tv_sec;
		end_time.tv_nsec = restart_block->poll.tv_nsec;
		to = &end_time;
	}

	ret = do_sys_poll(ufds, nfds, to);

	if (ret == -ERESTARTNOHAND)
		ret = set_restart_fn(restart_block, do_restart_poll);

	return ret;
}

SYSCALL_DEFINE3(poll, struct pollfd __user *, ufds, unsigned int, nfds,
		int, timeout_msecs)
{
	struct timespec64 end_time, *to = NULL;
	int ret;

	if (timeout_msecs >= 0) {
		to = &end_time;
		poll_select_set_timeout(to, timeout_msecs / MSEC_PER_SEC,
			NSEC_PER_MSEC * (timeout_msecs % MSEC_PER_SEC));
	}

	ret = do_sys_poll(ufds, nfds, to);

	if (ret == -ERESTARTNOHAND) {
		struct restart_block *restart_block;

		restart_block = &current->restart_block;
		restart_block->poll.ufds = ufds;
		restart_block->poll.nfds = nfds;

		if (timeout_msecs >= 0) {
			restart_block->poll.tv_sec = end_time.tv_sec;
			restart_block->poll.tv_nsec = end_time.tv_nsec;
			restart_block->poll.has_timeout = 1;
		} else
			restart_block->poll.has_timeout = 0;

		ret = set_restart_fn(restart_block, do_restart_poll);
	}
	return ret;
}
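
/*
 * Illustrative user-side usage (a sketch, not part of this file; "sock_fd"
 * stands for any open descriptor and read_from() is hypothetical). A
 * negative timeout blocks indefinitely and zero polls without blocking; on
 * restart the absolute deadline saved above keeps the timeout from
 * starting over:
 *
 *	struct pollfd pfd = { .fd = sock_fd, .events = POLLIN };
 *	int n = poll(&pfd, 1, 1000);	// wait up to one second
 *	if (n > 0 && (pfd.revents & POLLIN))
 *		read_from(sock_fd);	// hypothetical consumer
 */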

SYSCALL_DEFINE5(ppoll, struct pollfd __user *, ufds, unsigned int, nfds,
		struct __kernel_timespec __user *, tsp, const sigset_t __user *, sigmask,
		size_t, sigsetsize)
{
	struct timespec64 ts, end_time, *to = NULL;
	int ret;

	if (tsp) {
		if (get_timespec64(&ts, tsp))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
			return -EINVAL;
	}

	ret = set_user_sigmask(sigmask, sigsetsize);
	if (ret)
		return ret;

	ret = do_sys_poll(ufds, nfds, to);
	return poll_select_finish(&end_time, tsp, PT_TIMESPEC, ret);
}

#if defined(CONFIG_COMPAT_32BIT_TIME) && !defined(CONFIG_64BIT)

SYSCALL_DEFINE5(ppoll_time32, struct pollfd __user *, ufds, unsigned int, nfds,
		struct old_timespec32 __user *, tsp, const sigset_t __user *, sigmask,
		size_t, sigsetsize)
{
	struct timespec64 ts, end_time, *to = NULL;
	int ret;

	if (tsp) {
		if (get_old_timespec32(&ts, tsp))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
			return -EINVAL;
	}

	ret = set_user_sigmask(sigmask, sigsetsize);
	if (ret)
		return ret;

	ret = do_sys_poll(ufds, nfds, to);
	return poll_select_finish(&end_time, tsp, PT_OLD_TIMESPEC, ret);
}
#endif

#ifdef CONFIG_COMPAT
#define __COMPAT_NFDBITS       (8 * sizeof(compat_ulong_t))

/*
 * Ooo, nasty.  We need here to frob 32-bit unsigned longs to
 * 64-bit unsigned longs.
 */
static
int compat_get_fd_set(unsigned long nr, compat_ulong_t __user *ufdset,
			unsigned long *fdset)
{
	if (ufdset) {
		return compat_get_bitmap(fdset, ufdset, nr);
	} else {
		zero_fd_set(nr, fdset);
		return 0;
	}
}

static
int compat_set_fd_set(unsigned long nr, compat_ulong_t __user *ufdset,
		      unsigned long *fdset)
{
	if (!ufdset)
		return 0;
	return compat_put_bitmap(ufdset, fdset, nr);
}


/*
 * This is a virtual copy of sys_select from above and probably
 * should be compared to it from time to time.
 */

/*
 * We can actually return ERESTARTSYS instead of EINTR, but I'd
 * like to be certain this leads to no problems. So I return
 * EINTR just for safety.
 *
 * Update: ERESTARTSYS breaks at least the xview clock binary, so
 * I'm trying ERESTARTNOHAND which restarts only when you want to.
 */
static int compat_core_sys_select(int n, compat_ulong_t __user *inp,
	compat_ulong_t __user *outp, compat_ulong_t __user *exp,
	struct timespec64 *end_time)
{
	fd_set_bits fds;
	void *bits;
	int size, max_fds, ret = -EINVAL;
	struct fdtable *fdt;
	long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];

	if (n < 0)
		goto out_nofds;

	/* max_fds can increase, so grab it once to avoid race */
	rcu_read_lock();
	fdt = files_fdtable(current->files);
	max_fds = fdt->max_fds;
	rcu_read_unlock();
	if (n > max_fds)
		n = max_fds;

	/*
	 * We need 6 bitmaps (in/out/ex for both incoming and outgoing);
	 * since we use fd_set we need to allocate memory in units of
	 * long-words.
	 */
	size = FDS_BYTES(n);
	bits = stack_fds;
	if (size > sizeof(stack_fds) / 6) {
		bits = kmalloc_array(6, size, GFP_KERNEL);
		ret = -ENOMEM;
		if (!bits)
			goto out_nofds;
	}
	fds.in      = (unsigned long *)  bits;
	fds.out     = (unsigned long *) (bits +   size);
	fds.ex      = (unsigned long *) (bits + 2*size);
	fds.res_in  = (unsigned long *) (bits + 3*size);
	fds.res_out = (unsigned long *) (bits + 4*size);
	fds.res_ex  = (unsigned long *) (bits + 5*size);

	if ((ret = compat_get_fd_set(n, inp, fds.in)) ||
	    (ret = compat_get_fd_set(n, outp, fds.out)) ||
	    (ret = compat_get_fd_set(n, exp, fds.ex)))
		goto out;
	zero_fd_set(n, fds.res_in);
	zero_fd_set(n, fds.res_out);
	zero_fd_set(n, fds.res_ex);

	ret = do_select(n, &fds, end_time);

	if (ret < 0)
		goto out;
	if (!ret) {
		ret = -ERESTARTNOHAND;
		if (signal_pending(current))
			goto out;
		ret = 0;
	}

	if (compat_set_fd_set(n, inp, fds.res_in) ||
	    compat_set_fd_set(n, outp, fds.res_out) ||
	    compat_set_fd_set(n, exp, fds.res_ex))
		ret = -EFAULT;
out:
	if (bits != stack_fds)
		kfree(bits);
out_nofds:
	return ret;
}

static int do_compat_select(int n, compat_ulong_t __user *inp,
	compat_ulong_t __user *outp, compat_ulong_t __user *exp,
	struct old_timeval32 __user *tvp)
{
	struct timespec64 end_time, *to = NULL;
	struct old_timeval32 tv;
	int ret;

	if (tvp) {
		if (copy_from_user(&tv, tvp, sizeof(tv)))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to,
				tv.tv_sec + (tv.tv_usec / USEC_PER_SEC),
				(tv.tv_usec % USEC_PER_SEC) * NSEC_PER_USEC))
			return -EINVAL;
	}

	ret = compat_core_sys_select(n, inp, outp, exp, to);
	return poll_select_finish(&end_time, tvp, PT_OLD_TIMEVAL, ret);
}

COMPAT_SYSCALL_DEFINE5(select, int, n, compat_ulong_t __user *, inp,
	compat_ulong_t __user *, outp, compat_ulong_t __user *, exp,
	struct old_timeval32 __user *, tvp)
{
	return do_compat_select(n, inp, outp, exp, tvp);
}

struct compat_sel_arg_struct {
	compat_ulong_t n;
	compat_uptr_t inp;
	compat_uptr_t outp;
	compat_uptr_t exp;
	compat_uptr_t tvp;
};

COMPAT_SYSCALL_DEFINE1(old_select, struct compat_sel_arg_struct __user *, arg)
{
	struct compat_sel_arg_struct a;

	if (copy_from_user(&a, arg, sizeof(a)))
		return -EFAULT;
	return do_compat_select(a.n, compat_ptr(a.inp), compat_ptr(a.outp),
				compat_ptr(a.exp), compat_ptr(a.tvp));
}

static long do_compat_pselect(int n, compat_ulong_t __user *inp,
	compat_ulong_t __user *outp, compat_ulong_t __user *exp,
	void __user *tsp, compat_sigset_t __user *sigmask,
	compat_size_t sigsetsize, enum poll_time_type type)
{
	struct timespec64 ts, end_time, *to = NULL;
	int ret;

	if (tsp) {
		switch (type) {
		case PT_OLD_TIMESPEC:
			if (get_old_timespec32(&ts, tsp))
				return -EFAULT;
			break;
		case PT_TIMESPEC:
			if (get_timespec64(&ts, tsp))
				return -EFAULT;
			break;
		default:
			BUG();
		}

		to = &end_time;
		if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
			return -EINVAL;
	}

	ret = set_compat_user_sigmask(sigmask, sigsetsize);
	if (ret)
		return ret;

	ret = compat_core_sys_select(n, inp, outp, exp, to);
	return poll_select_finish(&end_time, tsp, type, ret);
}

COMPAT_SYSCALL_DEFINE6(pselect6_time64, int, n, compat_ulong_t __user *, inp,
	compat_ulong_t __user *, outp, compat_ulong_t __user *, exp,
	struct __kernel_timespec __user *, tsp, void __user *, sig)
{
	compat_size_t sigsetsize = 0;
	compat_uptr_t up = 0;

	if (sig) {
		if (!access_ok(sig,
				sizeof(compat_uptr_t)+sizeof(compat_size_t)) ||
				__get_user(up, (compat_uptr_t __user *)sig) ||
				__get_user(sigsetsize,
				(compat_size_t __user *)(sig+sizeof(up))))
			return -EFAULT;
	}

	return do_compat_pselect(n, inp, outp, exp, tsp, compat_ptr(up),
				 sigsetsize, PT_TIMESPEC);
}

#if defined(CONFIG_COMPAT_32BIT_TIME)

COMPAT_SYSCALL_DEFINE6(pselect6_time32, int, n, compat_ulong_t __user *, inp,
	compat_ulong_t __user *, outp, compat_ulong_t __user *, exp,
	struct old_timespec32 __user *, tsp, void __user *, sig)
{
	compat_size_t sigsetsize = 0;
	compat_uptr_t up = 0;

	if (sig) {
		if (!access_ok(sig,
				sizeof(compat_uptr_t)+sizeof(compat_size_t)) ||
				__get_user(up, (compat_uptr_t __user *)sig) ||
				__get_user(sigsetsize,
				(compat_size_t __user *)(sig+sizeof(up))))
			return -EFAULT;
	}

	return do_compat_pselect(n, inp, outp, exp, tsp, compat_ptr(up),
				 sigsetsize, PT_OLD_TIMESPEC);
}

#endif

#if defined(CONFIG_COMPAT_32BIT_TIME)
COMPAT_SYSCALL_DEFINE5(ppoll_time32, struct pollfd __user *, ufds,
	unsigned int,  nfds, struct old_timespec32 __user *, tsp,
	const compat_sigset_t __user *, sigmask, compat_size_t, sigsetsize)
{
	struct timespec64 ts, end_time, *to = NULL;
	int ret;

	if (tsp) {
		if (get_old_timespec32(&ts, tsp))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
			return -EINVAL;
	}

	ret = set_compat_user_sigmask(sigmask, sigsetsize);
	if (ret)
		return ret;

	ret = do_sys_poll(ufds, nfds, to);
	return poll_select_finish(&end_time, tsp, PT_OLD_TIMESPEC, ret);
}
#endif

/* New compat syscall for 64-bit time_t */
COMPAT_SYSCALL_DEFINE5(ppoll_time64, struct pollfd __user *, ufds,
	unsigned int,  nfds, struct __kernel_timespec __user *, tsp,
	const compat_sigset_t __user *, sigmask, compat_size_t, sigsetsize)
{
	struct timespec64 ts, end_time, *to = NULL;
	int ret;

	if (tsp) {
		if (get_timespec64(&ts, tsp))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
			return -EINVAL;
	}

	ret = set_compat_user_sigmask(sigmask, sigsetsize);
	if (ret)
		return ret;

	ret = do_sys_poll(ufds, nfds, to);
	return poll_select_finish(&end_time, tsp, PT_TIMESPEC, ret);
}

#endif