/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_TIME64_H
#define _LINUX_TIME64_H

#include <linux/math64.h>
#include <vdso/time64.h>

typedef __s64 time64_t;
typedef __u64 timeu64_t;

#include <uapi/linux/time.h>

struct timespec64 {
        time64_t        tv_sec;         /* seconds */
        long            tv_nsec;        /* nanoseconds */
};

struct itimerspec64 {
        struct timespec64 it_interval;
        struct timespec64 it_value;
};

/* Located here for timespec[64]_valid_strict */
#define TIME64_MAX      ((s64)~((u64)1 << 63))
#define TIME64_MIN      (-TIME64_MAX - 1)

#define KTIME_MAX       ((s64)~((u64)1 << 63))
#define KTIME_SEC_MAX   (KTIME_MAX / NSEC_PER_SEC)

/*
 * Limits for settimeofday():
 *
 * To prevent setting the time close to the wraparound point, time setting
 * is limited so that a reasonable uptime can be accommodated. An uptime of
 * 30 years should really be sufficient, which means the cutoff is the year
 * 2232. At that point the cutoff is just a small part of the larger problem.
 */
#define TIME_UPTIME_SEC_MAX     (30LL * 365 * 24 * 3600)
#define TIME_SETTOD_SEC_MAX     (KTIME_SEC_MAX - TIME_UPTIME_SEC_MAX)
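
/*
 * Rough arithmetic behind the cutoff above (illustrative, not part of the
 * upstream header):
 *
 *      KTIME_SEC_MAX       = (2^63 - 1) ns / NSEC_PER_SEC ~= 9223372036 s
 *                          ~= 292 years past 1970, i.e. the year 2262
 *      TIME_UPTIME_SEC_MAX = 30 * 365 * 24 * 3600 = 946080000 s (~30 years)
 *      TIME_SETTOD_SEC_MAX ~= 9223372036 - 946080000 = 8277292036 s
 *                          ~= 262 years past 1970, i.e. the year 2232
 */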

static inline int timespec64_equal(const struct timespec64 *a,
                                   const struct timespec64 *b)
{
        return (a->tv_sec == b->tv_sec) && (a->tv_nsec == b->tv_nsec);
}

/*
 * lhs < rhs:  return <0
 * lhs == rhs: return 0
 * lhs > rhs:  return >0
 */
static inline int timespec64_compare(const struct timespec64 *lhs, const struct timespec64 *rhs)
{
        if (lhs->tv_sec < rhs->tv_sec)
                return -1;
        if (lhs->tv_sec > rhs->tv_sec)
                return 1;
        return lhs->tv_nsec - rhs->tv_nsec;
}
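
/*
 * Illustrative usage of timespec64_compare() (not part of the upstream
 * header). Both inputs are assumed to be normalized, i.e. 0 <= tv_nsec <
 * NSEC_PER_SEC, so the tv_nsec difference fits in an int:
 *
 *      struct timespec64 a = { .tv_sec = 5, .tv_nsec = 100000000 };
 *      struct timespec64 b = { .tv_sec = 5, .tv_nsec = 200000000 };
 *
 *      timespec64_compare(&a, &b) < 0   // a is earlier than b
 */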

extern void set_normalized_timespec64(struct timespec64 *ts, time64_t sec, s64 nsec);

static inline struct timespec64 timespec64_add(struct timespec64 lhs,
                                               struct timespec64 rhs)
{
        struct timespec64 ts_delta;
        set_normalized_timespec64(&ts_delta, lhs.tv_sec + rhs.tv_sec,
                                  lhs.tv_nsec + rhs.tv_nsec);
        return ts_delta;
}

/*
 * sub = lhs - rhs, in normalized form
 */
static inline struct timespec64 timespec64_sub(struct timespec64 lhs,
                                               struct timespec64 rhs)
{
        struct timespec64 ts_delta;
        set_normalized_timespec64(&ts_delta, lhs.tv_sec - rhs.tv_sec,
                                  lhs.tv_nsec - rhs.tv_nsec);
        return ts_delta;
}
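
/*
 * Illustrative use of timespec64_add()/timespec64_sub() (not part of the
 * upstream header). set_normalized_timespec64() folds an out-of-range
 * tv_nsec back into tv_sec:
 *
 *      struct timespec64 a = { .tv_sec = 1, .tv_nsec = 900000000 };
 *      struct timespec64 b = { .tv_sec = 2, .tv_nsec = 300000000 };
 *      struct timespec64 sum = timespec64_add(a, b);
 *
 *      // sum.tv_sec == 4, sum.tv_nsec == 200000000
 */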

/*
 * Returns true if the timespec64 is normalized, false if denormalized:
 */
static inline bool timespec64_valid(const struct timespec64 *ts)
{
        /* Dates before 1970 are bogus */
        if (ts->tv_sec < 0)
                return false;
        /* Can't have more nanoseconds than a second */
        if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
                return false;
        return true;
}

static inline bool timespec64_valid_strict(const struct timespec64 *ts)
{
        if (!timespec64_valid(ts))
                return false;
        /* Disallow values that could overflow ktime_t */
        if ((unsigned long long)ts->tv_sec >= KTIME_SEC_MAX)
                return false;
        return true;
}

static inline bool timespec64_valid_settod(const struct timespec64 *ts)
{
        if (!timespec64_valid(ts))
                return false;
        /* Disallow values which cause overflow issues vs. CLOCK_REALTIME */
        if ((unsigned long long)ts->tv_sec >= TIME_SETTOD_SEC_MAX)
                return false;
        return true;
}
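
/*
 * Illustrative comparison of the three validity checks (not part of the
 * upstream header):
 *
 *      struct timespec64 ts = { .tv_sec = KTIME_SEC_MAX - 1, .tv_nsec = 0 };
 *
 *      timespec64_valid(&ts)           // true: non-negative, nsec in range
 *      timespec64_valid_strict(&ts)    // true: still representable as ktime_t
 *      timespec64_valid_settod(&ts)    // false: too close to the ktime_t
 *                                      // wraparound for settimeofday()
 */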

/**
 * timespec64_to_ns - Convert timespec64 to nanoseconds
 * @ts:         pointer to the timespec64 variable to be converted
 *
 * Returns the scalar nanosecond representation of the timespec64
 * parameter.
 */
static inline s64 timespec64_to_ns(const struct timespec64 *ts)
{
        /* Prevent multiplication overflow */
        if ((unsigned long long)ts->tv_sec >= KTIME_SEC_MAX)
                return KTIME_MAX;

        return ((s64) ts->tv_sec * NSEC_PER_SEC) + ts->tv_nsec;
}
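
/*
 * Illustrative use of timespec64_to_ns() (not part of the upstream header).
 * Values at or beyond KTIME_SEC_MAX seconds are clamped to KTIME_MAX:
 *
 *      struct timespec64 ts = { .tv_sec = 2, .tv_nsec = 500000000 };
 *
 *      timespec64_to_ns(&ts)   // 2500000000 ns
 *
 *      ts.tv_sec = KTIME_SEC_MAX;
 *      timespec64_to_ns(&ts)   // KTIME_MAX (clamped)
 */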

/**
 * ns_to_timespec64 - Convert nanoseconds to timespec64
 * @nsec:       the nanoseconds value to be converted
 *
 * Returns the timespec64 representation of the nsec parameter.
 */
extern struct timespec64 ns_to_timespec64(const s64 nsec);

/**
 * timespec64_add_ns - Adds nanoseconds to a timespec64
 * @a:          pointer to timespec64 to be incremented
 * @ns:         unsigned nanoseconds value to be added
 *
 * This must always be inlined because it's used from the x86-64 vdso,
 * which cannot call other kernel functions.
 */
static __always_inline void timespec64_add_ns(struct timespec64 *a, u64 ns)
{
        a->tv_sec += __iter_div_u64_rem(a->tv_nsec + ns, NSEC_PER_SEC, &ns);
        a->tv_nsec = ns;
}
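
/*
 * Illustrative use of timespec64_add_ns() (not part of the upstream header).
 * __iter_div_u64_rem() splits the nanosecond total into whole seconds and a
 * remainder, keeping the result normalized:
 *
 *      struct timespec64 ts = { .tv_sec = 10, .tv_nsec = 800000000 };
 *
 *      timespec64_add_ns(&ts, 400000000);
 *      // ts.tv_sec == 11, ts.tv_nsec == 200000000
 */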

/*
 * timespec64_add_safe assumes both values are positive and checks for
 * overflow. It will return TIME64_MAX in case of overflow.
 */
extern struct timespec64 timespec64_add_safe(const struct timespec64 lhs,
                                             const struct timespec64 rhs);
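
/*
 * Illustrative use of timespec64_add_safe() (not part of the upstream
 * header), based on the documented clamping behaviour above:
 *
 *      struct timespec64 big = { .tv_sec = TIME64_MAX, .tv_nsec = 0 };
 *      struct timespec64 one = { .tv_sec = 1, .tv_nsec = 0 };
 *      struct timespec64 sum = timespec64_add_safe(big, one);
 *
 *      // sum.tv_sec == TIME64_MAX: the overflow is clamped rather than
 *      // wrapping around
 */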

#endif /* _LINUX_TIME64_H */