David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 1 | // SPDX-License-Identifier: GPL-2.0 |
| 2 | /* |
| 3 | * Generic userspace implementations of gettimeofday() and similar. |
| 4 | */ |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 5 | #include <vdso/datapage.h> |
| 6 | #include <vdso/helpers.h> |
| 7 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 8 | #ifndef vdso_calc_delta |
| 9 | /* |
| 10 | * Default implementation which works for all sane clocksources. That |
| 11 | * obviously excludes x86/TSC. |
| 12 | */ |
| 13 | static __always_inline |
| 14 | u64 vdso_calc_delta(u64 cycles, u64 last, u64 mask, u32 mult) |
| 15 | { |
| 16 | return ((cycles - last) & mask) * mult; |
| 17 | } |
| 18 | #endif |
| 19 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame] | 20 | #ifndef vdso_shift_ns |
| 21 | static __always_inline u64 vdso_shift_ns(u64 ns, u32 shift) |
| 22 | { |
| 23 | return ns >> shift; |
| 24 | } |
| 25 | #endif |
| 26 | |
#ifndef __arch_vdso_hres_capable
/*
 * Default: assume the architecture can service high resolution clocks
 * from the vDSO. Architectures override this to compile the high
 * resolution paths out of do_hres().
 */
static inline bool __arch_vdso_hres_capable(void)
{
	return true;
}
#endif
| 33 | |
| 34 | #ifndef vdso_clocksource_ok |
| 35 | static inline bool vdso_clocksource_ok(const struct vdso_data *vd) |
| 36 | { |
| 37 | return vd->clock_mode != VDSO_CLOCKMODE_NONE; |
| 38 | } |
| 39 | #endif |
| 40 | |
#ifndef vdso_cycles_ok
/*
 * Default: accept any counter value. Architectures whose counter can
 * return out-of-range values (e.g. x86/TSC) override this to force a
 * syscall fallback.
 */
static inline bool vdso_cycles_ok(u64 cycles)
{
	return true;
}
#endif
| 47 | |
#ifdef CONFIG_TIME_NS
/*
 * High resolution read for time-namespace tasks. @vdns is the
 * namespace VVAR page; the real timekeeper data is reached via
 * __arch_get_timens_vdso_data(). A consistent (sec, nsec) snapshot is
 * taken under the vdso sequence count, then the namespace's per-clock
 * offset is added outside the loop.
 *
 * Returns 0 on success, -1 when the clocksource or counter value is
 * not usable from userspace (caller falls back to the syscall).
 */
static __always_inline int do_hres_timens(const struct vdso_data *vdns, clockid_t clk,
					  struct __kernel_timespec *ts)
{
	const struct vdso_data *vd = __arch_get_timens_vdso_data();
	const struct timens_offset *offs = &vdns->offset[clk];
	const struct vdso_timestamp *vdso_ts;
	u64 cycles, last, ns;
	u32 seq;
	s64 sec;

	/* CLOCK_MONOTONIC_RAW lives in the separate CS_RAW bucket */
	if (clk != CLOCK_MONOTONIC_RAW)
		vd = &vd[CS_HRES_COARSE];
	else
		vd = &vd[CS_RAW];
	vdso_ts = &vd->basetime[clk];

	do {
		seq = vdso_read_begin(vd);

		if (unlikely(!vdso_clocksource_ok(vd)))
			return -1;

		cycles = __arch_get_hw_counter(vd->clock_mode, vd);
		if (unlikely(!vdso_cycles_ok(cycles)))
			return -1;
		ns = vdso_ts->nsec;
		last = vd->cycle_last;
		ns += vdso_calc_delta(cycles, last, vd->mask, vd->mult);
		ns = vdso_shift_ns(ns, vd->shift);
		sec = vdso_ts->sec;
	} while (unlikely(vdso_read_retry(vd, seq)));

	/* Add the namespace offset */
	sec += offs->sec;
	ns += offs->nsec;

	/*
	 * Do this outside the loop: a race inside the loop could result
	 * in __iter_div_u64_rem() being extremely slow.
	 */
	ts->tv_sec = sec + __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
	ts->tv_nsec = ns;

	return 0;
}
#else
/*
 * !CONFIG_TIME_NS stubs: the TIMENS checks in the callers are compiled
 * out by IS_ENABLED(), so these are never reached at runtime.
 */
static __always_inline const struct vdso_data *__arch_get_timens_vdso_data(void)
{
	return NULL;
}

static __always_inline int do_hres_timens(const struct vdso_data *vdns, clockid_t clk,
					  struct __kernel_timespec *ts)
{
	return -EINVAL;
}
#endif
| 106 | |
/*
 * High resolution clock read. Takes a consistent snapshot of the
 * timekeeper data under the vdso sequence count, samples the hardware
 * counter and converts the delta since vd->cycle_last to nanoseconds.
 *
 * Returns 0 on success, -1 when the clock cannot be serviced from the
 * vDSO so the caller must fall back to the syscall.
 */
static __always_inline int do_hres(const struct vdso_data *vd, clockid_t clk,
				   struct __kernel_timespec *ts)
{
	const struct vdso_timestamp *vdso_ts = &vd->basetime[clk];
	u64 cycles, last, sec, ns;
	u32 seq;

	/* Allows to compile the high resolution parts out */
	if (!__arch_vdso_hres_capable())
		return -1;

	do {
		/*
		 * Open coded to handle VDSO_CLOCKMODE_TIMENS. Time namespace
		 * enabled tasks have a special VVAR page installed which
		 * has vd->seq set to 1 and vd->clock_mode set to
		 * VDSO_CLOCKMODE_TIMENS. For non time namespace affected tasks
		 * this does not affect performance because if vd->seq is
		 * odd, i.e. a concurrent update is in progress the extra
		 * check for vd->clock_mode is just a few extra
		 * instructions while spin waiting for vd->seq to become
		 * even again.
		 */
		while (unlikely((seq = READ_ONCE(vd->seq)) & 1)) {
			if (IS_ENABLED(CONFIG_TIME_NS) &&
			    vd->clock_mode == VDSO_CLOCKMODE_TIMENS)
				return do_hres_timens(vd, clk, ts);
			cpu_relax();
		}
		smp_rmb();	/* Order the seq load before the data loads below */

		if (unlikely(!vdso_clocksource_ok(vd)))
			return -1;

		cycles = __arch_get_hw_counter(vd->clock_mode, vd);
		if (unlikely(!vdso_cycles_ok(cycles)))
			return -1;
		ns = vdso_ts->nsec;
		last = vd->cycle_last;
		ns += vdso_calc_delta(cycles, last, vd->mask, vd->mult);
		ns = vdso_shift_ns(ns, vd->shift);
		sec = vdso_ts->sec;
	} while (unlikely(vdso_read_retry(vd, seq)));

	/*
	 * Do this outside the loop: a race inside the loop could result
	 * in __iter_div_u64_rem() being extremely slow.
	 */
	ts->tv_sec = sec + __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
	ts->tv_nsec = ns;

	return 0;
}
| 160 | |
#ifdef CONFIG_TIME_NS
/*
 * Coarse clock read for time-namespace tasks. No hardware counter is
 * read: only the tick-granular timestamp stored by the timekeeper is
 * snapshotted, then the namespace's per-clock offset is applied.
 */
static __always_inline int do_coarse_timens(const struct vdso_data *vdns, clockid_t clk,
					    struct __kernel_timespec *ts)
{
	const struct vdso_data *vd = __arch_get_timens_vdso_data();
	const struct vdso_timestamp *vdso_ts = &vd->basetime[clk];
	const struct timens_offset *offs = &vdns->offset[clk];
	u64 nsec;
	s64 sec;
	s32 seq;

	do {
		seq = vdso_read_begin(vd);
		sec = vdso_ts->sec;
		nsec = vdso_ts->nsec;
	} while (unlikely(vdso_read_retry(vd, seq)));

	/* Add the namespace offset */
	sec += offs->sec;
	nsec += offs->nsec;

	/*
	 * Do this outside the loop: a race inside the loop could result
	 * in __iter_div_u64_rem() being extremely slow.
	 */
	ts->tv_sec = sec + __iter_div_u64_rem(nsec, NSEC_PER_SEC, &nsec);
	ts->tv_nsec = nsec;
	return 0;
}
#else
/* !CONFIG_TIME_NS stub: the TIMENS check in do_coarse() compiles out */
static __always_inline int do_coarse_timens(const struct vdso_data *vdns, clockid_t clk,
					    struct __kernel_timespec *ts)
{
	return -1;
}
#endif
| 197 | |
/*
 * Coarse clock read: returns the timestamp recorded at the last timer
 * tick without touching the hardware counter. Cheaper but only
 * tick-resolution, matching the CLOCK_*_COARSE semantics.
 */
static __always_inline int do_coarse(const struct vdso_data *vd, clockid_t clk,
				     struct __kernel_timespec *ts)
{
	const struct vdso_timestamp *vdso_ts = &vd->basetime[clk];
	u32 seq;

	do {
		/*
		 * Open coded to handle VDSO_CLOCK_TIMENS. See comment in
		 * do_hres().
		 */
		while ((seq = READ_ONCE(vd->seq)) & 1) {
			if (IS_ENABLED(CONFIG_TIME_NS) &&
			    vd->clock_mode == VDSO_CLOCKMODE_TIMENS)
				return do_coarse_timens(vd, clk, ts);
			cpu_relax();
		}
		smp_rmb();	/* Order the seq load before the data loads below */

		ts->tv_sec = vdso_ts->sec;
		ts->tv_nsec = vdso_ts->nsec;
	} while (unlikely(vdso_read_retry(vd, seq)));

	return 0;
}
| 223 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame] | 224 | static __always_inline int |
| 225 | __cvdso_clock_gettime_common(const struct vdso_data *vd, clockid_t clock, |
| 226 | struct __kernel_timespec *ts) |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 227 | { |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 228 | u32 msk; |
| 229 | |
| 230 | /* Check for negative values or invalid clocks */ |
| 231 | if (unlikely((u32) clock >= MAX_CLOCKS)) |
| 232 | return -1; |
| 233 | |
| 234 | /* |
| 235 | * Convert the clockid to a bitmask and use it to check which |
| 236 | * clocks are handled in the VDSO directly. |
| 237 | */ |
| 238 | msk = 1U << clock; |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame] | 239 | if (likely(msk & VDSO_HRES)) |
| 240 | vd = &vd[CS_HRES_COARSE]; |
| 241 | else if (msk & VDSO_COARSE) |
| 242 | return do_coarse(&vd[CS_HRES_COARSE], clock, ts); |
| 243 | else if (msk & VDSO_RAW) |
| 244 | vd = &vd[CS_RAW]; |
| 245 | else |
| 246 | return -1; |
| 247 | |
| 248 | return do_hres(vd, clock, ts); |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 249 | } |
| 250 | |
| 251 | static __maybe_unused int |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame] | 252 | __cvdso_clock_gettime_data(const struct vdso_data *vd, clockid_t clock, |
| 253 | struct __kernel_timespec *ts) |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 254 | { |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame] | 255 | int ret = __cvdso_clock_gettime_common(vd, clock, ts); |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 256 | |
| 257 | if (unlikely(ret)) |
| 258 | return clock_gettime_fallback(clock, ts); |
| 259 | return 0; |
| 260 | } |
| 261 | |
| 262 | static __maybe_unused int |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame] | 263 | __cvdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts) |
| 264 | { |
| 265 | return __cvdso_clock_gettime_data(__arch_get_vdso_data(), clock, ts); |
| 266 | } |
| 267 | |
#ifdef BUILD_VDSO32
/*
 * 32-bit clock_gettime(): read through a 64-bit timespec and narrow
 * the result into the legacy old_timespec32 layout on success.
 */
static __maybe_unused int
__cvdso_clock_gettime32_data(const struct vdso_data *vd, clockid_t clock,
			     struct old_timespec32 *res)
{
	struct __kernel_timespec ts;

	if (unlikely(__cvdso_clock_gettime_common(vd, clock, &ts)))
		return clock_gettime32_fallback(clock, res);

	/* Success path: copy the fields into the 32-bit layout */
	res->tv_sec = ts.tv_sec;
	res->tv_nsec = ts.tv_nsec;

	return 0;
}

/* 32-bit clock_gettime() entry point using the arch vdso data page */
static __maybe_unused int
__cvdso_clock_gettime32(clockid_t clock, struct old_timespec32 *res)
{
	const struct vdso_data *vd = __arch_get_vdso_data();

	return __cvdso_clock_gettime32_data(vd, clock, res);
}
#endif /* BUILD_VDSO32 */
| 294 | |
/*
 * gettimeofday(): CLOCK_REALTIME at microsecond resolution plus the
 * (obsolete) timezone fields. Only the hres read can fail; the
 * timezone copy is plain loads from the data page.
 */
static __maybe_unused int
__cvdso_gettimeofday_data(const struct vdso_data *vd,
			  struct __kernel_old_timeval *tv, struct timezone *tz)
{

	if (likely(tv != NULL)) {
		struct __kernel_timespec ts;

		if (do_hres(&vd[CS_HRES_COARSE], CLOCK_REALTIME, &ts))
			return gettimeofday_fallback(tv, tz);

		tv->tv_sec = ts.tv_sec;
		/* Truncate nanoseconds to microseconds */
		tv->tv_usec = (u32)ts.tv_nsec / NSEC_PER_USEC;
	}

	if (unlikely(tz != NULL)) {
		/*
		 * Time namespace tasks see the TIMENS VVAR page; switch
		 * to the real timekeeper data for the timezone fields.
		 */
		if (IS_ENABLED(CONFIG_TIME_NS) &&
		    vd->clock_mode == VDSO_CLOCKMODE_TIMENS)
			vd = __arch_get_timens_vdso_data();

		tz->tz_minuteswest = vd[CS_HRES_COARSE].tz_minuteswest;
		tz->tz_dsttime = vd[CS_HRES_COARSE].tz_dsttime;
	}

	return 0;
}
| 321 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame] | 322 | static __maybe_unused int |
| 323 | __cvdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz) |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 324 | { |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame] | 325 | return __cvdso_gettimeofday_data(__arch_get_vdso_data(), tv, tz); |
| 326 | } |
| 327 | |
#ifdef VDSO_HAS_TIME
/*
 * time(): returns the CLOCK_REALTIME seconds. Reads the seconds word
 * with a single READ_ONCE() rather than a full seqcount loop.
 */
static __maybe_unused __kernel_old_time_t
__cvdso_time_data(const struct vdso_data *vd, __kernel_old_time_t *time)
{
	__kernel_old_time_t t;

	/* Time namespace tasks: redirect to the real timekeeper data */
	if (IS_ENABLED(CONFIG_TIME_NS) &&
	    vd->clock_mode == VDSO_CLOCKMODE_TIMENS)
		vd = __arch_get_timens_vdso_data();

	t = READ_ONCE(vd[CS_HRES_COARSE].basetime[CLOCK_REALTIME].sec);

	/* Mirror the result through the optional output pointer */
	if (time)
		*time = t;

	return t;
}

/* time() entry point using the architecture's vdso data page */
static __maybe_unused __kernel_old_time_t __cvdso_time(__kernel_old_time_t *time)
{
	return __cvdso_time_data(__arch_get_vdso_data(), time);
}
#endif /* VDSO_HAS_TIME */
| 351 | |
| 352 | #ifdef VDSO_HAS_CLOCK_GETRES |
/*
 * clock_getres() for the clocks handled by the vDSO. High resolution
 * and raw clocks report the timekeeper's hrtimer resolution; coarse
 * clocks report the tick length (LOW_RES_NSEC).
 *
 * Returns 0 on success, -1 for clocks not handled here so the caller
 * can fall back to the syscall.
 */
static __maybe_unused
int __cvdso_clock_getres_common(const struct vdso_data *vd, clockid_t clock,
				struct __kernel_timespec *res)
{
	u32 msk;
	u64 ns;

	/* Check for negative values or invalid clocks */
	if (unlikely((u32) clock >= MAX_CLOCKS))
		return -1;

	/* Time namespace tasks: redirect to the real timekeeper data */
	if (IS_ENABLED(CONFIG_TIME_NS) &&
	    vd->clock_mode == VDSO_CLOCKMODE_TIMENS)
		vd = __arch_get_timens_vdso_data();

	/*
	 * Convert the clockid to a bitmask and use it to check which
	 * clocks are handled in the VDSO directly.
	 */
	msk = 1U << clock;
	if (msk & (VDSO_HRES | VDSO_RAW)) {
		/*
		 * Preserves the behaviour of posix_get_hrtimer_res().
		 */
		ns = READ_ONCE(vd[CS_HRES_COARSE].hrtimer_res);
	} else if (msk & VDSO_COARSE) {
		/*
		 * Preserves the behaviour of posix_get_coarse_res().
		 */
		ns = LOW_RES_NSEC;
	} else {
		return -1;
	}

	/* res is optional per POSIX - only fill it in when provided */
	if (likely(res)) {
		res->tv_sec = 0;
		res->tv_nsec = ns;
	}
	return 0;
}
| 393 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame] | 394 | static __maybe_unused |
| 395 | int __cvdso_clock_getres_data(const struct vdso_data *vd, clockid_t clock, |
| 396 | struct __kernel_timespec *res) |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 397 | { |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame] | 398 | int ret = __cvdso_clock_getres_common(vd, clock, res); |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 399 | |
| 400 | if (unlikely(ret)) |
| 401 | return clock_getres_fallback(clock, res); |
| 402 | return 0; |
| 403 | } |
| 404 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame] | 405 | static __maybe_unused |
| 406 | int __cvdso_clock_getres(clockid_t clock, struct __kernel_timespec *res) |
| 407 | { |
| 408 | return __cvdso_clock_getres_data(__arch_get_vdso_data(), clock, res); |
| 409 | } |
| 410 | |
#ifdef BUILD_VDSO32
/*
 * 32-bit clock_getres(): read through a 64-bit timespec and narrow
 * the result into the legacy old_timespec32 layout on success.
 */
static __maybe_unused int
__cvdso_clock_getres_time32_data(const struct vdso_data *vd, clockid_t clock,
				 struct old_timespec32 *res)
{
	struct __kernel_timespec ts;

	if (unlikely(__cvdso_clock_getres_common(vd, clock, &ts)))
		return clock_getres32_fallback(clock, res);

	/* res is optional per POSIX - only fill it in when provided */
	if (likely(res)) {
		res->tv_sec = ts.tv_sec;
		res->tv_nsec = ts.tv_nsec;
	}

	return 0;
}

/* 32-bit clock_getres() entry point using the arch vdso data page */
static __maybe_unused int
__cvdso_clock_getres_time32(clockid_t clock, struct old_timespec32 *res)
{
	const struct vdso_data *vd = __arch_get_vdso_data();

	return __cvdso_clock_getres_time32_data(vd, clock, res);
}
#endif /* BUILD_VDSO32 */
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 438 | #endif /* VDSO_HAS_CLOCK_GETRES */ |