blob: 5830b5ad408276e538402595ec9876ea82ed1fec [file] [log] [blame]
Sandrine Bailleux3cd87d72018-10-09 11:12:55 +02001/*
Boyan Karatotev794b0ac2025-06-20 13:13:29 +01002 * Copyright (c) 2018-2025, Arm Limited. All rights reserved.
Sandrine Bailleux3cd87d72018-10-09 11:12:55 +02003 *
4 * SPDX-License-Identifier: BSD-3-Clause
5 */
6
7#include <arch.h>
8#include <arch_helpers.h>
Sandrine Bailleux3cd87d72018-10-09 11:12:55 +02009#include <debug.h>
Antonio Nino Diaz09a00ef2019-01-11 13:12:58 +000010#include <drivers/arm/arm_gic.h>
Sandrine Bailleux3cd87d72018-10-09 11:12:55 +020011#include <errno.h>
12#include <irq.h>
13#include <mmio.h>
14#include <platform.h>
15#include <platform_def.h>
16#include <power_management.h>
Sandrine Bailleux3cd87d72018-10-09 11:12:55 +020017#include <spinlock.h>
18#include <stddef.h>
Ambroise Vincent602b7f52019-02-11 14:13:43 +000019#include <stdint.h>
Sandrine Bailleux3cd87d72018-10-09 11:12:55 +020020#include <tftf.h>
21#include <timer.h>
22
23
/* Helper macros to access the platform timer implementation */
#define TIMER_STEP_VALUE (plat_timer_info->timer_step_value)
#define TIMER_IRQ (plat_timer_info->timer_irq)
#define PROGRAM_TIMER(a) plat_timer_info->program(a)
/* Sentinel values for "no core" and "no pending request" */
#define INVALID_CORE UINT32_MAX
#define INVALID_TIME UINT64_MAX
/* Maximum timeout (in ms) accepted by tftf_program_timer() */
#define MAX_TIME_OUT_MS 10000

/*
 * Pointer containing available timer information for the platform.
 */
static const plat_timer_t *plat_timer_info;
/*
 * Interrupt requested time by cores in terms of absolute time.
 * INVALID_TIME means the core has no outstanding timer request.
 */
static volatile unsigned long long interrupt_req_time[PLATFORM_CORE_COUNT];
/*
 * Contains the target core number of the timer interrupt.
 * INVALID_CORE when the timer peripheral is not currently programmed.
 */
static unsigned int current_prog_core = INVALID_CORE;
/*
 * Lock to get a consistent view for programming the timer
 */
static spinlock_t timer_lock;
/*
 * Number of system ticks per millisec (derived from CNTFRQ_EL0)
 */
static unsigned int systicks_per_ms;

/*
 * Stores per CPU timer handler invoked on expiration of the requested timeout.
 */
static irq_handler_t timer_handler[PLATFORM_CORE_COUNT];
57
58/* Helper function */
59static inline unsigned long long get_current_time_ms(void)
60{
61 assert(systicks_per_ms);
Varun Wadekar5904da42020-05-22 10:52:23 -070062 return syscounter_read() / systicks_per_ms;
Sandrine Bailleux3cd87d72018-10-09 11:12:55 +020063}
64
65static inline unsigned long long get_current_prog_time(void)
66{
67 return current_prog_core == INVALID_CORE ?
68 0 : interrupt_req_time[current_prog_core];
69}
70
71int tftf_initialise_timer(void)
72{
Sandrine Bailleux3cd87d72018-10-09 11:12:55 +020073 /*
74 * Get platform specific timer information
75 */
Sandrine Bailleuxdfa5ed92018-12-13 17:08:50 +010076 int rc = plat_initialise_timer_ops(&plat_timer_info);
77 if (rc != 0) {
Sandrine Bailleux3cd87d72018-10-09 11:12:55 +020078 return rc;
79 }
80
81 /* Systems can't support single tick as a step value */
82 assert(TIMER_STEP_VALUE);
83
84 /* Initialise the array to max possible time */
Sandrine Bailleuxdfa5ed92018-12-13 17:08:50 +010085 for (unsigned int i = 0; i < PLATFORM_CORE_COUNT; i++)
Sandrine Bailleux3cd87d72018-10-09 11:12:55 +020086 interrupt_req_time[i] = INVALID_TIME;
87
88 tftf_irq_register_handler(TIMER_IRQ, tftf_timer_framework_handler);
89 arm_gic_set_intr_priority(TIMER_IRQ, GIC_HIGHEST_NS_PRIORITY);
90 arm_gic_intr_enable(TIMER_IRQ);
91
92 /* Save the systicks per millisecond */
93 systicks_per_ms = read_cntfrq_el0() / 1000;
94
95 return 0;
96}
97
Jens Wiklander5a440782024-06-25 12:36:20 +020098void tftf_initialise_timer_secondary_core(void)
99{
100 if (!IS_SPI(TIMER_IRQ)) {
101 arm_gic_set_intr_priority(TIMER_IRQ, GIC_HIGHEST_NS_PRIORITY);
102 arm_gic_intr_enable(TIMER_IRQ);
103 }
104}
105
Sandrine Bailleux3cd87d72018-10-09 11:12:55 +0200106/*
107 * It returns the core number of next timer request to be serviced or
108 * -1 if there is no request from any core. The next service request
109 * is the core whose interrupt needs to be fired first.
110 */
111static inline unsigned int get_lowest_req_core(void)
112{
113 unsigned long long lowest_timer = INVALID_TIME;
114 unsigned int lowest_core_req = INVALID_CORE;
115 unsigned int i;
116
117 /*
118 * If 2 cores requested same value, give precedence
119 * to the core with lowest core number
120 */
121 for (i = 0; i < PLATFORM_CORE_COUNT; i++) {
122 if (interrupt_req_time[i] < lowest_timer) {
123 lowest_timer = interrupt_req_time[i];
124 lowest_core_req = i;
125 }
126 }
127
128 return lowest_core_req;
129}
130
131int tftf_program_timer(unsigned long time_out_ms)
132{
133 unsigned int core_pos;
134 unsigned long long current_time;
135 u_register_t flags;
136 int rc = 0;
137
138 /*
139 * Some timer implementations have a very small max timeouts due to
140 * this if a request is asked for greater than the max time supported
141 * by them either it has to be broken down and remembered or use
142 * some other technique. Since that use case is not intended and
143 * and to make the timer framework simple, max timeout requests
144 * accepted by timer implementations can't be greater than
145 * 10 seconds. Hence, all timer peripherals used in timer framework
146 * has to support a timeout with interval of at least MAX_TIMEOUT.
147 */
148 if ((time_out_ms > MAX_TIME_OUT_MS) || (time_out_ms == 0)) {
149 ERROR("%s : Greater than max timeout request\n", __func__);
150 return -1;
151 } else if (time_out_ms < TIMER_STEP_VALUE) {
152 time_out_ms = TIMER_STEP_VALUE;
153 }
154
155 core_pos = platform_get_core_pos(read_mpidr_el1());
156 /* A timer interrupt request is already available for the core */
157 assert(interrupt_req_time[core_pos] == INVALID_TIME);
158
159 flags = read_daif();
160 disable_irq();
161 spin_lock(&timer_lock);
162
163 assert((current_prog_core < PLATFORM_CORE_COUNT) ||
164 (current_prog_core == INVALID_CORE));
165
166 /*
167 * Read time after acquiring timer_lock to account for any time taken
168 * by lock contention.
169 */
170 current_time = get_current_time_ms();
171
172 /* Update the requested time */
173 interrupt_req_time[core_pos] = current_time + time_out_ms;
174
175 VERBOSE("Need timer interrupt at: %lld current_prog_time:%lld\n"
176 " current time: %lld\n", interrupt_req_time[core_pos],
177 get_current_prog_time(),
178 get_current_time_ms());
179
180 /*
181 * If the interrupt request time is less than the current programmed
182 * by timer_step_value or timer is not programmed. Program it with
183 * requested time and retarget the timer interrupt to the current
184 * core.
185 */
186 if ((!get_current_prog_time()) || (interrupt_req_time[core_pos] <
187 (get_current_prog_time() - TIMER_STEP_VALUE))) {
188
Jens Wiklander5a440782024-06-25 12:36:20 +0200189 if (IS_SPI(TIMER_IRQ)) {
190 arm_gic_set_intr_target(TIMER_IRQ, core_pos);
191 }
Sandrine Bailleux3cd87d72018-10-09 11:12:55 +0200192
193 rc = PROGRAM_TIMER(time_out_ms);
194 /* We don't expect timer programming to fail */
195 if (rc)
196 ERROR("%s %d: rc = %d\n", __func__, __LINE__, rc);
197
198 current_prog_core = core_pos;
199 }
200
201 spin_unlock(&timer_lock);
202 /* Restore DAIF flags */
203 write_daif(flags);
204 isb();
205
206 return rc;
207}
208
209int tftf_program_timer_and_suspend(unsigned long milli_secs,
210 unsigned int pwr_state,
211 int *timer_rc, int *suspend_rc)
212{
213 int rc = 0;
214 u_register_t flags;
215
216 /* Default to successful return codes */
217 int timer_rc_val = 0;
218 int suspend_rc_val = PSCI_E_SUCCESS;
219
220 /* Preserve DAIF flags. IRQs need to be disabled for this to work. */
221 flags = read_daif();
222 disable_irq();
223
224 /*
225 * Even with IRQs masked, the timer IRQ will wake the CPU up.
226 *
227 * If the timer IRQ happens before entering suspend mode (because the
228 * timer took too long to program, for example) the fact that the IRQ is
229 * pending will prevent the CPU from entering suspend mode and not being
230 * able to wake up.
231 */
232 timer_rc_val = tftf_program_timer(milli_secs);
233 if (timer_rc_val == 0) {
234 suspend_rc_val = tftf_cpu_suspend(pwr_state);
235 if (suspend_rc_val != PSCI_E_SUCCESS) {
236 rc = -1;
237 INFO("%s %d: suspend_rc = %d\n", __func__, __LINE__,
238 suspend_rc_val);
239 }
240 } else {
241 rc = -1;
242 INFO("%s %d: timer_rc = %d\n", __func__, __LINE__, timer_rc_val);
243 }
244
245 /* Restore previous DAIF flags */
246 write_daif(flags);
247 isb();
248
249 if (timer_rc)
250 *timer_rc = timer_rc_val;
251 if (suspend_rc)
252 *suspend_rc = suspend_rc_val;
253 /*
254 * If IRQs were disabled when calling this function, the timer IRQ
255 * handler won't be called and the timer interrupt will be pending, but
256 * that isn't necessarily a problem.
257 */
258
259 return rc;
260}
261
262int tftf_program_timer_and_sys_suspend(unsigned long milli_secs,
263 int *timer_rc, int *suspend_rc)
264{
265 int rc = 0;
266 u_register_t flags;
267
268 /* Default to successful return codes */
269 int timer_rc_val = 0;
270 int suspend_rc_val = PSCI_E_SUCCESS;
271
272 /* Preserve DAIF flags. IRQs need to be disabled for this to work. */
273 flags = read_daif();
274 disable_irq();
275
276 /*
277 * Even with IRQs masked, the timer IRQ will wake the CPU up.
278 *
279 * If the timer IRQ happens before entering suspend mode (because the
280 * timer took too long to program, for example) the fact that the IRQ is
281 * pending will prevent the CPU from entering suspend mode and not being
282 * able to wake up.
283 */
284 timer_rc_val = tftf_program_timer(milli_secs);
285 if (timer_rc_val == 0) {
286 suspend_rc_val = tftf_system_suspend();
287 if (suspend_rc_val != PSCI_E_SUCCESS) {
288 rc = -1;
289 INFO("%s %d: suspend_rc = %d\n", __func__, __LINE__,
290 suspend_rc_val);
291 }
292 } else {
293 rc = -1;
294 INFO("%s %d: timer_rc = %d\n", __func__, __LINE__, timer_rc_val);
295 }
296
297 /* Restore previous DAIF flags */
298 write_daif(flags);
299 isb();
300
301 /*
302 * If IRQs were disabled when calling this function, the timer IRQ
303 * handler won't be called and the timer interrupt will be pending, but
304 * that isn't necessarily a problem.
305 */
306 if (timer_rc)
307 *timer_rc = timer_rc_val;
308 if (suspend_rc)
309 *suspend_rc = suspend_rc_val;
310
311 return rc;
312}
313
314int tftf_timer_sleep(unsigned long milli_secs)
315{
316 int ret, power_state;
317 uint32_t stateid;
318
319 ret = tftf_psci_make_composite_state_id(MPIDR_AFFLVL0,
320 PSTATE_TYPE_STANDBY, &stateid);
321 if (ret != PSCI_E_SUCCESS)
322 return -1;
323
324 power_state = tftf_make_psci_pstate(MPIDR_AFFLVL0, PSTATE_TYPE_STANDBY,
325 stateid);
326 ret = tftf_program_timer_and_suspend(milli_secs, power_state,
327 NULL, NULL);
328 if (ret != 0)
329 return -1;
330
331 return 0;
332}
333
/*
 * Cancel the calling core's outstanding timer request.
 *
 * If this core is the one the peripheral is currently programmed for,
 * the peripheral is cancelled, any already-pending timer IRQ is cleared,
 * and the timer is re-programmed and re-targeted for the next core with
 * an outstanding request (if any).
 *
 * Returns 0 on success, or the error code from the platform timer driver.
 */
int tftf_cancel_timer(void)
{
	unsigned int core_pos = platform_get_core_pos(read_mpidr_el1());
	unsigned int next_timer_req_core_pos;
	unsigned long long current_time;
	u_register_t flags;
	int rc = 0;

	/*
	 * IRQ is disabled so that if a timer is fired after taking a lock,
	 * it will remain pending and a core does not hit IRQ handler trying
	 * to acquire an already locked spin_lock causing dead lock.
	 */
	flags = read_daif();
	disable_irq();
	spin_lock(&timer_lock);

	/* Withdraw this core's request; other cores are unaffected. */
	interrupt_req_time[core_pos] = INVALID_TIME;

	if (core_pos == current_prog_core) {
		/*
		 * Cancel the programmed interrupt at the peripheral. If the
		 * timer interrupt is level triggered and fired this also
		 * deactivates the pending interrupt.
		 */
		rc = plat_timer_info->cancel();
		/* We don't expect cancel timer to fail */
		if (rc) {
			ERROR("%s %d: rc = %d\n", __func__, __LINE__, rc);
			goto exit;
		}

		/*
		 * For edge triggered interrupts, if an IRQ is fired before
		 * cancel timer is executed, the signal remains pending. So,
		 * clear the Timer IRQ if it is already pending.
		 */
		if (arm_gic_is_intr_pending(TIMER_IRQ))
			arm_gic_intr_clear(TIMER_IRQ);

		/* Get next timer consumer */
		next_timer_req_core_pos = get_lowest_req_core();
		if (next_timer_req_core_pos != INVALID_CORE) {

			/* Retarget to the next_timer_req_core_pos */
			arm_gic_set_intr_target(TIMER_IRQ, next_timer_req_core_pos);
			current_prog_core = next_timer_req_core_pos;

			current_time = get_current_time_ms();

			/*
			 * If the next timer request is lesser than or in a
			 * window of TIMER_STEP_VALUE from current time,
			 * program it to fire after TIMER_STEP_VALUE.
			 */
			if (interrupt_req_time[next_timer_req_core_pos] >
					current_time + TIMER_STEP_VALUE)
				rc = PROGRAM_TIMER(interrupt_req_time[next_timer_req_core_pos] - current_time);
			else
				rc = PROGRAM_TIMER(TIMER_STEP_VALUE);
			VERBOSE("Cancel and program new timer for core_pos: "
					"%d %lld\n",
					next_timer_req_core_pos,
					get_current_prog_time());
			/* We don't expect timer programming to fail */
			if (rc)
				ERROR("%s %d: rc = %d\n", __func__, __LINE__, rc);
		} else {
			/* No other pending request: leave the timer idle. */
			current_prog_core = INVALID_CORE;
			VERBOSE("Cancelling timer : %d\n", core_pos);
		}
	}
exit:
	spin_unlock(&timer_lock);

	/* Restore DAIF flags */
	write_daif(flags);
	isb();

	return rc;
}
415
/*
 * Framework handler invoked when the timer interrupt fires (runs in ISR
 * context on the core the interrupt was targeted to).
 *
 * It runs the platform driver handler, the per-core handler for this
 * core, wakes (via IRQ_WAKE_SGI) every other core whose request falls
 * within TIMER_STEP_VALUE of now, and finally programs the peripheral
 * for the next earliest outstanding request.
 *
 * Returns 0 on success, or the error code from the platform timer driver.
 */
int tftf_timer_framework_handler(void *data)
{
	unsigned int handler_core_pos = platform_get_core_pos(read_mpidr_el1());
	unsigned int next_timer_req_core_pos;
	unsigned long long current_time;
	int rc = 0;

	/* This core must have had an outstanding request. */
	assert(interrupt_req_time[handler_core_pos] != INVALID_TIME);
	spin_lock(&timer_lock);

	current_time = get_current_time_ms();
	/* Check that the interrupt is targeted correctly */
	assert(handler_core_pos == current_prog_core);

	/* This core's request is now being serviced. */
	interrupt_req_time[handler_core_pos] = INVALID_TIME;

	/* Execute the driver handler */
	if (plat_timer_info->handler)
		plat_timer_info->handler();

	if (arm_gic_is_intr_pending(TIMER_IRQ)) {
		/*
		 * We might never manage to acquire the printf lock here
		 * (because we are in ISR context) but we're gonna panic right
		 * after anyway so it doesn't really matter.
		 */
		ERROR("Timer IRQ still pending. Fatal error.\n");
		panic();
	}

	/*
	 * Execute the handler requested by the core, the handlers for the
	 * other cores will be executed as part of handling IRQ_WAKE_SGI.
	 */
	if (timer_handler[handler_core_pos])
		timer_handler[handler_core_pos](data);

	/* Send interrupts to all the CPUS in the min time block */
	for (int i = 0; i < PLATFORM_CORE_COUNT; i++) {
		if ((interrupt_req_time[i] <=
				(current_time + TIMER_STEP_VALUE))) {
			interrupt_req_time[i] = INVALID_TIME;
			tftf_send_sgi(IRQ_WAKE_SGI, i);
		}
	}

	/* Get the next lowest requested timer core and program it */
	next_timer_req_core_pos = get_lowest_req_core();
	if (next_timer_req_core_pos != INVALID_CORE) {
		/* Check we have not exceeded the time for next core */
		assert(interrupt_req_time[next_timer_req_core_pos] >
				current_time);
		arm_gic_set_intr_target(TIMER_IRQ, next_timer_req_core_pos);
		rc = PROGRAM_TIMER(interrupt_req_time[next_timer_req_core_pos]
				- current_time);
	}
	/* Update current program core to the newer one */
	current_prog_core = next_timer_req_core_pos;

	spin_unlock(&timer_lock);

	return rc;
}
479
480int tftf_timer_register_handler(irq_handler_t irq_handler)
481{
482 unsigned int core_pos = platform_get_core_pos(read_mpidr_el1());
483 int ret;
484
485 /* Validate no handler is registered */
486 assert(!timer_handler[core_pos]);
487 timer_handler[core_pos] = irq_handler;
488
489 /*
490 * Also register same handler to IRQ_WAKE_SGI, as it can be waken
491 * by it.
492 */
Boyan Karatotev6d144db2025-06-23 15:04:53 +0100493 ret = tftf_irq_register_handler_sgi(IRQ_WAKE_SGI, irq_handler);
Sandrine Bailleux3cd87d72018-10-09 11:12:55 +0200494 assert(!ret);
495
496 return ret;
497}
498
499int tftf_timer_unregister_handler(void)
500{
501 unsigned int core_pos = platform_get_core_pos(read_mpidr_el1());
502 int ret;
503
504 /*
505 * Unregister the handler for IRQ_WAKE_SGI also
506 */
Boyan Karatotev6d144db2025-06-23 15:04:53 +0100507 ret = tftf_irq_unregister_handler_sgi(IRQ_WAKE_SGI);
Sandrine Bailleux3cd87d72018-10-09 11:12:55 +0200508 assert(!ret);
509 /* Validate a handler is registered */
510 assert(timer_handler[core_pos]);
511 timer_handler[core_pos] = 0;
512
513 return ret;
514}
515
516unsigned int tftf_get_timer_irq(void)
517{
518 /*
519 * Check if the timer info is initialised
520 */
521 assert(TIMER_IRQ);
522 return TIMER_IRQ;
523}
524
525unsigned int tftf_get_timer_step_value(void)
526{
527 assert(TIMER_STEP_VALUE);
528
529 return TIMER_STEP_VALUE;
530}
531
532/*
533 * There are 4 cases that could happen when a system is resuming from system
534 * suspend. The cases are:
535 * 1. The resumed core is the last core to power down and the
536 * timer interrupt was targeted to it. In this case, target the
537 * interrupt to our core and set the appropriate priority and enable it.
538 *
539 * 2. The resumed core was the last core to power down but the timer interrupt
540 * is targeted to another core because of timer request grouping within
541 * TIMER_STEP_VALUE. In this case, re-target the interrupt to our core
542 * and set the appropriate priority and enable it
543 *
544 * 3. The system suspend request was down-graded by firmware and the timer
545 * interrupt is targeted to another core which woke up first. In this case,
546 * that core will wake us up and the interrupt_req_time[] corresponding to
547 * our core will be cleared. In this case, no need to do anything as GIC
548 * state is preserved.
549 *
550 * 4. The system suspend is woken up by another external interrupt other
551 * than the timer framework interrupt. In this case, just enable the
552 * timer interrupt and set the correct priority at GICD.
553 */
554void tftf_timer_gic_state_restore(void)
555{
556 unsigned int core_pos = platform_get_core_pos(read_mpidr_el1());
557 spin_lock(&timer_lock);
558
559 arm_gic_set_intr_priority(TIMER_IRQ, GIC_HIGHEST_NS_PRIORITY);
560 arm_gic_intr_enable(TIMER_IRQ);
561
562 /* Check if the programmed core is the woken up core */
563 if (interrupt_req_time[core_pos] == INVALID_TIME) {
564 INFO("The programmed core is not the one woken up\n");
565 } else {
566 current_prog_core = core_pos;
567 arm_gic_set_intr_target(TIMER_IRQ, core_pos);
568 }
569
570 spin_unlock(&timer_lock);
571}
572