// SPDX-License-Identifier: GPL-2.0+
//
// Copyright (C) 2000-2001 Deep Blue Solutions
// Copyright (C) 2002 Shane Nay (shane@minirl.com)
// Copyright (C) 2006-2007 Pavel Pisa (ppisa@pikron.com)
// Copyright (C) 2008 Juergen Beisert (kernel@pengutronix.de)

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/clockchips.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/sched_clock.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <soc/imx/timer.h>

/*
 * There are 4 versions of the timer hardware on Freescale MXC SoCs:
 *  - MX1/MXL
 *  - MX21, MX27
 *  - MX25, MX31, MX35, MX37, MX51, MX6Q(rev1.0)
 *  - MX6DL, MX6SX, MX6Q(rev1.1+)
 */

/* defines common for all i.MX */
#define MXC_TCTL		0x00
#define MXC_TCTL_TEN		(1 << 0) /* Enable module */
#define MXC_TPRER		0x04

/* MX1, MX21, MX27 */
#define MX1_2_TCTL_CLK_PCLK1	(1 << 1)
#define MX1_2_TCTL_IRQEN	(1 << 4)
#define MX1_2_TCTL_FRR		(1 << 8)
#define MX1_2_TCMP		0x08
#define MX1_2_TCN		0x10
#define MX1_2_TSTAT		0x14

/* MX21, MX27 */
#define MX2_TSTAT_CAPT		(1 << 1)
#define MX2_TSTAT_COMP		(1 << 0)

/* MX31, MX35, MX25, MX5, MX6 */
#define V2_TCTL_WAITEN		(1 << 3) /* Wait enable mode */
#define V2_TCTL_CLK_IPG		(1 << 6)
#define V2_TCTL_CLK_PER		(2 << 6)
#define V2_TCTL_CLK_OSC_DIV8	(5 << 6)
#define V2_TCTL_FRR		(1 << 9)
#define V2_TCTL_24MEN		(1 << 10)
#define V2_TPRER_PRE24M		12
#define V2_IR			0x0c
#define V2_TSTAT		0x08
#define V2_TSTAT_OF1		(1 << 0)
#define V2_TCN			0x24
#define V2_TCMP			0x10

#define V2_TIMER_RATE_OSC_DIV8	3000000

struct imx_timer {
        enum imx_gpt_type type;
        void __iomem *base;
        int irq;
        struct clk *clk_per;
        struct clk *clk_ipg;
        const struct imx_gpt_data *gpt;
        struct clock_event_device ced;
};

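/*
 * Per-variant description of the GPT block: the register offsets that
 * differ between the register layouts, plus the callbacks used to drive
 * the hardware.
 */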
struct imx_gpt_data {
        int reg_tstat;
        int reg_tcn;
        int reg_tcmp;
        void (*gpt_setup_tctl)(struct imx_timer *imxtm);
        void (*gpt_irq_enable)(struct imx_timer *imxtm);
        void (*gpt_irq_disable)(struct imx_timer *imxtm);
        void (*gpt_irq_acknowledge)(struct imx_timer *imxtm);
        int (*set_next_event)(unsigned long evt,
                              struct clock_event_device *ced);
};

static inline struct imx_timer *to_imx_timer(struct clock_event_device *ced)
{
        return container_of(ced, struct imx_timer, ced);
}

static void imx1_gpt_irq_disable(struct imx_timer *imxtm)
{
        unsigned int tmp;

        tmp = readl_relaxed(imxtm->base + MXC_TCTL);
        writel_relaxed(tmp & ~MX1_2_TCTL_IRQEN, imxtm->base + MXC_TCTL);
}
#define imx21_gpt_irq_disable imx1_gpt_irq_disable

static void imx31_gpt_irq_disable(struct imx_timer *imxtm)
{
        writel_relaxed(0, imxtm->base + V2_IR);
}
#define imx6dl_gpt_irq_disable imx31_gpt_irq_disable

static void imx1_gpt_irq_enable(struct imx_timer *imxtm)
{
        unsigned int tmp;

        tmp = readl_relaxed(imxtm->base + MXC_TCTL);
        writel_relaxed(tmp | MX1_2_TCTL_IRQEN, imxtm->base + MXC_TCTL);
}
#define imx21_gpt_irq_enable imx1_gpt_irq_enable

static void imx31_gpt_irq_enable(struct imx_timer *imxtm)
{
        writel_relaxed(1 << 0, imxtm->base + V2_IR);
}
#define imx6dl_gpt_irq_enable imx31_gpt_irq_enable

static void imx1_gpt_irq_acknowledge(struct imx_timer *imxtm)
{
        writel_relaxed(0, imxtm->base + MX1_2_TSTAT);
}

static void imx21_gpt_irq_acknowledge(struct imx_timer *imxtm)
{
        writel_relaxed(MX2_TSTAT_CAPT | MX2_TSTAT_COMP,
                       imxtm->base + MX1_2_TSTAT);
}

static void imx31_gpt_irq_acknowledge(struct imx_timer *imxtm)
{
        writel_relaxed(V2_TSTAT_OF1, imxtm->base + V2_TSTAT);
}
#define imx6dl_gpt_irq_acknowledge imx31_gpt_irq_acknowledge

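/*
 * Address of the free-running counter register.  It is set up by
 * mxc_clocksource_init() and read by the sched_clock and (on ARM)
 * delay-timer callbacks below.
 */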
static void __iomem *sched_clock_reg;

static u64 notrace mxc_read_sched_clock(void)
{
        return sched_clock_reg ? readl_relaxed(sched_clock_reg) : 0;
}

#if defined(CONFIG_ARM)
static struct delay_timer imx_delay_timer;

static unsigned long imx_read_current_timer(void)
{
        return readl_relaxed(sched_clock_reg);
}
#endif

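/*
 * Export the free-running counter as sched_clock, as the ARM delay timer
 * (when building for ARM) and as a 32-bit memory-mapped clocksource.
 */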
static int __init mxc_clocksource_init(struct imx_timer *imxtm)
{
        unsigned int c = clk_get_rate(imxtm->clk_per);
        void __iomem *reg = imxtm->base + imxtm->gpt->reg_tcn;

#if defined(CONFIG_ARM)
        imx_delay_timer.read_current_timer = &imx_read_current_timer;
        imx_delay_timer.freq = c;
        register_current_timer_delay(&imx_delay_timer);
#endif

        sched_clock_reg = reg;

        sched_clock_register(mxc_read_sched_clock, 32, c);
        return clocksource_mmio_init(reg, "mxc_timer1", c, 200, 32,
                                     clocksource_mmio_readl_up);
}

/* clock event */

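/*
 * Program the output compare register relative to the current counter
 * value.  If the counter has already passed the new compare value by the
 * time it is re-read, the requested delta was too small and -ETIME tells
 * the clockevents core to retry; the v2 variant only reports this for
 * deltas below 0x7fffffff, where the signed comparison is meaningful.
 */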
static int mx1_2_set_next_event(unsigned long evt,
                                struct clock_event_device *ced)
{
        struct imx_timer *imxtm = to_imx_timer(ced);
        unsigned long tcmp;

        tcmp = readl_relaxed(imxtm->base + MX1_2_TCN) + evt;

        writel_relaxed(tcmp, imxtm->base + MX1_2_TCMP);

        return (int)(tcmp - readl_relaxed(imxtm->base + MX1_2_TCN)) < 0 ?
                                -ETIME : 0;
}

static int v2_set_next_event(unsigned long evt,
                             struct clock_event_device *ced)
{
        struct imx_timer *imxtm = to_imx_timer(ced);
        unsigned long tcmp;

        tcmp = readl_relaxed(imxtm->base + V2_TCN) + evt;

        writel_relaxed(tcmp, imxtm->base + V2_TCMP);

        return evt < 0x7fffffff &&
                (int)(tcmp - readl_relaxed(imxtm->base + V2_TCN)) < 0 ?
                                -ETIME : 0;
}

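/*
 * Stop delivering events: mask the interrupt, move the compare value far
 * away from the current counter (tcn - 3 is almost a full counter wrap
 * ahead) and clear anything already pending.
 */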
static int mxc_shutdown(struct clock_event_device *ced)
{
        struct imx_timer *imxtm = to_imx_timer(ced);
        u32 tcn;

        /* Disable interrupt in GPT module */
        imxtm->gpt->gpt_irq_disable(imxtm);

        tcn = readl_relaxed(imxtm->base + imxtm->gpt->reg_tcn);
        /* Set event time into far-far future */
        writel_relaxed(tcn - 3, imxtm->base + imxtm->gpt->reg_tcmp);

        /* Clear pending interrupt */
        imxtm->gpt->gpt_irq_acknowledge(imxtm);

#ifdef DEBUG
        printk(KERN_INFO "%s: changing mode\n", __func__);
#endif /* DEBUG */

        return 0;
}

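/*
 * Switch to oneshot mode: park the compare value out of the way when
 * coming from another state, then leave the interrupt enabled so that
 * set_next_event() does not have to touch it (see the comment near the
 * end of the function).
 */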
static int mxc_set_oneshot(struct clock_event_device *ced)
{
        struct imx_timer *imxtm = to_imx_timer(ced);

        /* Disable interrupt in GPT module */
        imxtm->gpt->gpt_irq_disable(imxtm);

        if (!clockevent_state_oneshot(ced)) {
                u32 tcn = readl_relaxed(imxtm->base + imxtm->gpt->reg_tcn);
                /* Set event time into far-far future */
                writel_relaxed(tcn - 3, imxtm->base + imxtm->gpt->reg_tcmp);

                /* Clear pending interrupt */
                imxtm->gpt->gpt_irq_acknowledge(imxtm);
        }

#ifdef DEBUG
        printk(KERN_INFO "%s: changing mode\n", __func__);
#endif /* DEBUG */

        /*
         * Do not put the overhead of interrupt enable/disable into
         * mxc_set_next_event(); the core has about 4 minutes to call
         * mxc_set_next_event() or shut down the clock after the mode
         * switch.
         */
        imxtm->gpt->gpt_irq_enable(imxtm);

        return 0;
}

/*
 * IRQ handler for the timer
 */
static irqreturn_t mxc_timer_interrupt(int irq, void *dev_id)
{
        struct clock_event_device *ced = dev_id;
        struct imx_timer *imxtm = to_imx_timer(ced);
        uint32_t tstat;

        tstat = readl_relaxed(imxtm->base + imxtm->gpt->reg_tstat);

        imxtm->gpt->gpt_irq_acknowledge(imxtm);

        ced->event_handler(ced);

        return IRQ_HANDLED;
}

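/*
 * Describe the GPT to the clockevents framework (oneshot only, rating 200,
 * bound to CPU0) and request its interrupt.
 */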
static int __init mxc_clockevent_init(struct imx_timer *imxtm)
{
        struct clock_event_device *ced = &imxtm->ced;

        ced->name = "mxc_timer1";
        ced->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_DYNIRQ;
        ced->set_state_shutdown = mxc_shutdown;
        ced->set_state_oneshot = mxc_set_oneshot;
        ced->tick_resume = mxc_shutdown;
        ced->set_next_event = imxtm->gpt->set_next_event;
        ced->rating = 200;
        ced->cpumask = cpumask_of(0);
        ced->irq = imxtm->irq;
        clockevents_config_and_register(ced, clk_get_rate(imxtm->clk_per),
                                        0xff, 0xfffffffe);

        return request_irq(imxtm->irq, mxc_timer_interrupt,
                           IRQF_TIMER | IRQF_IRQPOLL, "i.MX Timer Tick", ced);
}

static void imx1_gpt_setup_tctl(struct imx_timer *imxtm)
{
        u32 tctl_val;

        tctl_val = MX1_2_TCTL_FRR | MX1_2_TCTL_CLK_PCLK1 | MXC_TCTL_TEN;
        writel_relaxed(tctl_val, imxtm->base + MXC_TCTL);
}
#define imx21_gpt_setup_tctl imx1_gpt_setup_tctl

static void imx31_gpt_setup_tctl(struct imx_timer *imxtm)
{
        u32 tctl_val;

        tctl_val = V2_TCTL_FRR | V2_TCTL_WAITEN | MXC_TCTL_TEN;
        if (clk_get_rate(imxtm->clk_per) == V2_TIMER_RATE_OSC_DIV8)
                tctl_val |= V2_TCTL_CLK_OSC_DIV8;
        else
                tctl_val |= V2_TCTL_CLK_PER;

        writel_relaxed(tctl_val, imxtm->base + MXC_TCTL);
}

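/*
 * Same as the i.MX31 setup, but when running from the 24 MHz oscillator
 * the 24M prescaler must also be programmed and enabled: a prescaler
 * value of 7 gives divide-by-8, i.e. 24 MHz / 8 = 3 MHz, matching
 * V2_TIMER_RATE_OSC_DIV8.
 */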
static void imx6dl_gpt_setup_tctl(struct imx_timer *imxtm)
{
        u32 tctl_val;

        tctl_val = V2_TCTL_FRR | V2_TCTL_WAITEN | MXC_TCTL_TEN;
        if (clk_get_rate(imxtm->clk_per) == V2_TIMER_RATE_OSC_DIV8) {
                tctl_val |= V2_TCTL_CLK_OSC_DIV8;
                /* 24 / 8 = 3 MHz */
                writel_relaxed(7 << V2_TPRER_PRE24M, imxtm->base + MXC_TPRER);
                tctl_val |= V2_TCTL_24MEN;
        } else {
                tctl_val |= V2_TCTL_CLK_PER;
        }

        writel_relaxed(tctl_val, imxtm->base + MXC_TCTL);
}

static const struct imx_gpt_data imx1_gpt_data = {
        .reg_tstat = MX1_2_TSTAT,
        .reg_tcn = MX1_2_TCN,
        .reg_tcmp = MX1_2_TCMP,
        .gpt_irq_enable = imx1_gpt_irq_enable,
        .gpt_irq_disable = imx1_gpt_irq_disable,
        .gpt_irq_acknowledge = imx1_gpt_irq_acknowledge,
        .gpt_setup_tctl = imx1_gpt_setup_tctl,
        .set_next_event = mx1_2_set_next_event,
};

static const struct imx_gpt_data imx21_gpt_data = {
        .reg_tstat = MX1_2_TSTAT,
        .reg_tcn = MX1_2_TCN,
        .reg_tcmp = MX1_2_TCMP,
        .gpt_irq_enable = imx21_gpt_irq_enable,
        .gpt_irq_disable = imx21_gpt_irq_disable,
        .gpt_irq_acknowledge = imx21_gpt_irq_acknowledge,
        .gpt_setup_tctl = imx21_gpt_setup_tctl,
        .set_next_event = mx1_2_set_next_event,
};

static const struct imx_gpt_data imx31_gpt_data = {
        .reg_tstat = V2_TSTAT,
        .reg_tcn = V2_TCN,
        .reg_tcmp = V2_TCMP,
        .gpt_irq_enable = imx31_gpt_irq_enable,
        .gpt_irq_disable = imx31_gpt_irq_disable,
        .gpt_irq_acknowledge = imx31_gpt_irq_acknowledge,
        .gpt_setup_tctl = imx31_gpt_setup_tctl,
        .set_next_event = v2_set_next_event,
};

static const struct imx_gpt_data imx6dl_gpt_data = {
        .reg_tstat = V2_TSTAT,
        .reg_tcn = V2_TCN,
        .reg_tcmp = V2_TCMP,
        .gpt_irq_enable = imx6dl_gpt_irq_enable,
        .gpt_irq_disable = imx6dl_gpt_irq_disable,
        .gpt_irq_acknowledge = imx6dl_gpt_irq_acknowledge,
        .gpt_setup_tctl = imx6dl_gpt_setup_tctl,
        .set_next_event = v2_set_next_event,
};

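/*
 * Common initialisation shared by the legacy and device tree probe paths:
 * select the variant ops, enable the clocks, reset the timer block and
 * register both the clocksource and the clockevent.
 */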
static int __init _mxc_timer_init(struct imx_timer *imxtm)
{
        int ret;

        switch (imxtm->type) {
        case GPT_TYPE_IMX1:
                imxtm->gpt = &imx1_gpt_data;
                break;
        case GPT_TYPE_IMX21:
                imxtm->gpt = &imx21_gpt_data;
                break;
        case GPT_TYPE_IMX31:
                imxtm->gpt = &imx31_gpt_data;
                break;
        case GPT_TYPE_IMX6DL:
                imxtm->gpt = &imx6dl_gpt_data;
                break;
        default:
                return -EINVAL;
        }

        if (IS_ERR(imxtm->clk_per)) {
                pr_err("i.MX timer: unable to get clk\n");
                return PTR_ERR(imxtm->clk_per);
        }

        if (!IS_ERR(imxtm->clk_ipg))
                clk_prepare_enable(imxtm->clk_ipg);

        clk_prepare_enable(imxtm->clk_per);

        /*
         * Initialise to a known state (all timers off, and timing reset)
         */

        writel_relaxed(0, imxtm->base + MXC_TCTL);
        writel_relaxed(0, imxtm->base + MXC_TPRER); /* see datasheet note */

        imxtm->gpt->gpt_setup_tctl(imxtm);

        /* init and register the timer to the framework */
        ret = mxc_clocksource_init(imxtm);
        if (ret)
                return ret;

        return mxc_clockevent_init(imxtm);
}

void __init mxc_timer_init(unsigned long pbase, int irq, enum imx_gpt_type type)
{
        struct imx_timer *imxtm;

        imxtm = kzalloc(sizeof(*imxtm), GFP_KERNEL);
        BUG_ON(!imxtm);

        imxtm->clk_per = clk_get_sys("imx-gpt.0", "per");
        imxtm->clk_ipg = clk_get_sys("imx-gpt.0", "ipg");

        imxtm->base = ioremap(pbase, SZ_4K);
        BUG_ON(!imxtm->base);

        imxtm->type = type;
        imxtm->irq = irq;

        _mxc_timer_init(imxtm);
}

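/*
 * Device tree probe path.  Only the first GPT node is used as the system
 * timer; later calls return immediately.
 */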
static int __init mxc_timer_init_dt(struct device_node *np, enum imx_gpt_type type)
{
        struct imx_timer *imxtm;
        static int initialized;
        int ret;

        /* Support one instance only */
        if (initialized)
                return 0;

        imxtm = kzalloc(sizeof(*imxtm), GFP_KERNEL);
        if (!imxtm)
                return -ENOMEM;

        imxtm->base = of_iomap(np, 0);
        if (!imxtm->base)
                return -ENXIO;

        imxtm->irq = irq_of_parse_and_map(np, 0);
        if (imxtm->irq <= 0)
                return -EINVAL;

        imxtm->clk_ipg = of_clk_get_by_name(np, "ipg");

        /* Try osc_per first, and fall back to per otherwise */
        imxtm->clk_per = of_clk_get_by_name(np, "osc_per");
        if (IS_ERR(imxtm->clk_per))
                imxtm->clk_per = of_clk_get_by_name(np, "per");

        imxtm->type = type;

        ret = _mxc_timer_init(imxtm);
        if (ret)
                return ret;

        initialized = 1;

        return 0;
}

static int __init imx1_timer_init_dt(struct device_node *np)
{
        return mxc_timer_init_dt(np, GPT_TYPE_IMX1);
}

static int __init imx21_timer_init_dt(struct device_node *np)
{
        return mxc_timer_init_dt(np, GPT_TYPE_IMX21);
}

static int __init imx31_timer_init_dt(struct device_node *np)
{
        enum imx_gpt_type type = GPT_TYPE_IMX31;

        /*
         * We were using the same compatible string for the i.MX6Q/D and
         * i.MX6DL/S GPT devices, while they actually have different
         * programming models.  This is a workaround to keep existing
         * i.MX6DL/S DTBs working with new kernels.
         */
        if (of_machine_is_compatible("fsl,imx6dl"))
                type = GPT_TYPE_IMX6DL;

        return mxc_timer_init_dt(np, type);
}

static int __init imx6dl_timer_init_dt(struct device_node *np)
{
        return mxc_timer_init_dt(np, GPT_TYPE_IMX6DL);
}

TIMER_OF_DECLARE(imx1_timer, "fsl,imx1-gpt", imx1_timer_init_dt);
TIMER_OF_DECLARE(imx21_timer, "fsl,imx21-gpt", imx21_timer_init_dt);
TIMER_OF_DECLARE(imx27_timer, "fsl,imx27-gpt", imx21_timer_init_dt);
TIMER_OF_DECLARE(imx31_timer, "fsl,imx31-gpt", imx31_timer_init_dt);
TIMER_OF_DECLARE(imx25_timer, "fsl,imx25-gpt", imx31_timer_init_dt);
TIMER_OF_DECLARE(imx50_timer, "fsl,imx50-gpt", imx31_timer_init_dt);
TIMER_OF_DECLARE(imx51_timer, "fsl,imx51-gpt", imx31_timer_init_dt);
TIMER_OF_DECLARE(imx53_timer, "fsl,imx53-gpt", imx31_timer_init_dt);
TIMER_OF_DECLARE(imx6q_timer, "fsl,imx6q-gpt", imx31_timer_init_dt);
TIMER_OF_DECLARE(imx6dl_timer, "fsl,imx6dl-gpt", imx6dl_timer_init_dt);
TIMER_OF_DECLARE(imx6sl_timer, "fsl,imx6sl-gpt", imx6dl_timer_init_dt);
TIMER_OF_DECLARE(imx6sx_timer, "fsl,imx6sx-gpt", imx6dl_timer_init_dt);