/*
 * arch/arm/mach-ixp4xx/common.c
 *
 * Generic code shared across all IXP4XX platforms
 *
 * Maintainer: Deepak Saxena <dsaxena@plexity.net>
 *
 * Copyright 2002 (c) Intel Corporation
 * Copyright 2003-2004 (c) MontaVista Software, Inc.
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/serial.h>
#include <linux/tty.h>
#include <linux/platform_device.h>
#include <linux/serial_core.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/time.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/io.h>
#include <linux/export.h>
#include <linux/gpio/driver.h>
#include <linux/cpu.h>
#include <linux/pci.h>
#include <linux/sched_clock.h>
#include <mach/udc.h>
#include <mach/hardware.h>
#include <mach/io.h>
#include <linux/uaccess.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/irq.h>
#include <asm/system_misc.h>
#include <asm/mach/map.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>

#define IXP4XX_TIMER_FREQ 66666000

/*
 * The timer register doesn't allow specifying the two least significant
 * bits of the timeout value and assumes them to be zero. So make sure
 * IXP4XX_LATCH is the closest achievable value with the two least
 * significant bits unset.
 */
#define IXP4XX_LATCH DIV_ROUND_CLOSEST(IXP4XX_TIMER_FREQ, \
				       (IXP4XX_OST_RELOAD_MASK + 1) * HZ) * \
		     (IXP4XX_OST_RELOAD_MASK + 1)

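/*
 * Worked example, assuming HZ == 100 and IXP4XX_OST_RELOAD_MASK == 0x3
 * (i.e. the two low control bits of OSRT1):
 *
 *   IXP4XX_LATCH = DIV_ROUND_CLOSEST(66666000, 4 * 100) * 4
 *                = 166665 * 4 = 666660
 *
 * which is exactly the ideal 666660 ticks per jiffy and has its two least
 * significant bits clear, so the reload value can be OR'ed with the OSRT1
 * control bits without losing precision.
 */
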
static void __init ixp4xx_clocksource_init(void);
static void __init ixp4xx_clockevent_init(void);
static struct clock_event_device clockevent_ixp4xx;

/*************************************************************************
 * IXP4xx chipset I/O mapping
 *************************************************************************/
static struct map_desc ixp4xx_io_desc[] __initdata = {
	{	/* UART, Interrupt ctrl, GPIO, timers, NPEs, MACs, USB .... */
		.virtual	= (unsigned long)IXP4XX_PERIPHERAL_BASE_VIRT,
		.pfn		= __phys_to_pfn(IXP4XX_PERIPHERAL_BASE_PHYS),
		.length		= IXP4XX_PERIPHERAL_REGION_SIZE,
		.type		= MT_DEVICE
	}, {	/* Expansion Bus Config Registers */
		.virtual	= (unsigned long)IXP4XX_EXP_CFG_BASE_VIRT,
		.pfn		= __phys_to_pfn(IXP4XX_EXP_CFG_BASE_PHYS),
		.length		= IXP4XX_EXP_CFG_REGION_SIZE,
		.type		= MT_DEVICE
	}, {	/* PCI Registers */
		.virtual	= (unsigned long)IXP4XX_PCI_CFG_BASE_VIRT,
		.pfn		= __phys_to_pfn(IXP4XX_PCI_CFG_BASE_PHYS),
		.length		= IXP4XX_PCI_CFG_REGION_SIZE,
		.type		= MT_DEVICE
	}, {	/* Queue Manager */
		.virtual	= (unsigned long)IXP4XX_QMGR_BASE_VIRT,
		.pfn		= __phys_to_pfn(IXP4XX_QMGR_BASE_PHYS),
		.length		= IXP4XX_QMGR_REGION_SIZE,
		.type		= MT_DEVICE
	},
};

void __init ixp4xx_map_io(void)
{
	iotable_init(ixp4xx_io_desc, ARRAY_SIZE(ixp4xx_io_desc));
}

/*
 * GPIO functions
 */
/*
 * The following values are converted to the real HW bits by
 * gpio_line_config() and the other gpio_line_* helpers.
 */
/* GPIO pin types */
#define IXP4XX_GPIO_OUT			0x1
#define IXP4XX_GPIO_IN			0x2

/* GPIO signal types */
#define IXP4XX_GPIO_LOW			0
#define IXP4XX_GPIO_HIGH		1

/* GPIO Clocks */
#define IXP4XX_GPIO_CLK_0		14
#define IXP4XX_GPIO_CLK_1		15

static void gpio_line_config(u8 line, u32 direction)
{
	/* A set bit in GPOER disables the output driver, making the line an input */
	if (direction == IXP4XX_GPIO_IN)
		*IXP4XX_GPIO_GPOER |= (1 << line);
	else
		*IXP4XX_GPIO_GPOER &= ~(1 << line);
}

static void gpio_line_get(u8 line, int *value)
{
	*value = (*IXP4XX_GPIO_GPINR >> line) & 0x1;
}

static void gpio_line_set(u8 line, int value)
{
	if (value == IXP4XX_GPIO_HIGH)
		*IXP4XX_GPIO_GPOUTR |= (1 << line);
	else if (value == IXP4XX_GPIO_LOW)
		*IXP4XX_GPIO_GPOUTR &= ~(1 << line);
}

/*************************************************************************
 * IXP4xx chipset IRQ handling
 *
 * TODO: GPIO IRQs should be marked invalid until the user of the IRQ
 *       (be it PCI or something else) configures that GPIO line
 *       as an IRQ.
 **************************************************************************/
enum ixp4xx_irq_type {
	IXP4XX_IRQ_LEVEL, IXP4XX_IRQ_EDGE
};

/* Each bit represents an IRQ: 1: edge-triggered, 0: level-triggered */
static unsigned long long ixp4xx_irq_edge = 0;

/*
 * IRQ -> GPIO mapping table
 */
static signed char irq2gpio[32] = {
	-1, -1, -1, -1, -1, -1,  0,  1,
	-1, -1, -1, -1, -1, -1, -1, -1,
	-1, -1, -1,  2,  3,  4,  5,  6,
	 7,  8,  9, 10, 11, 12, -1, -1,
};
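
/*
 * Reading the table above: the index is the IRQ number and the value is
 * the GPIO line (or -1 for non-GPIO IRQs). For example, irq2gpio[6] == 0,
 * so GPIO line 0 is wired to IRQ 6, and irq2gpio[19] == 2, so GPIO line 2
 * is wired to IRQ 19. ixp4xx_gpio_to_irq() below does the reverse lookup.
 */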

static int ixp4xx_gpio_to_irq(struct gpio_chip *chip, unsigned gpio)
{
	int irq;

	for (irq = 0; irq < 32; irq++) {
		if (irq2gpio[irq] == gpio)
			return irq;
	}
	return -EINVAL;
}

static int ixp4xx_set_irq_type(struct irq_data *d, unsigned int type)
{
	int line = irq2gpio[d->irq];
	u32 int_style;
	enum ixp4xx_irq_type irq_type;
	volatile u32 *int_reg;

	/*
	 * Only for GPIO IRQs
	 */
	if (line < 0)
		return -EINVAL;

	switch (type) {
	case IRQ_TYPE_EDGE_BOTH:
		int_style = IXP4XX_GPIO_STYLE_TRANSITIONAL;
		irq_type = IXP4XX_IRQ_EDGE;
		break;
	case IRQ_TYPE_EDGE_RISING:
		int_style = IXP4XX_GPIO_STYLE_RISING_EDGE;
		irq_type = IXP4XX_IRQ_EDGE;
		break;
	case IRQ_TYPE_EDGE_FALLING:
		int_style = IXP4XX_GPIO_STYLE_FALLING_EDGE;
		irq_type = IXP4XX_IRQ_EDGE;
		break;
	case IRQ_TYPE_LEVEL_HIGH:
		int_style = IXP4XX_GPIO_STYLE_ACTIVE_HIGH;
		irq_type = IXP4XX_IRQ_LEVEL;
		break;
	case IRQ_TYPE_LEVEL_LOW:
		int_style = IXP4XX_GPIO_STYLE_ACTIVE_LOW;
		irq_type = IXP4XX_IRQ_LEVEL;
		break;
	default:
		return -EINVAL;
	}

	if (irq_type == IXP4XX_IRQ_EDGE)
		ixp4xx_irq_edge |= (1 << d->irq);
	else
		ixp4xx_irq_edge &= ~(1 << d->irq);

	if (line >= 8) {	/* pins 8-15 */
		line -= 8;
		int_reg = IXP4XX_GPIO_GPIT2R;
	} else {		/* pins 0-7 */
		int_reg = IXP4XX_GPIO_GPIT1R;
	}

	/* Clear the style for the appropriate pin */
	*int_reg &= ~(IXP4XX_GPIO_STYLE_CLEAR <<
			(line * IXP4XX_GPIO_STYLE_SIZE));

	*IXP4XX_GPIO_GPISR = (1 << line);

	/* Set the new style */
	*int_reg |= (int_style << (line * IXP4XX_GPIO_STYLE_SIZE));

	/* Configure the line as an input */
	gpio_line_config(irq2gpio[d->irq], IXP4XX_GPIO_IN);

	return 0;
}

static void ixp4xx_irq_mask(struct irq_data *d)
{
	if ((cpu_is_ixp46x() || cpu_is_ixp43x()) && d->irq >= 32)
		*IXP4XX_ICMR2 &= ~(1 << (d->irq - 32));
	else
		*IXP4XX_ICMR &= ~(1 << d->irq);
}

static void ixp4xx_irq_ack(struct irq_data *d)
{
	int line = (d->irq < 32) ? irq2gpio[d->irq] : -1;

	if (line >= 0)
		*IXP4XX_GPIO_GPISR = (1 << line);
}

/*
 * Level triggered interrupts on GPIO lines can only be cleared when the
 * interrupt condition disappears.
 */
static void ixp4xx_irq_unmask(struct irq_data *d)
{
	if (!(ixp4xx_irq_edge & (1 << d->irq)))
		ixp4xx_irq_ack(d);

	if ((cpu_is_ixp46x() || cpu_is_ixp43x()) && d->irq >= 32)
		*IXP4XX_ICMR2 |= (1 << (d->irq - 32));
	else
		*IXP4XX_ICMR |= (1 << d->irq);
}

static struct irq_chip ixp4xx_irq_chip = {
	.name		= "IXP4xx",
	.irq_ack	= ixp4xx_irq_ack,
	.irq_mask	= ixp4xx_irq_mask,
	.irq_unmask	= ixp4xx_irq_unmask,
	.irq_set_type	= ixp4xx_set_irq_type,
};

void __init ixp4xx_init_irq(void)
{
	int i = 0;

	/*
	 * ixp4xx does not implement the XScale PWRMODE register
	 * so it must not call cpu_do_idle().
	 */
	cpu_idle_poll_ctrl(true);

	/* Route all sources to IRQ instead of FIQ */
	*IXP4XX_ICLR = 0x0;

	/* Disable all interrupts */
	*IXP4XX_ICMR = 0x0;

	if (cpu_is_ixp46x() || cpu_is_ixp43x()) {
		/* Route upper 32 sources to IRQ instead of FIQ */
		*IXP4XX_ICLR2 = 0x00;

		/* Disable upper 32 interrupts */
		*IXP4XX_ICMR2 = 0x00;
	}

	/* Default to all level triggered */
	for (i = 0; i < NR_IRQS; i++) {
		irq_set_chip_and_handler(i, &ixp4xx_irq_chip,
					 handle_level_irq);
		irq_clear_status_flags(i, IRQ_NOREQUEST);
	}
}


/*************************************************************************
 * IXP4xx timer tick
 * We use OS timer1 on the CPU for the timer tick and the timestamp
 * counter as a source of real clock ticks to account for missed jiffies.
 *************************************************************************/

static irqreturn_t ixp4xx_timer_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	/* Clear Pending Interrupt by writing '1' to it */
	*IXP4XX_OSST = IXP4XX_OSST_TIMER_1_PEND;

	evt->event_handler(evt);

	return IRQ_HANDLED;
}

static struct irqaction ixp4xx_timer_irq = {
	.name		= "timer1",
	.flags		= IRQF_TIMER | IRQF_IRQPOLL,
	.handler	= ixp4xx_timer_interrupt,
	.dev_id		= &clockevent_ixp4xx,
};

void __init ixp4xx_timer_init(void)
{
	/* Reset/disable counter */
	*IXP4XX_OSRT1 = 0;

	/* Clear Pending Interrupt by writing '1' to it */
	*IXP4XX_OSST = IXP4XX_OSST_TIMER_1_PEND;

	/* Reset time-stamp counter */
	*IXP4XX_OSTS = 0;

	/* Connect the interrupt handler and enable the interrupt */
	setup_irq(IRQ_IXP4XX_TIMER1, &ixp4xx_timer_irq);

	ixp4xx_clocksource_init();
	ixp4xx_clockevent_init();
}

static struct pxa2xx_udc_mach_info ixp4xx_udc_info;

void __init ixp4xx_set_udc_info(struct pxa2xx_udc_mach_info *info)
{
	memcpy(&ixp4xx_udc_info, info, sizeof *info);
}

static struct resource ixp4xx_udc_resources[] = {
	[0] = {
		.start	= 0xc800b000,
		.end	= 0xc800bfff,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= IRQ_IXP4XX_USB,
		.end	= IRQ_IXP4XX_USB,
		.flags	= IORESOURCE_IRQ,
	},
};

/*
 * USB device controller. The IXP4xx uses the same controller as PXA25X,
 * so we just use the same device.
 */
static struct platform_device ixp4xx_udc_device = {
	.name		= "pxa25x-udc",
	.id		= -1,
	.num_resources	= 2,
	.resource	= ixp4xx_udc_resources,
	.dev		= {
		.platform_data = &ixp4xx_udc_info,
	},
};

static struct platform_device *ixp4xx_devices[] __initdata = {
	&ixp4xx_udc_device,
};

static struct resource ixp46x_i2c_resources[] = {
	[0] = {
		.start	= 0xc8011000,
		.end	= 0xc801101c,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= IRQ_IXP4XX_I2C,
		.end	= IRQ_IXP4XX_I2C,
		.flags	= IORESOURCE_IRQ
	}
};

/*
 * I2C controller. The IXP46x uses the same block as the IOP3xx, so
 * we just use the same device name.
 */
static struct platform_device ixp46x_i2c_controller = {
	.name		= "IOP3xx-I2C",
	.id		= 0,
	.num_resources	= 2,
	.resource	= ixp46x_i2c_resources
};

static struct platform_device *ixp46x_devices[] __initdata = {
	&ixp46x_i2c_controller
};

unsigned long ixp4xx_exp_bus_size;
EXPORT_SYMBOL(ixp4xx_exp_bus_size);

static int ixp4xx_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
{
	gpio_line_config(gpio, IXP4XX_GPIO_IN);

	return 0;
}

static int ixp4xx_gpio_direction_output(struct gpio_chip *chip, unsigned gpio,
					int level)
{
	gpio_line_set(gpio, level);
	gpio_line_config(gpio, IXP4XX_GPIO_OUT);

	return 0;
}

static int ixp4xx_gpio_get_value(struct gpio_chip *chip, unsigned gpio)
{
	int value;

	gpio_line_get(gpio, &value);

	return value;
}

static void ixp4xx_gpio_set_value(struct gpio_chip *chip, unsigned gpio,
				  int value)
{
	gpio_line_set(gpio, value);
}

static struct gpio_chip ixp4xx_gpio_chip = {
	.label			= "IXP4XX_GPIO_CHIP",
	.direction_input	= ixp4xx_gpio_direction_input,
	.direction_output	= ixp4xx_gpio_direction_output,
	.get			= ixp4xx_gpio_get_value,
	.set			= ixp4xx_gpio_set_value,
	.to_irq			= ixp4xx_gpio_to_irq,
	.base			= 0,
	.ngpio			= 16,
};

void __init ixp4xx_sys_init(void)
{
	ixp4xx_exp_bus_size = SZ_16M;

	platform_add_devices(ixp4xx_devices, ARRAY_SIZE(ixp4xx_devices));

	gpiochip_add_data(&ixp4xx_gpio_chip, NULL);

	if (cpu_is_ixp46x()) {
		int region;

		platform_add_devices(ixp46x_devices,
				ARRAY_SIZE(ixp46x_devices));

		for (region = 0; region < 7; region++) {
			if ((*(IXP4XX_EXP_REG(0x4 * region)) & 0x200)) {
				ixp4xx_exp_bus_size = SZ_32M;
				break;
			}
		}
	}

	printk("IXP4xx: Using %luMiB expansion bus window size\n",
	       ixp4xx_exp_bus_size >> 20);
}

/*
 * sched_clock()
 */
static u64 notrace ixp4xx_read_sched_clock(void)
{
	return *IXP4XX_OSTS;
}

/*
 * clocksource
 */

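/*
 * OSTS is a free-running 32-bit counter clocked at IXP4XX_TIMER_FREQ
 * (66.666 MHz), so it wraps roughly every 2^32 / 66666000 ~= 64 seconds;
 * the sched_clock and clocksource layers extend it beyond that.
 */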
static u64 ixp4xx_clocksource_read(struct clocksource *c)
{
	return *IXP4XX_OSTS;
}

unsigned long ixp4xx_timer_freq = IXP4XX_TIMER_FREQ;
EXPORT_SYMBOL(ixp4xx_timer_freq);
static void __init ixp4xx_clocksource_init(void)
{
	sched_clock_register(ixp4xx_read_sched_clock, 32, ixp4xx_timer_freq);

	clocksource_mmio_init(NULL, "OSTS", ixp4xx_timer_freq, 200, 32,
			      ixp4xx_clocksource_read);
}

/*
 * clockevents
 */
static int ixp4xx_set_next_event(unsigned long evt,
				 struct clock_event_device *unused)
{
	unsigned long opts = *IXP4XX_OSRT1 & IXP4XX_OST_RELOAD_MASK;

	*IXP4XX_OSRT1 = (evt & ~IXP4XX_OST_RELOAD_MASK) | opts;

	return 0;
}

static int ixp4xx_shutdown(struct clock_event_device *evt)
{
	unsigned long opts = *IXP4XX_OSRT1 & IXP4XX_OST_RELOAD_MASK;
	unsigned long osrt = *IXP4XX_OSRT1 & ~IXP4XX_OST_RELOAD_MASK;

	opts &= ~IXP4XX_OST_ENABLE;
	*IXP4XX_OSRT1 = osrt | opts;
	return 0;
}

static int ixp4xx_set_oneshot(struct clock_event_device *evt)
{
	unsigned long opts = IXP4XX_OST_ENABLE | IXP4XX_OST_ONE_SHOT;
	unsigned long osrt = 0;

	/* period set by set_next_event() */
	*IXP4XX_OSRT1 = osrt | opts;
	return 0;
}

static int ixp4xx_set_periodic(struct clock_event_device *evt)
{
	unsigned long opts = IXP4XX_OST_ENABLE;
	unsigned long osrt = IXP4XX_LATCH & ~IXP4XX_OST_RELOAD_MASK;

	*IXP4XX_OSRT1 = osrt | opts;
	return 0;
}

static int ixp4xx_resume(struct clock_event_device *evt)
{
	unsigned long opts = *IXP4XX_OSRT1 & IXP4XX_OST_RELOAD_MASK;
	unsigned long osrt = *IXP4XX_OSRT1 & ~IXP4XX_OST_RELOAD_MASK;

	opts |= IXP4XX_OST_ENABLE;
	*IXP4XX_OSRT1 = osrt | opts;
	return 0;
}

static struct clock_event_device clockevent_ixp4xx = {
	.name			= "ixp4xx timer1",
	.features		= CLOCK_EVT_FEAT_PERIODIC |
				  CLOCK_EVT_FEAT_ONESHOT,
	.rating			= 200,
	.set_state_shutdown	= ixp4xx_shutdown,
	.set_state_periodic	= ixp4xx_set_periodic,
	.set_state_oneshot	= ixp4xx_set_oneshot,
	.tick_resume		= ixp4xx_resume,
	.set_next_event		= ixp4xx_set_next_event,
};

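/*
 * The min and max deltas passed to clockevents_config_and_register()
 * below are in timer ticks: at 66.666 MHz, 0xf ticks is roughly 0.2 us
 * and 0xfffffffe ticks is roughly 64 seconds between events.
 */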
static void __init ixp4xx_clockevent_init(void)
{
	clockevent_ixp4xx.cpumask = cpumask_of(0);
	clockevents_config_and_register(&clockevent_ixp4xx, IXP4XX_TIMER_FREQ,
					0xf, 0xfffffffe);
}

void ixp4xx_restart(enum reboot_mode mode, const char *cmd)
{
	if (mode == REBOOT_SOFT) {
		/* Jump into ROM at address 0 */
		soft_restart(0);
	} else {
		/* Use on-chip reset capability */

		/* set the "key" register to enable access to
		 * "timer" and "enable" registers
		 */
		*IXP4XX_OSWK = IXP4XX_WDT_KEY;

		/* write 0 to the timer register for an immediate reset */
		*IXP4XX_OSWT = 0;

		*IXP4XX_OSWE = IXP4XX_WDT_RESET_ENABLE | IXP4XX_WDT_COUNT_ENABLE;
	}
}

#ifdef CONFIG_PCI
static int ixp4xx_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
{
	return (dma_addr + size) > SZ_64M;
}

static int ixp4xx_platform_notify_remove(struct device *dev)
{
	if (dev_is_pci(dev))
		dmabounce_unregister_dev(dev);

	return 0;
}
#endif

/*
 * Set up the DMA mask to 64 MB on PCI devices and 4 GB on everything else.
 */
static int ixp4xx_platform_notify(struct device *dev)
{
	dev->dma_mask = &dev->coherent_dma_mask;

#ifdef CONFIG_PCI
	if (dev_is_pci(dev)) {
		dev->coherent_dma_mask = DMA_BIT_MASK(28); /* 64 MB */
		dmabounce_register_dev(dev, 2048, 4096, ixp4xx_needs_bounce);
		return 0;
	}
#endif

	dev->coherent_dma_mask = DMA_BIT_MASK(32);
	return 0;
}

int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	if (dev_is_pci(dev))
		mask &= DMA_BIT_MASK(28); /* 64 MB */

	if ((mask & DMA_BIT_MASK(28)) == DMA_BIT_MASK(28)) {
		dev->coherent_dma_mask = mask;
		return 0;
	}

	return -EIO;		/* device wanted sub-64MB mask */
}
EXPORT_SYMBOL(dma_set_coherent_mask);

#ifdef CONFIG_IXP4XX_INDIRECT_PCI
/*
 * In the case of using indirect PCI, we simply return the actual PCI
 * address and our read/write implementation uses that to drive the
 * access registers. If something outside of PCI is ioremap'd, we
 * fall back to the default.
 */

static void __iomem *ixp4xx_ioremap_caller(phys_addr_t addr, size_t size,
					   unsigned int mtype, void *caller)
{
	if (!is_pci_memory(addr))
		return __arm_ioremap_caller(addr, size, mtype, caller);

	return (void __iomem *)addr;
}

static void ixp4xx_iounmap(volatile void __iomem *addr)
{
	if (!is_pci_memory((__force u32)addr))
		__iounmap(addr);
}
#endif

void __init ixp4xx_init_early(void)
{
	platform_notify = ixp4xx_platform_notify;
#ifdef CONFIG_PCI
	platform_notify_remove = ixp4xx_platform_notify_remove;
#endif
#ifdef CONFIG_IXP4XX_INDIRECT_PCI
	arch_ioremap_caller = ixp4xx_ioremap_caller;
	arch_iounmap = ixp4xx_iounmap;
#endif
}