1 /*
2 * arch/arm/mach-ixp4xx/common.c
3 *
4 * Generic code shared across all IXP4XX platforms
5 *
6 * Maintainer: Deepak Saxena <dsaxena@plexity.net>
7 *
8 * Copyright 2002 (c) Intel Corporation
9 * Copyright 2003-2004 (c) MontaVista, Software, Inc.
10 *
11 * This file is licensed under the terms of the GNU General Public
12 * License version 2. This program is licensed "as is" without any
13 * warranty of any kind, whether express or implied.
14 */
15
16 #include <linux/kernel.h>
17 #include <linux/mm.h>
18 #include <linux/init.h>
19 #include <linux/serial.h>
20 #include <linux/tty.h>
21 #include <linux/platform_device.h>
22 #include <linux/serial_core.h>
23 #include <linux/interrupt.h>
24 #include <linux/bitops.h>
25 #include <linux/time.h>
26 #include <linux/clocksource.h>
27 #include <linux/clockchips.h>
28 #include <linux/io.h>
29 #include <linux/export.h>
30 #include <linux/gpio.h>
31 #include <linux/cpu.h>
32 #include <linux/pci.h>
33 #include <linux/sched_clock.h>
34 #include <mach/udc.h>
35 #include <mach/hardware.h>
36 #include <mach/io.h>
37 #include <asm/uaccess.h>
38 #include <asm/pgtable.h>
39 #include <asm/page.h>
40 #include <asm/irq.h>
41 #include <asm/system_misc.h>
42 #include <asm/mach/map.h>
43 #include <asm/mach/irq.h>
44 #include <asm/mach/time.h>
45
46 #define IXP4XX_TIMER_FREQ 66666000
47
48 /*
49 * The timer register doesn't allow to specify the two least significant bits of
50 * the timeout value and assumes them being zero. So make sure IXP4XX_LATCH is
51 * the best value with the two least significant bits unset.
52 */
53 #define IXP4XX_LATCH DIV_ROUND_CLOSEST(IXP4XX_TIMER_FREQ, \
54 (IXP4XX_OST_RELOAD_MASK + 1) * HZ) * \
55 (IXP4XX_OST_RELOAD_MASK + 1)
56
57 static void __init ixp4xx_clocksource_init(void);
58 static void __init ixp4xx_clockevent_init(void);
59 static struct clock_event_device clockevent_ixp4xx;
60
61 /*************************************************************************
62 * IXP4xx chipset I/O mapping
63 *************************************************************************/
/*
 * Static virtual->physical mappings installed at map_io time.
 * All regions are mapped MT_DEVICE (uncached, strongly ordered).
 */
static struct map_desc ixp4xx_io_desc[] __initdata = {
	{	/* UART, Interrupt ctrl, GPIO, timers, NPEs, MACs, USB .... */
		.virtual	= (unsigned long)IXP4XX_PERIPHERAL_BASE_VIRT,
		.pfn		= __phys_to_pfn(IXP4XX_PERIPHERAL_BASE_PHYS),
		.length		= IXP4XX_PERIPHERAL_REGION_SIZE,
		.type		= MT_DEVICE
	}, {	/* Expansion Bus Config Registers */
		.virtual	= (unsigned long)IXP4XX_EXP_CFG_BASE_VIRT,
		.pfn		= __phys_to_pfn(IXP4XX_EXP_CFG_BASE_PHYS),
		.length		= IXP4XX_EXP_CFG_REGION_SIZE,
		.type		= MT_DEVICE
	}, {	/* PCI Registers */
		.virtual	= (unsigned long)IXP4XX_PCI_CFG_BASE_VIRT,
		.pfn		= __phys_to_pfn(IXP4XX_PCI_CFG_BASE_PHYS),
		.length		= IXP4XX_PCI_CFG_REGION_SIZE,
		.type		= MT_DEVICE
	}, {	/* Queue Manager */
		.virtual	= (unsigned long)IXP4XX_QMGR_BASE_VIRT,
		.pfn		= __phys_to_pfn(IXP4XX_QMGR_BASE_PHYS),
		.length		= IXP4XX_QMGR_REGION_SIZE,
		.type		= MT_DEVICE
	},
};
87
/* Install the static IXP4xx device mappings defined above. */
void __init ixp4xx_map_io(void)
{
	iotable_init(ixp4xx_io_desc, ARRAY_SIZE(ixp4xx_io_desc));
}
92
93 /*
94 * GPIO-functions
95 */
96 /*
97 * The following converted to the real HW bits the gpio_line_config
98 */
99 /* GPIO pin types */
100 #define IXP4XX_GPIO_OUT 0x1
101 #define IXP4XX_GPIO_IN 0x2
102
103 /* GPIO signal types */
104 #define IXP4XX_GPIO_LOW 0
105 #define IXP4XX_GPIO_HIGH 1
106
107 /* GPIO Clocks */
108 #define IXP4XX_GPIO_CLK_0 14
109 #define IXP4XX_GPIO_CLK_1 15
110
/*
 * Set a GPIO line's direction. GPOER is the output-enable register:
 * a set bit disables the output driver (i.e. makes the line an input).
 */
static void gpio_line_config(u8 line, u32 direction)
{
	u32 bit = 1 << line;

	if (direction == IXP4XX_GPIO_IN)
		*IXP4XX_GPIO_GPOER |= bit;
	else
		*IXP4XX_GPIO_GPOER &= ~bit;
}
118
/* Read the current level (0 or 1) of a GPIO line into *value. */
static void gpio_line_get(u8 line, int *value)
{
	u32 gpinr = *IXP4XX_GPIO_GPINR;

	*value = (gpinr >> line) & 0x1;
}
123
/*
 * Drive a GPIO output line high or low. Values other than
 * IXP4XX_GPIO_HIGH/IXP4XX_GPIO_LOW are deliberately ignored.
 */
static void gpio_line_set(u8 line, int value)
{
	u32 bit = 1 << line;

	if (value == IXP4XX_GPIO_HIGH)
		*IXP4XX_GPIO_GPOUTR |= bit;
	else if (value == IXP4XX_GPIO_LOW)
		*IXP4XX_GPIO_GPOUTR &= ~bit;
}
131
132 /*************************************************************************
133 * IXP4xx chipset IRQ handling
134 *
135 * TODO: GPIO IRQs should be marked invalid until the user of the IRQ
136 * (be it PCI or something else) configures that GPIO line
137 * as an IRQ.
138 **************************************************************************/
enum ixp4xx_irq_type {
	IXP4XX_IRQ_LEVEL, IXP4XX_IRQ_EDGE
};

/* Each bit represents an IRQ: 1: edge-triggered, 0: level triggered */
static unsigned long long ixp4xx_irq_edge = 0;

/*
 * IRQ -> GPIO mapping table.
 * Index is the IRQ number (0-31); the value is the GPIO line behind
 * it, or -1 if the IRQ is not GPIO-backed.
 */
static signed char irq2gpio[32] = {
	-1, -1, -1, -1, -1, -1,  0,  1,
	-1, -1, -1, -1, -1, -1, -1, -1,
	-1, -1, -1,  2,  3,  4,  5,  6,
	 7,  8,  9, 10, 11, 12, -1, -1,
};
155
ixp4xx_gpio_to_irq(struct gpio_chip * chip,unsigned gpio)156 static int ixp4xx_gpio_to_irq(struct gpio_chip *chip, unsigned gpio)
157 {
158 int irq;
159
160 for (irq = 0; irq < 32; irq++) {
161 if (irq2gpio[irq] == gpio)
162 return irq;
163 }
164 return -EINVAL;
165 }
166
ixp4xx_set_irq_type(struct irq_data * d,unsigned int type)167 static int ixp4xx_set_irq_type(struct irq_data *d, unsigned int type)
168 {
169 int line = irq2gpio[d->irq];
170 u32 int_style;
171 enum ixp4xx_irq_type irq_type;
172 volatile u32 *int_reg;
173
174 /*
175 * Only for GPIO IRQs
176 */
177 if (line < 0)
178 return -EINVAL;
179
180 switch (type){
181 case IRQ_TYPE_EDGE_BOTH:
182 int_style = IXP4XX_GPIO_STYLE_TRANSITIONAL;
183 irq_type = IXP4XX_IRQ_EDGE;
184 break;
185 case IRQ_TYPE_EDGE_RISING:
186 int_style = IXP4XX_GPIO_STYLE_RISING_EDGE;
187 irq_type = IXP4XX_IRQ_EDGE;
188 break;
189 case IRQ_TYPE_EDGE_FALLING:
190 int_style = IXP4XX_GPIO_STYLE_FALLING_EDGE;
191 irq_type = IXP4XX_IRQ_EDGE;
192 break;
193 case IRQ_TYPE_LEVEL_HIGH:
194 int_style = IXP4XX_GPIO_STYLE_ACTIVE_HIGH;
195 irq_type = IXP4XX_IRQ_LEVEL;
196 break;
197 case IRQ_TYPE_LEVEL_LOW:
198 int_style = IXP4XX_GPIO_STYLE_ACTIVE_LOW;
199 irq_type = IXP4XX_IRQ_LEVEL;
200 break;
201 default:
202 return -EINVAL;
203 }
204
205 if (irq_type == IXP4XX_IRQ_EDGE)
206 ixp4xx_irq_edge |= (1 << d->irq);
207 else
208 ixp4xx_irq_edge &= ~(1 << d->irq);
209
210 if (line >= 8) { /* pins 8-15 */
211 line -= 8;
212 int_reg = IXP4XX_GPIO_GPIT2R;
213 } else { /* pins 0-7 */
214 int_reg = IXP4XX_GPIO_GPIT1R;
215 }
216
217 /* Clear the style for the appropriate pin */
218 *int_reg &= ~(IXP4XX_GPIO_STYLE_CLEAR <<
219 (line * IXP4XX_GPIO_STYLE_SIZE));
220
221 *IXP4XX_GPIO_GPISR = (1 << line);
222
223 /* Set the new style */
224 *int_reg |= (int_style << (line * IXP4XX_GPIO_STYLE_SIZE));
225
226 /* Configure the line as an input */
227 gpio_line_config(irq2gpio[d->irq], IXP4XX_GPIO_IN);
228
229 return 0;
230 }
231
ixp4xx_irq_mask(struct irq_data * d)232 static void ixp4xx_irq_mask(struct irq_data *d)
233 {
234 if ((cpu_is_ixp46x() || cpu_is_ixp43x()) && d->irq >= 32)
235 *IXP4XX_ICMR2 &= ~(1 << (d->irq - 32));
236 else
237 *IXP4XX_ICMR &= ~(1 << d->irq);
238 }
239
ixp4xx_irq_ack(struct irq_data * d)240 static void ixp4xx_irq_ack(struct irq_data *d)
241 {
242 int line = (d->irq < 32) ? irq2gpio[d->irq] : -1;
243
244 if (line >= 0)
245 *IXP4XX_GPIO_GPISR = (1 << line);
246 }
247
248 /*
249 * Level triggered interrupts on GPIO lines can only be cleared when the
250 * interrupt condition disappears.
251 */
ixp4xx_irq_unmask(struct irq_data * d)252 static void ixp4xx_irq_unmask(struct irq_data *d)
253 {
254 if (!(ixp4xx_irq_edge & (1 << d->irq)))
255 ixp4xx_irq_ack(d);
256
257 if ((cpu_is_ixp46x() || cpu_is_ixp43x()) && d->irq >= 32)
258 *IXP4XX_ICMR2 |= (1 << (d->irq - 32));
259 else
260 *IXP4XX_ICMR |= (1 << d->irq);
261 }
262
/* irq_chip shared by all IXP4xx interrupt sources (GPIO and internal) */
static struct irq_chip ixp4xx_irq_chip = {
	.name		= "IXP4xx",
	.irq_ack	= ixp4xx_irq_ack,
	.irq_mask	= ixp4xx_irq_mask,
	.irq_unmask	= ixp4xx_irq_unmask,
	.irq_set_type	= ixp4xx_set_irq_type,
};
270
ixp4xx_init_irq(void)271 void __init ixp4xx_init_irq(void)
272 {
273 int i = 0;
274
275 /*
276 * ixp4xx does not implement the XScale PWRMODE register
277 * so it must not call cpu_do_idle().
278 */
279 cpu_idle_poll_ctrl(true);
280
281 /* Route all sources to IRQ instead of FIQ */
282 *IXP4XX_ICLR = 0x0;
283
284 /* Disable all interrupt */
285 *IXP4XX_ICMR = 0x0;
286
287 if (cpu_is_ixp46x() || cpu_is_ixp43x()) {
288 /* Route upper 32 sources to IRQ instead of FIQ */
289 *IXP4XX_ICLR2 = 0x00;
290
291 /* Disable upper 32 interrupts */
292 *IXP4XX_ICMR2 = 0x00;
293 }
294
295 /* Default to all level triggered */
296 for(i = 0; i < NR_IRQS; i++) {
297 irq_set_chip_and_handler(i, &ixp4xx_irq_chip,
298 handle_level_irq);
299 set_irq_flags(i, IRQF_VALID);
300 }
301 }
302
303
304 /*************************************************************************
305 * IXP4xx timer tick
306 * We use OS timer1 on the CPU for the timer tick and the timestamp
307 * counter as a source of real clock ticks to account for missed jiffies.
308 *************************************************************************/
309
ixp4xx_timer_interrupt(int irq,void * dev_id)310 static irqreturn_t ixp4xx_timer_interrupt(int irq, void *dev_id)
311 {
312 struct clock_event_device *evt = dev_id;
313
314 /* Clear Pending Interrupt by writing '1' to it */
315 *IXP4XX_OSST = IXP4XX_OSST_TIMER_1_PEND;
316
317 evt->event_handler(evt);
318
319 return IRQ_HANDLED;
320 }
321
/* irqaction wiring OS timer1 to the clockevent device */
static struct irqaction ixp4xx_timer_irq = {
	.name		= "timer1",
	.flags		= IRQF_TIMER | IRQF_IRQPOLL,
	.handler	= ixp4xx_timer_interrupt,
	.dev_id		= &clockevent_ixp4xx,
};
328
/*
 * Bring up the timekeeping hardware: quiesce timer1, clear its
 * pending interrupt, zero the time-stamp counter, then register
 * the tick IRQ, clocksource and clockevent.
 */
void __init ixp4xx_timer_init(void)
{
	/* Reset/disable counter */
	*IXP4XX_OSRT1 = 0;

	/* Clear Pending Interrupt by writing '1' to it */
	*IXP4XX_OSST = IXP4XX_OSST_TIMER_1_PEND;

	/* Reset time-stamp counter */
	*IXP4XX_OSTS = 0;

	/* Connect the interrupt handler and enable the interrupt */
	setup_irq(IRQ_IXP4XX_TIMER1, &ixp4xx_timer_irq);

	ixp4xx_clocksource_init();
	ixp4xx_clockevent_init();
}
346
347 static struct pxa2xx_udc_mach_info ixp4xx_udc_info;
348
ixp4xx_set_udc_info(struct pxa2xx_udc_mach_info * info)349 void __init ixp4xx_set_udc_info(struct pxa2xx_udc_mach_info *info)
350 {
351 memcpy(&ixp4xx_udc_info, info, sizeof *info);
352 }
353
/* MMIO window and interrupt of the on-chip USB device controller */
static struct resource ixp4xx_udc_resources[] = {
	[0] = {
		.start	= 0xc800b000,
		.end	= 0xc800bfff,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= IRQ_IXP4XX_USB,
		.end	= IRQ_IXP4XX_USB,
		.flags	= IORESOURCE_IRQ,
	},
};

/*
 * USB device controller. The IXP4xx uses the same controller as PXA25X,
 * so we just use the same device.
 */
static struct platform_device ixp4xx_udc_device = {
	.name		= "pxa25x-udc",
	.id		= -1,
	.num_resources	= 2,
	.resource	= ixp4xx_udc_resources,
	.dev		= {
		.platform_data = &ixp4xx_udc_info,
	},
};

/* Devices registered on every IXP4xx variant */
static struct platform_device *ixp4xx_devices[] __initdata = {
	&ixp4xx_udc_device,
};
384
/* MMIO window and interrupt of the IXP46x I2C unit */
static struct resource ixp46x_i2c_resources[] = {
	[0] = {
		.start	= 0xc8011000,
		.end	= 0xc801101c,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= IRQ_IXP4XX_I2C,
		.end	= IRQ_IXP4XX_I2C,
		.flags	= IORESOURCE_IRQ
	}
};

/*
 * I2C controller. The IXP46x uses the same block as the IOP3xx, so
 * we just use the same device name.
 */
static struct platform_device ixp46x_i2c_controller = {
	.name		= "IOP3xx-I2C",
	.id		= 0,
	.num_resources	= 2,
	.resource	= ixp46x_i2c_resources
};

/* Extra devices present only on IXP46x */
static struct platform_device *ixp46x_devices[] __initdata = {
	&ixp46x_i2c_controller
};
412
413 unsigned long ixp4xx_exp_bus_size;
414 EXPORT_SYMBOL(ixp4xx_exp_bus_size);
415
ixp4xx_gpio_direction_input(struct gpio_chip * chip,unsigned gpio)416 static int ixp4xx_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
417 {
418 gpio_line_config(gpio, IXP4XX_GPIO_IN);
419
420 return 0;
421 }
422
ixp4xx_gpio_direction_output(struct gpio_chip * chip,unsigned gpio,int level)423 static int ixp4xx_gpio_direction_output(struct gpio_chip *chip, unsigned gpio,
424 int level)
425 {
426 gpio_line_set(gpio, level);
427 gpio_line_config(gpio, IXP4XX_GPIO_OUT);
428
429 return 0;
430 }
431
/* gpiolib hook: return the current input level (0 or 1). */
static int ixp4xx_gpio_get_value(struct gpio_chip *chip, unsigned gpio)
{
	int level;

	gpio_line_get(gpio, &level);
	return level;
}
440
/* gpiolib hook: drive the output register for this line. */
static void ixp4xx_gpio_set_value(struct gpio_chip *chip, unsigned gpio,
				  int value)
{
	gpio_line_set(gpio, value);
}
446
/* The 16-line on-chip GPIO bank, exposed through gpiolib */
static struct gpio_chip ixp4xx_gpio_chip = {
	.label			= "IXP4XX_GPIO_CHIP",
	.direction_input	= ixp4xx_gpio_direction_input,
	.direction_output	= ixp4xx_gpio_direction_output,
	.get			= ixp4xx_gpio_get_value,
	.set			= ixp4xx_gpio_set_value,
	.to_irq			= ixp4xx_gpio_to_irq,
	.base			= 0,
	.ngpio			= 16,
};
457
ixp4xx_sys_init(void)458 void __init ixp4xx_sys_init(void)
459 {
460 ixp4xx_exp_bus_size = SZ_16M;
461
462 platform_add_devices(ixp4xx_devices, ARRAY_SIZE(ixp4xx_devices));
463
464 gpiochip_add(&ixp4xx_gpio_chip);
465
466 if (cpu_is_ixp46x()) {
467 int region;
468
469 platform_add_devices(ixp46x_devices,
470 ARRAY_SIZE(ixp46x_devices));
471
472 for (region = 0; region < 7; region++) {
473 if((*(IXP4XX_EXP_REG(0x4 * region)) & 0x200)) {
474 ixp4xx_exp_bus_size = SZ_32M;
475 break;
476 }
477 }
478 }
479
480 printk("IXP4xx: Using %luMiB expansion bus window size\n",
481 ixp4xx_exp_bus_size >> 20);
482 }
483
484 /*
485 * sched_clock()
486 */
/* sched_clock() source: the free-running 32-bit time-stamp counter. */
static u64 notrace ixp4xx_read_sched_clock(void)
{
	return *IXP4XX_OSTS;
}
491
492 /*
493 * clocksource
494 */
495
/* clocksource read hook: also the free-running time-stamp counter. */
static cycle_t ixp4xx_clocksource_read(struct clocksource *c)
{
	return *IXP4XX_OSTS;
}
500
unsigned long ixp4xx_timer_freq = IXP4XX_TIMER_FREQ;
EXPORT_SYMBOL(ixp4xx_timer_freq);

/*
 * Register the time-stamp counter both as sched_clock and as a
 * 32-bit MMIO clocksource (rating 200).
 */
static void __init ixp4xx_clocksource_init(void)
{
	sched_clock_register(ixp4xx_read_sched_clock, 32, ixp4xx_timer_freq);

	clocksource_mmio_init(NULL, "OSTS", ixp4xx_timer_freq, 200, 32,
			      ixp4xx_clocksource_read);
}
510
511 /*
512 * clockevents
513 */
ixp4xx_set_next_event(unsigned long evt,struct clock_event_device * unused)514 static int ixp4xx_set_next_event(unsigned long evt,
515 struct clock_event_device *unused)
516 {
517 unsigned long opts = *IXP4XX_OSRT1 & IXP4XX_OST_RELOAD_MASK;
518
519 *IXP4XX_OSRT1 = (evt & ~IXP4XX_OST_RELOAD_MASK) | opts;
520
521 return 0;
522 }
523
/*
 * Reprogram OS timer1 for the requested clockevent mode. OSRT1 packs
 * control flags (enable, one-shot) into its low bits and the reload
 * value into the rest; split them, rewrite per mode, then combine.
 */
static void ixp4xx_set_mode(enum clock_event_mode mode,
			    struct clock_event_device *evt)
{
	unsigned long opts = *IXP4XX_OSRT1 & IXP4XX_OST_RELOAD_MASK;
	unsigned long osrt = *IXP4XX_OSRT1 & ~IXP4XX_OST_RELOAD_MASK;

	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		/* Reload one tick period (low bits forced clear) */
		osrt = IXP4XX_LATCH & ~IXP4XX_OST_RELOAD_MASK;
		opts = IXP4XX_OST_ENABLE;
		break;
	case CLOCK_EVT_MODE_ONESHOT:
		/* period set by 'set next_event' */
		osrt = 0;
		opts = IXP4XX_OST_ENABLE | IXP4XX_OST_ONE_SHOT;
		break;
	case CLOCK_EVT_MODE_SHUTDOWN:
		opts &= ~IXP4XX_OST_ENABLE;
		break;
	case CLOCK_EVT_MODE_RESUME:
		opts |= IXP4XX_OST_ENABLE;
		break;
	case CLOCK_EVT_MODE_UNUSED:
	default:
		osrt = opts = 0;
		break;
	}

	*IXP4XX_OSRT1 = osrt | opts;
}
554
/* Clockevent device backed by OS timer1 (periodic and oneshot) */
static struct clock_event_device clockevent_ixp4xx = {
	.name		= "ixp4xx timer1",
	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
	.rating		= 200,
	.set_mode	= ixp4xx_set_mode,
	.set_next_event	= ixp4xx_set_next_event,
};
562
/*
 * Register the clockevent. Min delta 0xf ticks; max delta is the
 * full 32-bit reload range minus the masked-off control bits.
 */
static void __init ixp4xx_clockevent_init(void)
{
	clockevent_ixp4xx.cpumask = cpumask_of(0);
	clockevents_config_and_register(&clockevent_ixp4xx, IXP4XX_TIMER_FREQ,
					0xf, 0xfffffffe);
}
569
/*
 * Machine restart hook. REBOOT_SOFT jumps back to the boot ROM;
 * otherwise arm the on-chip watchdog for an immediate reset.
 */
void ixp4xx_restart(enum reboot_mode mode, const char *cmd)
{
	if (mode == REBOOT_SOFT) {
		/* Jump into ROM at address 0 */
		soft_restart(0);
	} else {
		/* Use on-chip reset capability */

		/* set the "key" register to enable access to
		 * "timer" and "enable" registers
		 */
		*IXP4XX_OSWK = IXP4XX_WDT_KEY;

		/* write 0 to the timer register for an immediate reset */
		*IXP4XX_OSWT = 0;

		/* Enabling last: this fires the watchdog right away */
		*IXP4XX_OSWE = IXP4XX_WDT_RESET_ENABLE | IXP4XX_WDT_COUNT_ENABLE;
	}
}
589
590 #ifdef CONFIG_PCI
/* dmabounce predicate: any buffer ending above the 64 MB PCI window. */
static int ixp4xx_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
{
	return dma_addr + size > SZ_64M;
}
595
/* Device-removal notifier: tear down dmabounce state for PCI devices. */
static int ixp4xx_platform_notify_remove(struct device *dev)
{
	if (!dev_is_pci(dev))
		return 0;

	dmabounce_unregister_dev(dev);
	return 0;
}
603 #endif
604
605 /*
606 * Setup DMA mask to 64MB on PCI devices and 4 GB on all other things.
607 */
/*
 * Device-add notifier: point dma_mask at the coherent mask, then cap
 * PCI devices at 28 bits (64 MB) with dmabounce, everything else at
 * the full 32 bits.
 */
static int ixp4xx_platform_notify(struct device *dev)
{
	dev->dma_mask = &dev->coherent_dma_mask;

#ifdef CONFIG_PCI
	if (dev_is_pci(dev)) {
		dev->coherent_dma_mask = DMA_BIT_MASK(28); /* 64 MB */
		dmabounce_register_dev(dev, 2048, 4096, ixp4xx_needs_bounce);
		return 0;
	}
#endif

	dev->coherent_dma_mask = DMA_BIT_MASK(32);
	return 0;
}
623
/*
 * Platform override of dma_set_coherent_mask(): PCI devices are
 * clamped to 28 bits (the 64 MB window); any resulting mask that
 * cannot cover the full 64 MB is rejected.
 */
int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	u64 min_mask = DMA_BIT_MASK(28);	/* 64 MB */

	if (dev_is_pci(dev))
		mask &= min_mask;

	if ((mask & min_mask) != min_mask)
		return -EIO;	/* device wanted sub-64MB mask */

	dev->coherent_dma_mask = mask;
	return 0;
}
EXPORT_SYMBOL(dma_set_coherent_mask);
637
638 #ifdef CONFIG_IXP4XX_INDIRECT_PCI
639 /*
640 * In the case of using indirect PCI, we simply return the actual PCI
641 * address and our read/write implementation use that to drive the
642 * access registers. If something outside of PCI is ioremap'd, we
643 * fallback to the default.
644 */
645
ixp4xx_ioremap_caller(phys_addr_t addr,size_t size,unsigned int mtype,void * caller)646 static void __iomem *ixp4xx_ioremap_caller(phys_addr_t addr, size_t size,
647 unsigned int mtype, void *caller)
648 {
649 if (!is_pci_memory(addr))
650 return __arm_ioremap_caller(addr, size, mtype, caller);
651
652 return (void __iomem *)addr;
653 }
654
/* iounmap hook: PCI "mappings" were never real mappings, skip them. */
static void ixp4xx_iounmap(void __iomem *addr)
{
	if (is_pci_memory((__force u32)addr))
		return;

	__iounmap(addr);
}
660 #endif
661
/*
 * Early init: install the platform device notifiers and, when using
 * indirect PCI, the custom ioremap/iounmap hooks.
 */
void __init ixp4xx_init_early(void)
{
	platform_notify = ixp4xx_platform_notify;
#ifdef CONFIG_PCI
	platform_notify_remove = ixp4xx_platform_notify_remove;
#endif
#ifdef CONFIG_IXP4XX_INDIRECT_PCI
	arch_ioremap_caller = ixp4xx_ioremap_caller;
	arch_iounmap = ixp4xx_iounmap;
#endif
}
673