diff --git a/arch/sh/drivers/dma/dma-sh.c b/arch/sh/drivers/dma/dma-sh.c
index a60da6dd4d17..4c171f13b0e8 100644
--- a/arch/sh/drivers/dma/dma-sh.c
+++ b/arch/sh/drivers/dma/dma-sh.c
@@ -14,35 +14,72 @@
 #include
 #include
 #include
+#include
 #include
 #include
-#include
-#include
+#include
+#include
+#include
 
-#if defined(DMAE1_IRQ)
-#define NR_DMAE		2
-#else
-#define NR_DMAE		1
+/*
+ * Define the default configuration for dual address memory-memory transfer.
+ * The 0x400 value represents auto-request, external->external.
+ */
+#define RS_DUAL	(DM_INC | SM_INC | 0x400 | TS_INDEX2VAL(XMIT_SZ_32BIT))
+
+static unsigned long dma_find_base(unsigned int chan)
+{
+	unsigned long base = SH_DMAC_BASE0;
+
+#ifdef SH_DMAC_BASE1
+	if (chan >= 6)
+		base = SH_DMAC_BASE1;
 #endif
 
-static const char *dmae_name[] = {
-	"DMAC Address Error0", "DMAC Address Error1"
+	return base;
+}
+
+static unsigned long dma_base_addr(unsigned int chan)
+{
+	unsigned long base = dma_find_base(chan);
+
+	/* Normalize offset calculation */
+	if (chan >= 6)
+		chan -= 6;
+	if (chan >= 4)
+		base += 0x10;
+
+	return base + (chan * 0x10);
+}
+
+#ifdef CONFIG_SH_DMA_IRQ_MULTI
+static inline unsigned int get_dmte_irq(unsigned int chan)
+{
+	return chan >= 6 ? DMTE6_IRQ : DMTE0_IRQ;
+}
+#else
+
+static unsigned int dmte_irq_map[] = {
+	DMTE0_IRQ, DMTE0_IRQ + 1, DMTE0_IRQ + 2, DMTE0_IRQ + 3,
+
+#ifdef DMTE4_IRQ
+	DMTE4_IRQ, DMTE4_IRQ + 1,
+#endif
+
+#ifdef DMTE6_IRQ
+	DMTE6_IRQ, DMTE6_IRQ + 1,
+#endif
+
+#ifdef DMTE8_IRQ
+	DMTE8_IRQ, DMTE9_IRQ, DMTE10_IRQ, DMTE11_IRQ,
+#endif
 };
 
 static inline unsigned int get_dmte_irq(unsigned int chan)
 {
-	unsigned int irq = 0;
-	if (chan < ARRAY_SIZE(dmte_irq_map))
-		irq = dmte_irq_map[chan];
-
-#if defined(CONFIG_SH_DMA_IRQ_MULTI)
-	if (irq > DMTE6_IRQ)
-		return DMTE6_IRQ;
-	return DMTE0_IRQ;
-#else
-	return irq;
-#endif
+	return dmte_irq_map[chan];
 }
+#endif
 
 /*
  * We determine the correct shift size based off of the CHCR transmit size
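For reference, here is a standalone user-space sketch (not part of the patch) that mirrors the dma_find_base()/dma_base_addr() arithmetic introduced above and checks it against the per-channel offsets encoded by the dma_base_addr[] table that the deleted asm/dma-sh.h carried. The two base addresses are the SH7724 values quoted later in this patch; everything else is purely illustrative.

/*
 * Illustrative only: mirrors the new channel -> register-block math and
 * verifies it against the offsets from the old static table.
 */
#include <assert.h>
#include <stdio.h>

#define SH_DMAC_BASE0	0xFE008020UL	/* assumed: SH7724 value */
#define SH_DMAC_BASE1	0xFDC08020UL	/* assumed: SH7724 value */

static unsigned long dma_find_base(unsigned int chan)
{
	/* Channels 0-5 live on the first DMAC, 6-11 on the second. */
	return chan >= 6 ? SH_DMAC_BASE1 : SH_DMAC_BASE0;
}

static unsigned long dma_base_addr(unsigned int chan)
{
	unsigned long base = dma_find_base(chan);

	/* Channels 6..11 reuse the 0..5 offsets within the second block. */
	if (chan >= 6)
		chan -= 6;
	/* Channels 4 and 5 sit past the 0x10 hole left at +0x40. */
	if (chan >= 4)
		base += 0x10;

	return base + (chan * 0x10);
}

int main(void)
{
	/* Per-block offsets from the old dma_base_addr[] table. */
	static const unsigned long off[] = { 0x00, 0x10, 0x20, 0x30, 0x50, 0x60 };
	unsigned int chan;

	for (chan = 0; chan < 12; chan++) {
		unsigned long want = dma_find_base(chan) + off[chan % 6];

		assert(dma_base_addr(chan) == want);
		printf("channel %2u -> 0x%08lx\n", chan, dma_base_addr(chan));
	}

	return 0;
}

The +0x10 adjustment reproduces the gap the old table leaves between channels 3 and 4, which corresponds to the DMAOR slot sitting inside the channel register space on these parts.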
@@ -53,9 +90,10 @@ static inline unsigned int get_dmte_irq(unsigned int chan)
  * iterations to complete the transfer.
  */
 static unsigned int ts_shift[] = TS_SHIFT;
+
 static inline unsigned int calc_xmit_shift(struct dma_channel *chan)
 {
-	u32 chcr = __raw_readl(dma_base_addr[chan->chan] + CHCR);
+	u32 chcr = __raw_readl(dma_base_addr(chan->chan) + CHCR);
 	int cnt = ((chcr & CHCR_TS_LOW_MASK) >> CHCR_TS_LOW_SHIFT) |
 		((chcr & CHCR_TS_HIGH_MASK) >> CHCR_TS_HIGH_SHIFT);
 
@@ -73,13 +111,13 @@ static irqreturn_t dma_tei(int irq, void *dev_id)
 	struct dma_channel *chan = dev_id;
 	u32 chcr;
 
-	chcr = __raw_readl(dma_base_addr[chan->chan] + CHCR);
+	chcr = __raw_readl(dma_base_addr(chan->chan) + CHCR);
 
 	if (!(chcr & CHCR_TE))
 		return IRQ_NONE;
 
 	chcr &= ~(CHCR_IE | CHCR_DE);
-	__raw_writel(chcr, (dma_base_addr[chan->chan] + CHCR));
+	__raw_writel(chcr, (dma_base_addr(chan->chan) + CHCR));
 
 	wake_up(&chan->wait_queue);
 
@@ -91,13 +129,8 @@ static int sh_dmac_request_dma(struct dma_channel *chan)
 	if (unlikely(!(chan->flags & DMA_TEI_CAPABLE)))
 		return 0;
 
-	return request_irq(get_dmte_irq(chan->chan), dma_tei,
-#if defined(CONFIG_SH_DMA_IRQ_MULTI)
-				IRQF_SHARED,
-#else
-				0,
-#endif
-				chan->dev_id, chan);
+	return request_irq(get_dmte_irq(chan->chan), dma_tei, IRQF_SHARED,
+			   chan->dev_id, chan);
 }
 
 static void sh_dmac_free_dma(struct dma_channel *chan)
@@ -118,7 +151,7 @@ sh_dmac_configure_channel(struct dma_channel *chan, unsigned long chcr)
 		chan->flags &= ~DMA_TEI_CAPABLE;
 	}
 
-	__raw_writel(chcr, (dma_base_addr[chan->chan] + CHCR));
+	__raw_writel(chcr, (dma_base_addr(chan->chan) + CHCR));
 
 	chan->flags |= DMA_CONFIGURED;
 	return 0;
@@ -129,13 +162,13 @@ static void sh_dmac_enable_dma(struct dma_channel *chan)
 	int irq;
 	u32 chcr;
 
-	chcr = __raw_readl(dma_base_addr[chan->chan] + CHCR);
+	chcr = __raw_readl(dma_base_addr(chan->chan) + CHCR);
 	chcr |= CHCR_DE;
 
 	if (chan->flags & DMA_TEI_CAPABLE)
 		chcr |= CHCR_IE;
 
-	__raw_writel(chcr, (dma_base_addr[chan->chan] + CHCR));
+	__raw_writel(chcr, (dma_base_addr(chan->chan) + CHCR));
 
 	if (chan->flags & DMA_TEI_CAPABLE) {
 		irq = get_dmte_irq(chan->chan);
@@ -153,9 +186,9 @@ static void sh_dmac_disable_dma(struct dma_channel *chan)
 		disable_irq(irq);
 	}
 
-	chcr = __raw_readl(dma_base_addr[chan->chan] + CHCR);
+	chcr = __raw_readl(dma_base_addr(chan->chan) + CHCR);
 	chcr &= ~(CHCR_DE | CHCR_TE | CHCR_IE);
-	__raw_writel(chcr, (dma_base_addr[chan->chan] + CHCR));
+	__raw_writel(chcr, (dma_base_addr(chan->chan) + CHCR));
 }
 
 static int sh_dmac_xfer_dma(struct dma_channel *chan)
@@ -186,13 +219,13 @@ static int sh_dmac_xfer_dma(struct dma_channel *chan)
 	 */
 	if (chan->sar || (mach_is_dreamcast() &&
			  chan->chan == PVR2_CASCADE_CHAN))
-		__raw_writel(chan->sar, (dma_base_addr[chan->chan]+SAR));
+		__raw_writel(chan->sar, (dma_base_addr(chan->chan) + SAR));
 	if (chan->dar || (mach_is_dreamcast() &&
			  chan->chan == PVR2_CASCADE_CHAN))
-		__raw_writel(chan->dar, (dma_base_addr[chan->chan] + DAR));
+		__raw_writel(chan->dar, (dma_base_addr(chan->chan) + DAR));
 
 	__raw_writel(chan->count >> calc_xmit_shift(chan),
-		(dma_base_addr[chan->chan] + TCR));
+		(dma_base_addr(chan->chan) + TCR));
 
 	sh_dmac_enable_dma(chan);
 
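As a quick illustration (not from the patch) of the TCR bookkeeping above: TCR counts transfer units rather than bytes, so sh_dmac_xfer_dma() scales the byte count down by calc_xmit_shift() and sh_dmac_get_dma_residue() (just below) scales it back up. The shift of 2 here assumes the 32-bit transfer size selected by RS_DUAL; other CHCR transmit sizes map to other shifts through ts_shift[].

#include <assert.h>
#include <stdio.h>

int main(void)
{
	const unsigned int xmit_shift = 2;	/* assumed: XMIT_SZ_32BIT, 4 bytes per unit */
	unsigned long count = 64 * 1024;	/* requested transfer, in bytes */

	/* What sh_dmac_xfer_dma() would program into TCR. */
	unsigned long tcr = count >> xmit_shift;

	/* Pretend the controller still has 100 units left to move. */
	unsigned long tcr_remaining = 100;

	/* What sh_dmac_get_dma_residue() would then report, in bytes. */
	unsigned long residue = tcr_remaining << xmit_shift;

	assert(tcr == 16384);
	assert(residue == 400);

	printf("TCR=%lu units, residue=%lu bytes\n", tcr, residue);
	return 0;
}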
@@ -201,13 +234,32 @@ static int sh_dmac_xfer_dma(struct dma_channel *chan)
 
 static int sh_dmac_get_dma_residue(struct dma_channel *chan)
 {
-	if (!(__raw_readl(dma_base_addr[chan->chan] + CHCR) & CHCR_DE))
+	if (!(__raw_readl(dma_base_addr(chan->chan) + CHCR) & CHCR_DE))
 		return 0;
 
-	return __raw_readl(dma_base_addr[chan->chan] + TCR)
+	return __raw_readl(dma_base_addr(chan->chan) + TCR)
 		<< calc_xmit_shift(chan);
 }
 
+/*
+ * DMAOR handling
+ */
+#if defined(CONFIG_CPU_SUBTYPE_SH7723)	|| \
+    defined(CONFIG_CPU_SUBTYPE_SH7724)	|| \
+    defined(CONFIG_CPU_SUBTYPE_SH7780)	|| \
+    defined(CONFIG_CPU_SUBTYPE_SH7785)
+#define NR_DMAOR	2
+#else
+#define NR_DMAOR	1
+#endif
+
+/*
+ * DMAOR bases are broken out amongst channel groups. DMAOR0 manages
+ * channels 0 - 5, DMAOR1 6 - 11 (optional).
+ */
+#define dmaor_read_reg(n)		__raw_readw(dma_find_base((n)*6))
+#define dmaor_write_reg(n, data)	__raw_writew(data, dma_find_base((n)*6))
+
 static inline int dmaor_reset(int no)
 {
 	unsigned long dmaor = dmaor_read_reg(no);
@@ -228,36 +280,86 @@ static inline int dmaor_reset(int no)
 	return 0;
 }
 
-#if defined(CONFIG_CPU_SH4)
+/*
+ * DMAE handling
+ */
+#ifdef CONFIG_CPU_SH4
+
+#if defined(DMAE1_IRQ)
+#define NR_DMAE	2
+#else
+#define NR_DMAE	1
+#endif
+
+static const char *dmae_name[] = {
+	"DMAC Address Error0",
+	"DMAC Address Error1"
+};
+
+#ifdef CONFIG_SH_DMA_IRQ_MULTI
+static inline unsigned int get_dma_error_irq(int n)
+{
+	return get_dmte_irq(n * 6);
+}
+#else
+
+static unsigned int dmae_irq_map[] = {
+	DMAE0_IRQ,
+
+#ifdef DMAE1_IRQ
+	DMAE1_IRQ,
+#endif
+};
+
+static inline unsigned int get_dma_error_irq(int n)
+{
+	return dmae_irq_map[n];
+}
+#endif
+
 static irqreturn_t dma_err(int irq, void *dummy)
 {
-#if defined(CONFIG_SH_DMA_IRQ_MULTI)
-	int cnt = 0;
-	switch (irq) {
-#if defined(DMTE6_IRQ) && defined(DMAE1_IRQ)
-	case DMTE6_IRQ:
-		cnt++;
-#endif
-	case DMTE0_IRQ:
-		if (dmaor_read_reg(cnt) & (DMAOR_NMIF | DMAOR_AE)) {
-			disable_irq(irq);
-			/* DMA multi and error IRQ */
-			return IRQ_HANDLED;
-		}
-	default:
-		return IRQ_NONE;
-	}
-#else
-	dmaor_reset(0);
-#if defined(CONFIG_CPU_SUBTYPE_SH7723) || \
-		defined(CONFIG_CPU_SUBTYPE_SH7780) || \
-		defined(CONFIG_CPU_SUBTYPE_SH7785)
-	dmaor_reset(1);
-#endif
+	int i;
+
+	for (i = 0; i < NR_DMAOR; i++)
+		dmaor_reset(i);
+
 	disable_irq(irq);
 
 	return IRQ_HANDLED;
-#endif
+}
+
+static int dmae_irq_init(void)
+{
+	int n;
+
+	for (n = 0; n < NR_DMAE; n++) {
+		int i = request_irq(get_dma_error_irq(n), dma_err,
+				    IRQF_SHARED, dmae_name[n], NULL);
+		if (unlikely(i < 0)) {
+			printk(KERN_ERR "%s request_irq fail\n", dmae_name[n]);
+			return i;
+		}
+	}
+
+	return 0;
+}
+
+static void dmae_irq_free(void)
+{
+	int n;
+
+	for (n = 0; n < NR_DMAE; n++)
+		free_irq(get_dma_error_irq(n), NULL);
+}
+#else
+static inline int dmae_irq_init(void)
+{
+	return 0;
+}
+
+static void dmae_irq_free(void)
+{
 }
 #endif
 
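A small standalone sketch (not part of the patch) of how the DMAOR helpers above resolve: passing (n)*6 into dma_find_base() turns a DMAOR index into a representative channel of that group — 0 for DMAOR0, 6 for DMAOR1 — so one base lookup serves both the per-channel registers and the per-group DMAOR access, and dma_err() can simply walk every group. The base values again assume the SH7724 definitions from cpu-sh4a/cpu/dma.h.

#include <stdio.h>

#define SH_DMAC_BASE0	0xFE008020UL	/* assumed: SH7724 value */
#define SH_DMAC_BASE1	0xFDC08020UL	/* assumed: SH7724 value */
#define NR_DMAOR	2

static unsigned long dma_find_base(unsigned int chan)
{
	return chan >= 6 ? SH_DMAC_BASE1 : SH_DMAC_BASE0;
}

int main(void)
{
	int n;

	/* dma_err() resets every controller the same way on an error IRQ. */
	for (n = 0; n < NR_DMAOR; n++)
		printf("DMAOR%d lives in the block at 0x%08lx\n",
		       n, dma_find_base(n * 6));

	return 0;
}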
@@ -276,72 +378,34 @@ static struct dma_info sh_dmac_info = {
 	.flags		= DMAC_CHANNELS_TEI_CAPABLE,
 };
 
-#ifdef CONFIG_CPU_SH4
-static unsigned int get_dma_error_irq(int n)
-{
-#if defined(CONFIG_SH_DMA_IRQ_MULTI)
-	return (n == 0) ? get_dmte_irq(0) : get_dmte_irq(6);
-#else
-	return (n == 0) ? DMAE0_IRQ :
-#if defined(DMAE1_IRQ)
-			  DMAE1_IRQ;
-#else
-			  -1;
-#endif
-#endif
-}
-#endif
-
 static int __init sh_dmac_init(void)
 {
 	struct dma_info *info = &sh_dmac_info;
-	int i;
+	int i, rc;
 
-#ifdef CONFIG_CPU_SH4
-	int n;
-
-	for (n = 0; n < NR_DMAE; n++) {
-		i = request_irq(get_dma_error_irq(n), dma_err,
-#if defined(CONFIG_SH_DMA_IRQ_MULTI)
-				IRQF_SHARED,
-#else
-				0,
-#endif
-				dmae_name[n], (void *)dmae_name[n]);
-		if (unlikely(i < 0)) {
-			printk(KERN_ERR "%s request_irq fail\n", dmae_name[n]);
-			return i;
-		}
-	}
-#endif /* CONFIG_CPU_SH4 */
+	/*
+	 * Initialize DMAE, for parts that support it.
+	 */
+	rc = dmae_irq_init();
+	if (unlikely(rc != 0))
+		return rc;
 
 	/*
 	 * Initialize DMAOR, and clean up any error flags that may have
 	 * been set.
 	 */
-	i = dmaor_reset(0);
-	if (unlikely(i != 0))
-		return i;
-#if defined(CONFIG_CPU_SUBTYPE_SH7723) || \
-	defined(CONFIG_CPU_SUBTYPE_SH7780) || \
-	defined(CONFIG_CPU_SUBTYPE_SH7785)
-	i = dmaor_reset(1);
-	if (unlikely(i != 0))
-		return i;
-#endif
+	for (i = 0; i < NR_DMAOR; i++) {
+		rc = dmaor_reset(i);
+		if (unlikely(rc != 0))
+			return rc;
+	}
 
 	return register_dmac(info);
 }
 
 static void __exit sh_dmac_exit(void)
 {
-#ifdef CONFIG_CPU_SH4
-	int n;
-
-	for (n = 0; n < NR_DMAE; n++) {
-		free_irq(get_dma_error_irq(n), (void *)dmae_name[n]);
-	}
-#endif /* CONFIG_CPU_SH4 */
+	dmae_irq_free();
 	unregister_dmac(&sh_dmac_info);
 }
 
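To make the pairing of the new helpers explicit, here is a sketch of the init flow. It is not the patch's code and is not buildable on its own (it reuses the symbols introduced above): the patch itself returns straight away if dmaor_reset() fails, while this variant also unwinds the DMAE IRQs on that path.

/* Sketch only: same sequence as sh_dmac_init(), with explicit unwind. */
static int __init sh_dmac_init_sketch(void)
{
	int i, rc;

	/* Grab the DMAE error IRQs first, where the part has them. */
	rc = dmae_irq_init();
	if (unlikely(rc != 0))
		return rc;

	/* Clear any stale error state in every DMAOR group. */
	for (i = 0; i < NR_DMAOR; i++) {
		rc = dmaor_reset(i);
		if (unlikely(rc != 0)) {
			dmae_irq_free();	/* illustrative unwind, not in the patch */
			return rc;
		}
	}

	return register_dmac(&sh_dmac_info);
}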
diff --git a/arch/sh/include/asm/dma-sh.h b/arch/sh/include/asm/dma-sh.h
deleted file mode 100644
index 9a4875a89636..000000000000
--- a/arch/sh/include/asm/dma-sh.h
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * arch/sh/include/asm/dma-sh.h
- *
- * Copyright (C) 2000 Takashi YOSHII
- * Copyright (C) 2003 Paul Mundt
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-#ifndef __DMA_SH_H
-#define __DMA_SH_H
-
-#include
-#include
-#include
-
-/* DMAOR contorl: The DMAOR access size is different by CPU.*/
-#if defined(CONFIG_CPU_SUBTYPE_SH7723)	|| \
-    defined(CONFIG_CPU_SUBTYPE_SH7724)	|| \
-    defined(CONFIG_CPU_SUBTYPE_SH7780)	|| \
-    defined(CONFIG_CPU_SUBTYPE_SH7785)
-#define dmaor_read_reg(n) \
-    (n ? __raw_readw(SH_DMAC_BASE1 + DMAOR) \
-	: __raw_readw(SH_DMAC_BASE0 + DMAOR))
-#define dmaor_write_reg(n, data) \
-    (n ? __raw_writew(data, SH_DMAC_BASE1 + DMAOR) \
-	: __raw_writew(data, SH_DMAC_BASE0 + DMAOR))
-#else /* Other CPU */
-#define dmaor_read_reg(n) __raw_readw(SH_DMAC_BASE0 + DMAOR)
-#define dmaor_write_reg(n, data) __raw_writew(data, SH_DMAC_BASE0 + DMAOR)
-#endif
-
-static int dmte_irq_map[] __maybe_unused = {
-#if (CONFIG_NR_ONCHIP_DMA_CHANNELS >= 4)
-    DMTE0_IRQ,
-    DMTE0_IRQ + 1,
-    DMTE0_IRQ + 2,
-    DMTE0_IRQ + 3,
-#endif
-#if (CONFIG_NR_ONCHIP_DMA_CHANNELS >= 6)
-    DMTE4_IRQ,
-    DMTE4_IRQ + 1,
-#endif
-#if (CONFIG_NR_ONCHIP_DMA_CHANNELS >= 8)
-    DMTE6_IRQ,
-    DMTE6_IRQ + 1,
-#endif
-#if (CONFIG_NR_ONCHIP_DMA_CHANNELS >= 12)
-    DMTE8_IRQ,
-    DMTE9_IRQ,
-    DMTE10_IRQ,
-    DMTE11_IRQ,
-#endif
-};
-
-/*
- * Define the default configuration for dual address memory-memory transfer.
- * The 0x400 value represents auto-request, external->external.
- */
-#define RS_DUAL	(DM_INC | SM_INC | 0x400 | TS_INDEX2VAL(XMIT_SZ_32BIT))
-
-/* DMA base address */
-static u32 dma_base_addr[] __maybe_unused = {
-#if (CONFIG_NR_ONCHIP_DMA_CHANNELS >= 4)
-	SH_DMAC_BASE0 + 0x00,	/* channel 0 */
-	SH_DMAC_BASE0 + 0x10,
-	SH_DMAC_BASE0 + 0x20,
-	SH_DMAC_BASE0 + 0x30,
-#endif
-#if (CONFIG_NR_ONCHIP_DMA_CHANNELS >= 6)
-	SH_DMAC_BASE0 + 0x50,
-	SH_DMAC_BASE0 + 0x60,
-#endif
-#if (CONFIG_NR_ONCHIP_DMA_CHANNELS >= 8)
-	SH_DMAC_BASE1 + 0x00,
-	SH_DMAC_BASE1 + 0x10,
-#endif
-#if (CONFIG_NR_ONCHIP_DMA_CHANNELS >= 12)
-	SH_DMAC_BASE1 + 0x20,
-	SH_DMAC_BASE1 + 0x30,
-	SH_DMAC_BASE1 + 0x50,
-	SH_DMAC_BASE1 + 0x60,	/* channel 11 */
-#endif
-};
-
-#endif /* __DMA_SH_H */
diff --git a/arch/sh/include/cpu-sh4a/cpu/dma.h b/arch/sh/include/cpu-sh4a/cpu/dma.h
index c276313104c7..89afb650ce25 100644
--- a/arch/sh/include/cpu-sh4a/cpu/dma.h
+++ b/arch/sh/include/cpu-sh4a/cpu/dma.h
@@ -9,20 +9,17 @@
 #define DMTE4_IRQ	evt2irq(0xb80)
 #define DMAE0_IRQ	evt2irq(0xbc0)	/* DMA Error IRQ*/
 #define SH_DMAC_BASE0	0xFE008020
-#define SH_DMARS_BASE0	0xFE009000
 #elif defined(CONFIG_CPU_SUBTYPE_SH7722)
 #define DMTE0_IRQ	evt2irq(0x800)
 #define DMTE4_IRQ	evt2irq(0xb80)
 #define DMAE0_IRQ	evt2irq(0xbc0)	/* DMA Error IRQ*/
 #define SH_DMAC_BASE0	0xFE008020
-#define SH_DMARS_BASE0	0xFE009000
 #elif defined(CONFIG_CPU_SUBTYPE_SH7763) || \
	defined(CONFIG_CPU_SUBTYPE_SH7764)
 #define DMTE0_IRQ	evt2irq(0x640)
 #define DMTE4_IRQ	evt2irq(0x780)
 #define DMAE0_IRQ	evt2irq(0x6c0)
 #define SH_DMAC_BASE0	0xFF608020
-#define SH_DMARS_BASE0	0xFF609000
 #elif defined(CONFIG_CPU_SUBTYPE_SH7723)
 #define DMTE0_IRQ	evt2irq(0x800)	/* DMAC0A*/
 #define DMTE4_IRQ	evt2irq(0xb80)	/* DMAC0B */
@@ -35,7 +32,6 @@
 #define DMAE1_IRQ	evt2irq(0xb40)	/* DMA Error IRQ*/
 #define SH_DMAC_BASE0	0xFE008020
 #define SH_DMAC_BASE1	0xFDC08020
-#define SH_DMARS_BASE0	0xFDC09000
 #elif defined(CONFIG_CPU_SUBTYPE_SH7724)
 #define DMTE0_IRQ	evt2irq(0x800)	/* DMAC0A*/
 #define DMTE4_IRQ	evt2irq(0xb80)	/* DMAC0B */
@@ -48,8 +44,6 @@
 #define DMAE1_IRQ	evt2irq(0xb40)	/* DMA Error IRQ*/
 #define SH_DMAC_BASE0	0xFE008020
 #define SH_DMAC_BASE1	0xFDC08020
-#define SH_DMARS_BASE0	0xFE009000
-#define SH_DMARS_BASE1	0xFDC09000
 #elif defined(CONFIG_CPU_SUBTYPE_SH7780)
 #define DMTE0_IRQ	evt2irq(0x640)
 #define DMTE4_IRQ	evt2irq(0x780)
@@ -61,7 +55,6 @@
 #define DMAE0_IRQ	evt2irq(0x6c0)	/* DMA Error IRQ */
 #define SH_DMAC_BASE0	0xFC808020
 #define SH_DMAC_BASE1	0xFC818020
-#define SH_DMARS_BASE0	0xFC809000
 #else /* SH7785 */
 #define DMTE0_IRQ	evt2irq(0x620)
 #define DMTE4_IRQ	evt2irq(0x6a0)
@@ -74,7 +67,6 @@
 #define DMAE1_IRQ	evt2irq(0x940)	/* DMA Error IRQ1 */
 #define SH_DMAC_BASE0	0xFC808020
 #define SH_DMAC_BASE1	0xFCC08020
-#define SH_DMARS_BASE0	0xFC809000
 #endif
 
 #endif /* __ASM_SH_CPU_SH4_DMA_SH7780_H */
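The IRQ numbers in this header are all expressed as evt2irq(<INTEVT code>). Assuming the usual SH mapping of evt2irq(evt) == ((evt) >> 5) - 16 — an assumption worth checking against arch/sh/include/asm/irq.h — the DMA error vectors quoted above work out as in this small sketch:

#include <stdio.h>

static unsigned int evt2irq(unsigned int evt)
{
	return (evt >> 5) - 16;	/* assumed SH INTEVT -> IRQ mapping */
}

int main(void)
{
	/* DMAE0_IRQ on SH7722/SH7723/SH7724, and on SH7780, respectively. */
	printf("evt2irq(0xbc0) = %u\n", evt2irq(0xbc0));
	printf("evt2irq(0x6c0) = %u\n", evt2irq(0x6c0));
	return 0;
}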