Diffstat (limited to 'drivers/dma')
 -rw-r--r--   drivers/dma/Kconfig                            |   6
 -rw-r--r--   drivers/dma/Makefile                           |   1
 -rw-r--r--   drivers/dma/bcm2708-dmaengine.c                | 281
 -rw-r--r--   drivers/dma/bcm2835-dma.c                      | 686
 -rw-r--r--   drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c | 188
5 files changed, 1019 insertions, 143 deletions
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index b8a74b1798ba..94fefca814bf 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -143,7 +143,7 @@ config BCM_SBA_RAID config DMA_BCM2835 tristate "BCM2835 DMA engine support" - depends on ARCH_BCM2835 + depends on ARCH_BCM2835 || ARCH_BCM2708 || ARCH_BCM2709 select DMA_ENGINE select DMA_VIRTUAL_CHANNELS @@ -685,6 +685,10 @@ config UNIPHIER_XDMAC UniPhier platform. This DMA controller can transfer data from memory to memory, memory to peripheral and peripheral to memory. +config DMA_BCM2708 + tristate "BCM2708 DMA legacy API support" + depends on DMA_BCM2835 + config XGENE_DMA tristate "APM X-Gene DMA support" depends on ARCH_XGENE || COMPILE_TEST diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index a54d7688392b..d45d964899b4 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile @@ -22,6 +22,7 @@ obj-$(CONFIG_AT_HDMAC) += at_hdmac.o obj-$(CONFIG_AT_XDMAC) += at_xdmac.o obj-$(CONFIG_AXI_DMAC) += dma-axi-dmac.o obj-$(CONFIG_BCM_SBA_RAID) += bcm-sba-raid.o +obj-$(CONFIG_DMA_BCM2708) += bcm2708-dmaengine.o obj-$(CONFIG_DMA_BCM2835) += bcm2835-dma.o obj-$(CONFIG_DMA_JZ4780) += dma-jz4780.o obj-$(CONFIG_DMA_SA11X0) += sa11x0-dma.o diff --git a/drivers/dma/bcm2708-dmaengine.c b/drivers/dma/bcm2708-dmaengine.c new file mode 100644 index 000000000000..a9a7f92584c8 --- /dev/null +++ b/drivers/dma/bcm2708-dmaengine.c @@ -0,0 +1,281 @@ +/* + * BCM2708 legacy DMA API + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/platform_data/dma-bcm2708.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/io.h> +#include <linux/spinlock.h> + +#include "virt-dma.h" + +#define CACHE_LINE_MASK 31 +#define DEFAULT_DMACHAN_BITMAP 0x10 /* channel 4 only */ + +/* valid only for channels 0 - 14, 15 has its own base address */ +#define BCM2708_DMA_CHAN(n) ((n) << 8) /* base address */ +#define BCM2708_DMA_CHANIO(dma_base, n) \ + ((void __iomem *)((char *)(dma_base) + BCM2708_DMA_CHAN(n))) + +struct vc_dmaman { + void __iomem *dma_base; + u32 chan_available; /* bitmap of available channels */ + u32 has_feature[BCM_DMA_FEATURE_COUNT]; /* bitmap of feature presence */ + struct mutex lock; +}; + +static struct device *dmaman_dev; /* we assume there's only one! */ +static struct vc_dmaman *g_dmaman; /* DMA manager */ + +/* DMA Auxiliary Functions */ + +/* A DMA buffer on an arbitrary boundary may separate a cache line into a + section inside the DMA buffer and another section outside it. + Even if we flush DMA buffers from the cache there is always the chance that + during a DMA someone will access the part of a cache line that is outside + the DMA buffer - which will then bring in unwelcome data. + Without being able to dictate our own buffer pools we must insist that + DMA buffers consist of a whole number of cache lines. 
+*/ +extern int bcm_sg_suitable_for_dma(struct scatterlist *sg_ptr, int sg_len) +{ + int i; + + for (i = 0; i < sg_len; i++) { + if (sg_ptr[i].offset & CACHE_LINE_MASK || + sg_ptr[i].length & CACHE_LINE_MASK) + return 0; + } + + return 1; +} +EXPORT_SYMBOL_GPL(bcm_sg_suitable_for_dma); + +extern void bcm_dma_start(void __iomem *dma_chan_base, + dma_addr_t control_block) +{ + dsb(sy); /* ARM data synchronization (push) operation */ + + writel(control_block, dma_chan_base + BCM2708_DMA_ADDR); + writel(BCM2708_DMA_ACTIVE, dma_chan_base + BCM2708_DMA_CS); +} +EXPORT_SYMBOL_GPL(bcm_dma_start); + +extern void bcm_dma_wait_idle(void __iomem *dma_chan_base) +{ + dsb(sy); + + /* ugly busy wait only option for now */ + while (readl(dma_chan_base + BCM2708_DMA_CS) & BCM2708_DMA_ACTIVE) + cpu_relax(); +} +EXPORT_SYMBOL_GPL(bcm_dma_wait_idle); + +extern bool bcm_dma_is_busy(void __iomem *dma_chan_base) +{ + dsb(sy); + + return readl(dma_chan_base + BCM2708_DMA_CS) & BCM2708_DMA_ACTIVE; +} +EXPORT_SYMBOL_GPL(bcm_dma_is_busy); + +/* Complete an ongoing DMA (assuming its results are to be ignored) + Does nothing if there is no DMA in progress. + This routine waits for the current AXI transfer to complete before + terminating the current DMA. If the current transfer is hung on a DREQ used + by an uncooperative peripheral the AXI transfer may never complete. In this + case the routine times out and return a non-zero error code. + Use of this routine doesn't guarantee that the ongoing or aborted DMA + does not produce an interrupt. +*/ +extern int bcm_dma_abort(void __iomem *dma_chan_base) +{ + unsigned long int cs; + int rc = 0; + + cs = readl(dma_chan_base + BCM2708_DMA_CS); + + if (BCM2708_DMA_ACTIVE & cs) { + long int timeout = 10000; + + /* write 0 to the active bit - pause the DMA */ + writel(0, dma_chan_base + BCM2708_DMA_CS); + + /* wait for any current AXI transfer to complete */ + while (0 != (cs & BCM2708_DMA_ISPAUSED) && --timeout >= 0) + cs = readl(dma_chan_base + BCM2708_DMA_CS); + + if (0 != (cs & BCM2708_DMA_ISPAUSED)) { + /* we'll un-pause when we set of our next DMA */ + rc = -ETIMEDOUT; + + } else if (BCM2708_DMA_ACTIVE & cs) { + /* terminate the control block chain */ + writel(0, dma_chan_base + BCM2708_DMA_NEXTCB); + + /* abort the whole DMA */ + writel(BCM2708_DMA_ABORT | BCM2708_DMA_ACTIVE, + dma_chan_base + BCM2708_DMA_CS); + } + } + + return rc; +} +EXPORT_SYMBOL_GPL(bcm_dma_abort); + + /* DMA Manager Device Methods */ + +static void vc_dmaman_init(struct vc_dmaman *dmaman, void __iomem *dma_base, + u32 chans_available) +{ + dmaman->dma_base = dma_base; + dmaman->chan_available = chans_available; + dmaman->has_feature[BCM_DMA_FEATURE_FAST_ORD] = 0x0c; /* 2 & 3 */ + dmaman->has_feature[BCM_DMA_FEATURE_BULK_ORD] = 0x01; /* 0 */ + dmaman->has_feature[BCM_DMA_FEATURE_NORMAL_ORD] = 0xfe; /* 1 to 7 */ + dmaman->has_feature[BCM_DMA_FEATURE_LITE_ORD] = 0x7f00; /* 8 to 14 */ +} + +static int vc_dmaman_chan_alloc(struct vc_dmaman *dmaman, + unsigned required_feature_set) +{ + u32 chans; + int chan = 0; + int feature; + + chans = dmaman->chan_available; + for (feature = 0; feature < BCM_DMA_FEATURE_COUNT; feature++) + /* select the subset of available channels with the desired + features */ + if (required_feature_set & (1 << feature)) + chans &= dmaman->has_feature[feature]; + + if (!chans) + return -ENOENT; + + /* return the ordinal of the first channel in the bitmap */ + while (chans != 0 && (chans & 1) == 0) { + chans >>= 1; + chan++; + } + /* claim the channel */ + dmaman->chan_available &= 
~(1 << chan); + + return chan; +} + +static int vc_dmaman_chan_free(struct vc_dmaman *dmaman, int chan) +{ + if (chan < 0) + return -EINVAL; + + if ((1 << chan) & dmaman->chan_available) + return -EIDRM; + + dmaman->chan_available |= (1 << chan); + + return 0; +} + +/* DMA Manager Monitor */ + +extern int bcm_dma_chan_alloc(unsigned required_feature_set, + void __iomem **out_dma_base, int *out_dma_irq) +{ + struct vc_dmaman *dmaman = g_dmaman; + struct platform_device *pdev = to_platform_device(dmaman_dev); + int chan; + int irq; + + if (!dmaman_dev) + return -ENODEV; + + mutex_lock(&dmaman->lock); + chan = vc_dmaman_chan_alloc(dmaman, required_feature_set); + if (chan < 0) + goto out; + + irq = platform_get_irq(pdev, (unsigned int)chan); + if (irq < 0) { + dev_err(dmaman_dev, "failed to get irq for DMA channel %d\n", + chan); + vc_dmaman_chan_free(dmaman, chan); + chan = -ENOENT; + goto out; + } + + *out_dma_base = BCM2708_DMA_CHANIO(dmaman->dma_base, chan); + *out_dma_irq = irq; + dev_dbg(dmaman_dev, + "Legacy API allocated channel=%d, base=%p, irq=%i\n", + chan, *out_dma_base, *out_dma_irq); + +out: + mutex_unlock(&dmaman->lock); + + return chan; +} +EXPORT_SYMBOL_GPL(bcm_dma_chan_alloc); + +extern int bcm_dma_chan_free(int channel) +{ + struct vc_dmaman *dmaman = g_dmaman; + int rc; + + if (!dmaman_dev) + return -ENODEV; + + mutex_lock(&dmaman->lock); + rc = vc_dmaman_chan_free(dmaman, channel); + mutex_unlock(&dmaman->lock); + + return rc; +} +EXPORT_SYMBOL_GPL(bcm_dma_chan_free); + +int bcm_dmaman_probe(struct platform_device *pdev, void __iomem *base, + u32 chans_available) +{ + struct device *dev = &pdev->dev; + struct vc_dmaman *dmaman; + + dmaman = devm_kzalloc(dev, sizeof(*dmaman), GFP_KERNEL); + if (!dmaman) + return -ENOMEM; + + mutex_init(&dmaman->lock); + vc_dmaman_init(dmaman, base, chans_available); + g_dmaman = dmaman; + dmaman_dev = dev; + + dev_info(dev, "DMA legacy API manager, dmachans=0x%x\n", + chans_available); + + return 0; +} +EXPORT_SYMBOL(bcm_dmaman_probe); + +int bcm_dmaman_remove(struct platform_device *pdev) +{ + dmaman_dev = NULL; + + return 0; +} +EXPORT_SYMBOL(bcm_dmaman_remove); + +MODULE_LICENSE("GPL"); diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c index 0117bb2e8591..70a4f4e5991f 100644 --- a/drivers/dma/bcm2835-dma.c +++ b/drivers/dma/bcm2835-dma.c @@ -18,6 +18,7 @@ * Copyright 2012 Marvell International Ltd. 
*/ #include <linux/dmaengine.h> +#include <linux/dma-direct.h> #include <linux/dma-mapping.h> #include <linux/dmapool.h> #include <linux/err.h> @@ -25,6 +26,7 @@ #include <linux/interrupt.h> #include <linux/list.h> #include <linux/module.h> +#include <linux/platform_data/dma-bcm2708.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/io.h> @@ -36,6 +38,13 @@ #define BCM2835_DMA_MAX_DMA_CHAN_SUPPORTED 14 #define BCM2835_DMA_CHAN_NAME_SIZE 8 +#define BCM2835_DMA_BULK_MASK BIT(0) +#define BCM2711_DMA_MEMCPY_CHAN 14 + +struct bcm2835_dma_cfg_data { + u64 dma_mask; + u32 chan_40bit_mask; +}; /** * struct bcm2835_dmadev - BCM2835 DMA controller @@ -48,6 +57,7 @@ struct bcm2835_dmadev { struct dma_device ddev; void __iomem *base; dma_addr_t zero_page; + const struct bcm2835_dma_cfg_data *cfg_data; }; struct bcm2835_dma_cb { @@ -60,6 +70,17 @@ struct bcm2835_dma_cb { uint32_t pad[2]; }; +struct bcm2711_dma40_scb { + uint32_t ti; + uint32_t src; + uint32_t srci; + uint32_t dst; + uint32_t dsti; + uint32_t len; + uint32_t next_cb; + uint32_t rsvd; +}; + struct bcm2835_cb_entry { struct bcm2835_dma_cb *cb; dma_addr_t paddr; @@ -80,6 +101,8 @@ struct bcm2835_chan { unsigned int irq_flags; bool is_lite_channel; + bool is_40bit_channel; + bool is_2712; }; struct bcm2835_desc { @@ -136,11 +159,37 @@ struct bcm2835_desc { #define BCM2835_DMA_S_WIDTH BIT(9) /* 128bit writes if set */ #define BCM2835_DMA_S_DREQ BIT(10) /* enable SREQ for source */ #define BCM2835_DMA_S_IGNORE BIT(11) /* ignore source reads - read 0 */ -#define BCM2835_DMA_BURST_LENGTH(x) ((x & 15) << 12) +#define BCM2835_DMA_BURST_LENGTH(x) (((x) & 15) << 12) +#define BCM2835_DMA_GET_BURST_LENGTH(x) (((x) >> 12) & 15) +#define BCM2835_DMA_CS_FLAGS(x) (x & (BCM2835_DMA_PRIORITY(15) | \ + BCM2835_DMA_PANIC_PRIORITY(15) | \ + BCM2835_DMA_WAIT_FOR_WRITES | \ + BCM2835_DMA_DIS_DEBUG)) #define BCM2835_DMA_PER_MAP(x) ((x & 31) << 16) /* REQ source */ #define BCM2835_DMA_WAIT(x) ((x & 31) << 21) /* add DMA-wait cycles */ #define BCM2835_DMA_NO_WIDE_BURSTS BIT(26) /* no 2 beat write bursts */ +/* A fake bit to request that the driver doesn't set the WAIT_RESP bit. */ +#define BCM2835_DMA_NO_WAIT_RESP BIT(27) +#define WAIT_RESP(x) ((x & BCM2835_DMA_NO_WAIT_RESP) ? \ + 0 : BCM2835_DMA_WAIT_RESP) + +/* A fake bit to request that the driver requires wide reads */ +#define BCM2835_DMA_WIDE_SOURCE BIT(24) +#define WIDE_SOURCE(x) ((x & BCM2835_DMA_WIDE_SOURCE) ? \ + BCM2835_DMA_S_WIDTH : 0) + +/* A fake bit to request that the driver requires wide writes */ +#define BCM2835_DMA_WIDE_DEST BIT(25) +#define WIDE_DEST(x) ((x & BCM2835_DMA_WIDE_DEST) ? \ + BCM2835_DMA_D_WIDTH : 0) + +/* A fake bit to request that the driver requires multi-beat burst */ +#define BCM2835_DMA_BURST BIT(30) +#define BURST_LENGTH(x) ((x & BCM2835_DMA_BURST) ? 
\ + BCM2835_DMA_BURST_LENGTH(3) : 0) + + /* debug register bits */ #define BCM2835_DMA_DEBUG_LAST_NOT_SET_ERR BIT(0) #define BCM2835_DMA_DEBUG_FIFO_ERR BIT(1) @@ -165,13 +214,130 @@ struct bcm2835_desc { #define BCM2835_DMA_DATA_TYPE_S128 16 /* Valid only for channels 0 - 14, 15 has its own base address */ -#define BCM2835_DMA_CHAN(n) ((n) << 8) /* Base address */ +#define BCM2835_DMA_CHAN_SIZE 0x100 +#define BCM2835_DMA_CHAN(n) ((n) * BCM2835_DMA_CHAN_SIZE) /* Base address */ #define BCM2835_DMA_CHANIO(base, n) ((base) + BCM2835_DMA_CHAN(n)) /* the max dma length for different channels */ #define MAX_DMA_LEN SZ_1G #define MAX_LITE_DMA_LEN (SZ_64K - 4) +/* 40-bit DMA support */ +#define BCM2711_DMA40_CS 0x00 +#define BCM2711_DMA40_CB 0x04 +#define BCM2711_DMA40_DEBUG 0x0c +#define BCM2711_DMA40_TI 0x10 +#define BCM2711_DMA40_SRC 0x14 +#define BCM2711_DMA40_SRCI 0x18 +#define BCM2711_DMA40_DEST 0x1c +#define BCM2711_DMA40_DESTI 0x20 +#define BCM2711_DMA40_LEN 0x24 +#define BCM2711_DMA40_NEXT_CB 0x28 +#define BCM2711_DMA40_DEBUG2 0x2c + +#define BCM2711_DMA40_ACTIVE BIT(0) +#define BCM2711_DMA40_END BIT(1) +#define BCM2711_DMA40_INT BIT(2) +#define BCM2711_DMA40_DREQ BIT(3) /* DREQ state */ +#define BCM2711_DMA40_RD_PAUSED BIT(4) /* Reading is paused */ +#define BCM2711_DMA40_WR_PAUSED BIT(5) /* Writing is paused */ +#define BCM2711_DMA40_DREQ_PAUSED BIT(6) /* Is paused by DREQ flow control */ +#define BCM2711_DMA40_WAITING_FOR_WRITES BIT(7) /* Waiting for last write */ +// we always want to run in supervisor mode +#define BCM2711_DMA40_PROT (BIT(8)|BIT(9)) +#define BCM2711_DMA40_ERR BIT(10) +#define BCM2711_DMA40_QOS(x) (((x) & 0x1f) << 16) +#define BCM2711_DMA40_PANIC_QOS(x) (((x) & 0x1f) << 20) +#define BCM2711_DMA40_TRANSACTIONS BIT(25) +#define BCM2711_DMA40_WAIT_FOR_WRITES BIT(28) +#define BCM2711_DMA40_DISDEBUG BIT(29) +#define BCM2711_DMA40_ABORT BIT(30) +#define BCM2711_DMA40_HALT BIT(31) + +#define BCM2711_DMA40_CS_FLAGS(x) (x & (BCM2711_DMA40_QOS(15) | \ + BCM2711_DMA40_PANIC_QOS(15) | \ + BCM2711_DMA40_WAIT_FOR_WRITES | \ + BCM2711_DMA40_DISDEBUG)) + +/* Transfer information bits */ +#define BCM2711_DMA40_INTEN BIT(0) +#define BCM2711_DMA40_TDMODE BIT(1) /* 2D-Mode */ +#define BCM2711_DMA40_WAIT_RESP BIT(2) /* wait for AXI write to be acked */ +#define BCM2711_DMA40_WAIT_RD_RESP BIT(3) /* wait for AXI read to complete */ +#define BCM2711_DMA40_PER_MAP(x) ((x & 31) << 9) /* REQ source */ +#define BCM2711_DMA40_S_DREQ BIT(14) /* enable SREQ for source */ +#define BCM2711_DMA40_D_DREQ BIT(15) /* enable DREQ for destination */ +#define BCM2711_DMA40_S_WAIT(x) ((x & 0xff) << 16) /* add DMA read-wait cycles */ +#define BCM2711_DMA40_D_WAIT(x) ((x & 0xff) << 24) /* add DMA write-wait cycles */ + +/* debug register bits */ +#define BCM2711_DMA40_DEBUG_WRITE_ERR BIT(0) +#define BCM2711_DMA40_DEBUG_FIFO_ERR BIT(1) +#define BCM2711_DMA40_DEBUG_READ_ERR BIT(2) +#define BCM2711_DMA40_DEBUG_READ_CB_ERR BIT(3) +#define BCM2711_DMA40_DEBUG_IN_ON_ERR BIT(8) +#define BCM2711_DMA40_DEBUG_ABORT_ON_ERR BIT(9) +#define BCM2711_DMA40_DEBUG_HALT_ON_ERR BIT(10) +#define BCM2711_DMA40_DEBUG_DISABLE_CLK_GATE BIT(11) +#define BCM2711_DMA40_DEBUG_RSTATE_SHIFT 14 +#define BCM2711_DMA40_DEBUG_RSTATE_BITS 4 +#define BCM2711_DMA40_DEBUG_WSTATE_SHIFT 18 +#define BCM2711_DMA40_DEBUG_WSTATE_BITS 4 +#define BCM2711_DMA40_DEBUG_RESET BIT(23) +#define BCM2711_DMA40_DEBUG_ID_SHIFT 24 +#define BCM2711_DMA40_DEBUG_ID_BITS 4 +#define BCM2711_DMA40_DEBUG_VERSION_SHIFT 28 +#define BCM2711_DMA40_DEBUG_VERSION_BITS 4 + +/* 
Valid only for channels 0 - 3 (11 - 14) */ +#define BCM2711_DMA40_CHAN(n) (((n) + 11) << 8) /* Base address */ +#define BCM2711_DMA40_CHANIO(base, n) ((base) + BCM2711_DMA_CHAN(n)) + +/* the max dma length for different channels */ +#define MAX_DMA40_LEN SZ_1G + +#define BCM2711_DMA40_BURST_LEN(x) (((x) & 15) << 8) +#define BCM2711_DMA40_INC BIT(12) +#define BCM2711_DMA40_SIZE_32 (0 << 13) +#define BCM2711_DMA40_SIZE_64 (1 << 13) +#define BCM2711_DMA40_SIZE_128 (2 << 13) +#define BCM2711_DMA40_SIZE_256 (3 << 13) +#define BCM2711_DMA40_IGNORE BIT(15) +#define BCM2711_DMA40_STRIDE(x) ((x) << 16) /* For 2D mode */ + +#define BCM2711_DMA40_MEMCPY_FLAGS \ + (BCM2711_DMA40_QOS(0) | \ + BCM2711_DMA40_PANIC_QOS(0) | \ + BCM2711_DMA40_WAIT_FOR_WRITES | \ + BCM2711_DMA40_DISDEBUG) + +#define BCM2711_DMA40_MEMCPY_XFER_INFO \ + (BCM2711_DMA40_SIZE_128 | \ + BCM2711_DMA40_INC | \ + BCM2711_DMA40_BURST_LEN(16)) + +struct bcm2835_dmadev *memcpy_parent; +static void __iomem *memcpy_chan; +static struct bcm2711_dma40_scb *memcpy_scb; +static dma_addr_t memcpy_scb_dma; +DEFINE_SPINLOCK(memcpy_lock); + +static const struct bcm2835_dma_cfg_data bcm2835_dma_cfg = { + .chan_40bit_mask = 0, + .dma_mask = DMA_BIT_MASK(32), +}; + +static const struct bcm2835_dma_cfg_data bcm2711_dma_cfg = { + .chan_40bit_mask = BIT(11) | BIT(12) | BIT(13) | BIT(14), + .dma_mask = DMA_BIT_MASK(36), +}; + +static const struct bcm2835_dma_cfg_data bcm2712_dma_cfg = { + .chan_40bit_mask = BIT(6) | BIT(7) | BIT(8) | BIT(9) | + BIT(10) | BIT(11), + .dma_mask = DMA_BIT_MASK(40), +}; + static inline size_t bcm2835_dma_max_frame_length(struct bcm2835_chan *c) { /* lite and normal channels have different max frame length */ @@ -201,6 +367,36 @@ static inline struct bcm2835_desc *to_bcm2835_dma_desc( return container_of(t, struct bcm2835_desc, vd.tx); } +static inline uint32_t to_bcm2711_ti(uint32_t info) +{ + return ((info & BCM2835_DMA_INT_EN) ? BCM2711_DMA40_INTEN : 0) | + ((info & BCM2835_DMA_WAIT_RESP) ? BCM2711_DMA40_WAIT_RESP : 0) | + ((info & BCM2835_DMA_S_DREQ) ? + (BCM2711_DMA40_S_DREQ | BCM2711_DMA40_WAIT_RD_RESP) : 0) | + ((info & BCM2835_DMA_D_DREQ) ? BCM2711_DMA40_D_DREQ : 0) | + BCM2711_DMA40_PER_MAP((info >> 16) & 0x1f); +} + +static inline uint32_t to_bcm2711_srci(uint32_t info) +{ + return ((info & BCM2835_DMA_S_INC) ? BCM2711_DMA40_INC : 0) | + ((info & BCM2835_DMA_S_WIDTH) ? BCM2711_DMA40_SIZE_128 : 0) | + BCM2711_DMA40_BURST_LEN(BCM2835_DMA_GET_BURST_LENGTH(info)); +} + +static inline uint32_t to_bcm2711_dsti(uint32_t info) +{ + return ((info & BCM2835_DMA_D_INC) ? BCM2711_DMA40_INC : 0) | + ((info & BCM2835_DMA_D_WIDTH) ? 
BCM2711_DMA40_SIZE_128 : 0) | + BCM2711_DMA40_BURST_LEN(BCM2835_DMA_GET_BURST_LENGTH(info)); +} + +static inline uint32_t to_40bit_cbaddr(dma_addr_t addr) +{ + BUG_ON(addr & 0x1f); + return (addr >> 5); +} + static void bcm2835_dma_free_cb_chain(struct bcm2835_desc *desc) { size_t i; @@ -219,45 +415,53 @@ static void bcm2835_dma_desc_free(struct virt_dma_desc *vd) } static void bcm2835_dma_create_cb_set_length( - struct bcm2835_chan *chan, + struct bcm2835_chan *c, struct bcm2835_dma_cb *control_block, size_t len, size_t period_len, size_t *total_len, u32 finalextrainfo) { - size_t max_len = bcm2835_dma_max_frame_length(chan); + size_t max_len = bcm2835_dma_max_frame_length(c); + uint32_t cb_len; /* set the length taking lite-channel limitations into account */ - control_block->length = min_t(u32, len, max_len); + cb_len = min_t(u32, len, max_len); - /* finished if we have no period_length */ - if (!period_len) - return; + if (period_len) { + /* + * period_len means: that we need to generate + * transfers that are terminating at every + * multiple of period_len - this is typically + * used to set the interrupt flag in info + * which is required during cyclic transfers + */ - /* - * period_len means: that we need to generate - * transfers that are terminating at every - * multiple of period_len - this is typically - * used to set the interrupt flag in info - * which is required during cyclic transfers - */ + /* have we filled in period_length yet? */ + if (*total_len + cb_len < period_len) { + /* update number of bytes in this period so far */ + *total_len += cb_len; + } else { + /* calculate the length that remains to reach period_len */ + cb_len = period_len - *total_len; - /* have we filled in period_length yet? */ - if (*total_len + control_block->length < period_len) { - /* update number of bytes in this period so far */ - *total_len += control_block->length; - return; + /* reset total_length for next period */ + *total_len = 0; + } } - /* calculate the length that remains to reach period_length */ - control_block->length = period_len - *total_len; + if (c->is_40bit_channel) { + struct bcm2711_dma40_scb *scb = + (struct bcm2711_dma40_scb *)control_block; - /* reset total_length for next period */ - *total_len = 0; - - /* add extrainfo bits in info */ - control_block->info |= finalextrainfo; + scb->len = cb_len; + /* add extrainfo bits to ti */ + scb->ti |= to_bcm2711_ti(finalextrainfo); + } else { + control_block->length = cb_len; + /* add extrainfo bits to info */ + control_block->info |= finalextrainfo; + } } static inline size_t bcm2835_dma_count_frames_for_sg( @@ -280,7 +484,7 @@ static inline size_t bcm2835_dma_count_frames_for_sg( /** * bcm2835_dma_create_cb_chain - create a control block and fills data in * - * @chan: the @dma_chan for which we run this + * @c: the @bcm2835_chan for which we run this * @direction: the direction in which we transfer * @cyclic: it is a cyclic transfer * @info: the default info bits to apply per controlblock @@ -298,12 +502,11 @@ static inline size_t bcm2835_dma_count_frames_for_sg( * @gfp: the GFP flag to use for allocation */ static struct bcm2835_desc *bcm2835_dma_create_cb_chain( - struct dma_chan *chan, enum dma_transfer_direction direction, + struct bcm2835_chan *c, enum dma_transfer_direction direction, bool cyclic, u32 info, u32 finalextrainfo, size_t frames, dma_addr_t src, dma_addr_t dst, size_t buf_len, size_t period_len, gfp_t gfp) { - struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); size_t len = buf_len, total_len; size_t frame; 
struct bcm2835_desc *d; @@ -335,11 +538,27 @@ static struct bcm2835_desc *bcm2835_dma_create_cb_chain( /* fill in the control block */ control_block = cb_entry->cb; - control_block->info = info; - control_block->src = src; - control_block->dst = dst; - control_block->stride = 0; - control_block->next = 0; + if (c->is_40bit_channel) { + struct bcm2711_dma40_scb *scb = + (struct bcm2711_dma40_scb *)control_block; + scb->ti = to_bcm2711_ti(info); + scb->src = lower_32_bits(src); + scb->srci= upper_32_bits(src) | to_bcm2711_srci(info); + scb->dst = lower_32_bits(dst); + scb->dsti = upper_32_bits(dst) | to_bcm2711_dsti(info); + scb->next_cb = 0; + } else { + control_block->info = info; + control_block->src = src; + control_block->dst = dst; + if (c->is_2712) + control_block->stride = (upper_32_bits(dst) << 8) | + upper_32_bits(src); + else + control_block->stride = 0; + control_block->next = 0; + } + /* set up length in control_block if requested */ if (buf_len) { /* calculate length honoring period_length */ @@ -349,25 +568,52 @@ static struct bcm2835_desc *bcm2835_dma_create_cb_chain( cyclic ? finalextrainfo : 0); /* calculate new remaining length */ - len -= control_block->length; + if (c->is_40bit_channel) + len -= ((struct bcm2711_dma40_scb *)control_block)->len; + else + len -= control_block->length; } /* link this the last controlblock */ - if (frame) - d->cb_list[frame - 1].cb->next = cb_entry->paddr; + if (frame && c->is_40bit_channel) + ((struct bcm2711_dma40_scb *) + d->cb_list[frame - 1].cb)->next_cb = + to_40bit_cbaddr(cb_entry->paddr); + if (frame && !c->is_40bit_channel) + d->cb_list[frame - 1].cb->next = c->is_2712 ? + to_40bit_cbaddr(cb_entry->paddr) : cb_entry->paddr; /* update src and dst and length */ - if (src && (info & BCM2835_DMA_S_INC)) - src += control_block->length; - if (dst && (info & BCM2835_DMA_D_INC)) - dst += control_block->length; + if (src && (info & BCM2835_DMA_S_INC)) { + if (c->is_40bit_channel) + src += ((struct bcm2711_dma40_scb *)control_block)->len; + else + src += control_block->length; + } + + if (dst && (info & BCM2835_DMA_D_INC)) { + if (c->is_40bit_channel) + dst += ((struct bcm2711_dma40_scb *)control_block)->len; + else + dst += control_block->length; + } /* Length of total transfer */ - d->size += control_block->length; + if (c->is_40bit_channel) + d->size += ((struct bcm2711_dma40_scb *)control_block)->len; + else + d->size += control_block->length; } /* the last frame requires extra flags */ - d->cb_list[d->frames - 1].cb->info |= finalextrainfo; + if (c->is_40bit_channel) { + struct bcm2711_dma40_scb *scb = + (struct bcm2711_dma40_scb *)d->cb_list[d->frames-1].cb; + + scb->ti |= to_bcm2711_ti(finalextrainfo); + } else { + d->cb_list[d->frames - 1].cb->info |= finalextrainfo; + } /* detect a size mismatch */ if (buf_len && (d->size != buf_len)) @@ -381,13 +627,12 @@ error_cb: } static void bcm2835_dma_fill_cb_chain_with_sg( - struct dma_chan *chan, + struct bcm2835_chan *c, enum dma_transfer_direction direction, struct bcm2835_cb_entry *cb, struct scatterlist *sgl, unsigned int sg_len) { - struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); size_t len, max_len; unsigned int i; dma_addr_t addr; @@ -395,14 +640,35 @@ static void bcm2835_dma_fill_cb_chain_with_sg( max_len = bcm2835_dma_max_frame_length(c); for_each_sg(sgl, sgent, sg_len, i) { - for (addr = sg_dma_address(sgent), len = sg_dma_len(sgent); - len > 0; - addr += cb->cb->length, len -= cb->cb->length, cb++) { - if (direction == DMA_DEV_TO_MEM) - cb->cb->dst = addr; - else - cb->cb->src 
= addr; - cb->cb->length = min(len, max_len); + if (c->is_40bit_channel) { + struct bcm2711_dma40_scb *scb; + + for (addr = sg_dma_address(sgent), + len = sg_dma_len(sgent); + len > 0; + addr += scb->len, len -= scb->len, cb++) { + scb = (struct bcm2711_dma40_scb *)cb->cb; + if (direction == DMA_DEV_TO_MEM) { + scb->dst = lower_32_bits(addr); + scb->dsti = upper_32_bits(addr) | BCM2711_DMA40_INC; + } else { + scb->src = lower_32_bits(addr); + scb->srci = upper_32_bits(addr) | BCM2711_DMA40_INC; + } + scb->len = min(len, max_len); + } + } else { + for (addr = sg_dma_address(sgent), + len = sg_dma_len(sgent); + len > 0; + addr += cb->cb->length, len -= cb->cb->length, + cb++) { + if (direction == DMA_DEV_TO_MEM) + cb->cb->dst = addr; + else + cb->cb->src = addr; + cb->cb->length = min(len, max_len); + } } } } @@ -410,29 +676,74 @@ static void bcm2835_dma_fill_cb_chain_with_sg( static void bcm2835_dma_abort(struct bcm2835_chan *c) { void __iomem *chan_base = c->chan_base; - long int timeout = 10000; + long timeout = 100; - /* - * A zero control block address means the channel is idle. - * (The ACTIVE flag in the CS register is not a reliable indicator.) - */ - if (!readl(chan_base + BCM2835_DMA_ADDR)) - return; + if (c->is_40bit_channel) { + /* + * A zero control block address means the channel is idle. + * (The ACTIVE flag in the CS register is not a reliable indicator.) + */ + if (!readl(chan_base + BCM2711_DMA40_CB)) + return; + + /* Pause the current DMA */ + writel(readl(chan_base + BCM2711_DMA40_CS) & ~BCM2711_DMA40_ACTIVE, + chan_base + BCM2711_DMA40_CS); + + /* wait for outstanding transactions to complete */ + while ((readl(chan_base + BCM2711_DMA40_CS) & BCM2711_DMA40_TRANSACTIONS) && + --timeout) + cpu_relax(); + + /* Peripheral might be stuck and fail to complete */ + if (!timeout) + dev_err(c->vc.chan.device->dev, + "failed to complete pause on dma %d (CS:%08x)\n", c->ch, + readl(chan_base + BCM2711_DMA40_CS)); + + /* Set CS back to default state */ + writel(BCM2711_DMA40_PROT, chan_base + BCM2711_DMA40_CS); + + /* Reset the DMA */ + writel(readl(chan_base + BCM2711_DMA40_DEBUG) | BCM2711_DMA40_DEBUG_RESET, + chan_base + BCM2711_DMA40_DEBUG); + } else { + /* + * A zero control block address means the channel is idle. + * (The ACTIVE flag in the CS register is not a reliable indicator.) 
+ */ + if (!readl(chan_base + BCM2835_DMA_ADDR)) + return; - /* Write 0 to the active bit - Pause the DMA */ - writel(0, chan_base + BCM2835_DMA_CS); + /* We need to clear the next DMA block pending */ + writel(0, chan_base + BCM2835_DMA_NEXTCB); - /* Wait for any current AXI transfer to complete */ - while ((readl(chan_base + BCM2835_DMA_CS) & - BCM2835_DMA_WAITING_FOR_WRITES) && --timeout) - cpu_relax(); + /* Abort the DMA, which needs to be enabled to complete */ + writel(readl(chan_base + BCM2835_DMA_CS) | BCM2835_DMA_ABORT | BCM2835_DMA_ACTIVE, + chan_base + BCM2835_DMA_CS); - /* Peripheral might be stuck and fail to signal AXI write responses */ - if (!timeout) - dev_err(c->vc.chan.device->dev, - "failed to complete outstanding writes\n"); + /* wait for DMA to be aborted */ + while ((readl(chan_base + BCM2835_DMA_CS) & BCM2835_DMA_ABORT) && --timeout) + cpu_relax(); - writel(BCM2835_DMA_RESET, chan_base + BCM2835_DMA_CS); + /* Write 0 to the active bit - Pause the DMA */ + writel(readl(chan_base + BCM2835_DMA_CS) & ~BCM2835_DMA_ACTIVE, + chan_base + BCM2835_DMA_CS); + + /* + * Peripheral might be stuck and fail to complete + * This is expected when dreqs are enabled but not asserted + * so only report error in non dreq case + */ + if (!timeout && !(readl(chan_base + BCM2835_DMA_TI) & + (BCM2835_DMA_S_DREQ | BCM2835_DMA_D_DREQ))) + dev_err(c->vc.chan.device->dev, + "failed to complete pause on dma %d (CS:%08x)\n", c->ch, + readl(chan_base + BCM2835_DMA_CS)); + + /* Set CS back to default state and reset the DMA */ + writel(BCM2835_DMA_RESET, chan_base + BCM2835_DMA_CS); + } } static void bcm2835_dma_start_desc(struct bcm2835_chan *c) @@ -449,8 +760,19 @@ static void bcm2835_dma_start_desc(struct bcm2835_chan *c) c->desc = d = to_bcm2835_dma_desc(&vd->tx); - writel(d->cb_list[0].paddr, c->chan_base + BCM2835_DMA_ADDR); - writel(BCM2835_DMA_ACTIVE, c->chan_base + BCM2835_DMA_CS); + if (c->is_40bit_channel) { + writel(to_40bit_cbaddr(d->cb_list[0].paddr), + c->chan_base + BCM2711_DMA40_CB); + writel(BCM2711_DMA40_ACTIVE | BCM2711_DMA40_PROT | BCM2711_DMA40_CS_FLAGS(c->dreq), + c->chan_base + BCM2711_DMA40_CS); + } else { + writel(BIT(31), c->chan_base + BCM2835_DMA_CS); + + writel(c->is_2712 ? to_40bit_cbaddr(d->cb_list[0].paddr) : d->cb_list[0].paddr, + c->chan_base + BCM2835_DMA_ADDR); + writel(BCM2835_DMA_ACTIVE | BCM2835_DMA_CS_FLAGS(c->dreq), + c->chan_base + BCM2835_DMA_CS); + } } static irqreturn_t bcm2835_dma_callback(int irq, void *data) @@ -477,8 +799,13 @@ static irqreturn_t bcm2835_dma_callback(int irq, void *data) * if this IRQ handler is threaded.) If the channel is finished, it * will remain idle despite the ACTIVE flag being set. 
*/ - writel(BCM2835_DMA_INT | BCM2835_DMA_ACTIVE, - c->chan_base + BCM2835_DMA_CS); + if (c->is_40bit_channel) + writel(BCM2835_DMA_INT | BCM2711_DMA40_ACTIVE | BCM2711_DMA40_PROT | + BCM2711_DMA40_CS_FLAGS(c->dreq), + c->chan_base + BCM2711_DMA40_CS); + else + writel(BCM2835_DMA_INT | BCM2835_DMA_ACTIVE | BCM2835_DMA_CS_FLAGS(c->dreq), + c->chan_base + BCM2835_DMA_CS); d = c->desc; @@ -540,20 +867,39 @@ static size_t bcm2835_dma_desc_size_pos(struct bcm2835_desc *d, dma_addr_t addr) unsigned int i; size_t size; - for (size = i = 0; i < d->frames; i++) { - struct bcm2835_dma_cb *control_block = d->cb_list[i].cb; - size_t this_size = control_block->length; - dma_addr_t dma; + if (d->c->is_40bit_channel) { + for (size = i = 0; i < d->frames; i++) { + struct bcm2711_dma40_scb *control_block = + (struct bcm2711_dma40_scb *)d->cb_list[i].cb; + size_t this_size = control_block->len; + dma_addr_t dma; - if (d->dir == DMA_DEV_TO_MEM) - dma = control_block->dst; - else - dma = control_block->src; + if (d->dir == DMA_DEV_TO_MEM) + dma = control_block->dst; + else + dma = control_block->src; + + if (size) + size += this_size; + else if (addr >= dma && addr < dma + this_size) + size += dma + this_size - addr; + } + } else { + for (size = i = 0; i < d->frames; i++) { + struct bcm2835_dma_cb *control_block = d->cb_list[i].cb; + size_t this_size = control_block->length; + dma_addr_t dma; - if (size) - size += this_size; - else if (addr >= dma && addr < dma + this_size) - size += dma + this_size - addr; + if (d->dir == DMA_DEV_TO_MEM) + dma = control_block->dst; + else + dma = control_block->src; + + if (size) + size += this_size; + else if (addr >= dma && addr < dma + this_size) + size += dma + this_size - addr; + } } return size; @@ -580,12 +926,25 @@ static enum dma_status bcm2835_dma_tx_status(struct dma_chan *chan, struct bcm2835_desc *d = c->desc; dma_addr_t pos; - if (d->dir == DMA_MEM_TO_DEV) + if (d->dir == DMA_MEM_TO_DEV && c->is_40bit_channel) { + u64 lo_bits, hi_bits; + + lo_bits = readl(c->chan_base + BCM2711_DMA40_SRC); + hi_bits = readl(c->chan_base + BCM2711_DMA40_SRCI) & 0xff; + pos = (hi_bits << 32) | lo_bits; + } else if (d->dir == DMA_MEM_TO_DEV && !c->is_40bit_channel) { pos = readl(c->chan_base + BCM2835_DMA_SOURCE_AD); - else if (d->dir == DMA_DEV_TO_MEM) + } else if (d->dir == DMA_DEV_TO_MEM && c->is_40bit_channel) { + u64 lo_bits, hi_bits; + + lo_bits = readl(c->chan_base + BCM2711_DMA40_DEST); + hi_bits = readl(c->chan_base + BCM2711_DMA40_DESTI) & 0xff; + pos = (hi_bits << 32) | lo_bits; + } else if (d->dir == DMA_DEV_TO_MEM && !c->is_40bit_channel) { pos = readl(c->chan_base + BCM2835_DMA_DEST_AD); - else + } else { pos = 0; + } txstate->residue = bcm2835_dma_desc_size_pos(d, pos); } else { @@ -615,8 +974,10 @@ static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_memcpy( { struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); struct bcm2835_desc *d; - u32 info = BCM2835_DMA_D_INC | BCM2835_DMA_S_INC; - u32 extra = BCM2835_DMA_INT_EN | BCM2835_DMA_WAIT_RESP; + u32 info = BCM2835_DMA_D_INC | BCM2835_DMA_S_INC | + WAIT_RESP(c->dreq) | WIDE_SOURCE(c->dreq) | + WIDE_DEST(c->dreq) | BURST_LENGTH(c->dreq); + u32 extra = BCM2835_DMA_INT_EN; size_t max_len = bcm2835_dma_max_frame_length(c); size_t frames; @@ -628,7 +989,7 @@ static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_memcpy( frames = bcm2835_dma_frames_for_length(len, max_len); /* allocate the CB chain - this also fills in the pointers */ - d = bcm2835_dma_create_cb_chain(chan, DMA_MEM_TO_MEM, false, + d = 
bcm2835_dma_create_cb_chain(c, DMA_MEM_TO_MEM, false, info, extra, frames, src, dst, len, 0, GFP_KERNEL); if (!d) @@ -646,7 +1007,8 @@ static struct dma_async_tx_descriptor *bcm2835_dma_prep_slave_sg( struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); struct bcm2835_desc *d; dma_addr_t src = 0, dst = 0; - u32 info = BCM2835_DMA_WAIT_RESP; + u32 info = WAIT_RESP(c->dreq) | WIDE_SOURCE(c->dreq) | + WIDE_DEST(c->dreq) | BURST_LENGTH(c->dreq); u32 extra = BCM2835_DMA_INT_EN; size_t frames; @@ -662,12 +1024,12 @@ static struct dma_async_tx_descriptor *bcm2835_dma_prep_slave_sg( if (direction == DMA_DEV_TO_MEM) { if (c->cfg.src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) return NULL; - src = c->cfg.src_addr; + src = phys_to_dma(chan->device->dev, c->cfg.src_addr); info |= BCM2835_DMA_S_DREQ | BCM2835_DMA_D_INC; } else { if (c->cfg.dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) return NULL; - dst = c->cfg.dst_addr; + dst = phys_to_dma(chan->device->dev, c->cfg.dst_addr); info |= BCM2835_DMA_D_DREQ | BCM2835_DMA_S_INC; } @@ -675,7 +1037,7 @@ static struct dma_async_tx_descriptor *bcm2835_dma_prep_slave_sg( frames = bcm2835_dma_count_frames_for_sg(c, sgl, sg_len); /* allocate the CB chain */ - d = bcm2835_dma_create_cb_chain(chan, direction, false, + d = bcm2835_dma_create_cb_chain(c, direction, false, info, extra, frames, src, dst, 0, 0, GFP_NOWAIT); @@ -683,7 +1045,7 @@ static struct dma_async_tx_descriptor *bcm2835_dma_prep_slave_sg( return NULL; /* fill in frames with scatterlist pointers */ - bcm2835_dma_fill_cb_chain_with_sg(chan, direction, d->cb_list, + bcm2835_dma_fill_cb_chain_with_sg(c, direction, d->cb_list, sgl, sg_len); return vchan_tx_prep(&c->vc, &d->vd, flags); @@ -698,7 +1060,8 @@ static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic( struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); struct bcm2835_desc *d; dma_addr_t src, dst; - u32 info = BCM2835_DMA_WAIT_RESP; + u32 info = WAIT_RESP(c->dreq) | WIDE_SOURCE(c->dreq) | + WIDE_DEST(c->dreq) | BURST_LENGTH(c->dreq); u32 extra = 0; size_t max_len = bcm2835_dma_max_frame_length(c); size_t frames; @@ -736,13 +1099,13 @@ static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic( if (direction == DMA_DEV_TO_MEM) { if (c->cfg.src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) return NULL; - src = c->cfg.src_addr; + src = phys_to_dma(chan->device->dev, c->cfg.src_addr); dst = buf_addr; info |= BCM2835_DMA_S_DREQ | BCM2835_DMA_D_INC; } else { if (c->cfg.dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) return NULL; - dst = c->cfg.dst_addr; + dst = phys_to_dma(chan->device->dev, c->cfg.dst_addr); src = buf_addr; info |= BCM2835_DMA_D_DREQ | BCM2835_DMA_S_INC; @@ -762,7 +1125,7 @@ static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic( * note that we need to use GFP_NOWAIT, as the ALSA i2s dmaengine * implementation calls prep_dma_cyclic with interrupts disabled. */ - d = bcm2835_dma_create_cb_chain(chan, direction, true, + d = bcm2835_dma_create_cb_chain(c, direction, true, info, extra, frames, src, dst, buf_len, period_len, GFP_NOWAIT); @@ -770,7 +1133,13 @@ static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic( return NULL; /* wrap around into a loop */ - d->cb_list[d->frames - 1].cb->next = d->cb_list[0].paddr; + if (c->is_40bit_channel) + ((struct bcm2711_dma40_scb *) + d->cb_list[frames - 1].cb)->next_cb = + to_40bit_cbaddr(d->cb_list[0].paddr); + else + d->cb_list[d->frames - 1].cb->next = c->is_2712 ? 
+ to_40bit_cbaddr(d->cb_list[0].paddr) : d->cb_list[0].paddr; return vchan_tx_prep(&c->vc, &d->vd, flags); } @@ -831,10 +1200,14 @@ static int bcm2835_dma_chan_init(struct bcm2835_dmadev *d, int chan_id, c->irq_number = irq; c->irq_flags = irq_flags; - /* check in DEBUG register if this is a LITE channel */ - if (readl(c->chan_base + BCM2835_DMA_DEBUG) & - BCM2835_DMA_DEBUG_LITE) + /* check for 40bit and lite channels */ + if (d->cfg_data->chan_40bit_mask & BIT(chan_id)) + c->is_40bit_channel = true; + else if (readl(c->chan_base + BCM2835_DMA_DEBUG) & + BCM2835_DMA_DEBUG_LITE) c->is_lite_channel = true; + if (d->cfg_data->dma_mask == DMA_BIT_MASK(40)) + c->is_2712 = true; return 0; } @@ -854,7 +1227,9 @@ static void bcm2835_dma_free(struct bcm2835_dmadev *od) } static const struct of_device_id bcm2835_dma_of_match[] = { - { .compatible = "brcm,bcm2835-dma", }, + { .compatible = "brcm,bcm2835-dma", .data = &bcm2835_dma_cfg }, + { .compatible = "brcm,bcm2711-dma", .data = &bcm2711_dma_cfg }, + { .compatible = "brcm,bcm2712-dma", .data = &bcm2712_dma_cfg }, {}, }; MODULE_DEVICE_TABLE(of, bcm2835_dma_of_match); @@ -898,7 +1273,10 @@ static const struct dev_pm_ops bcm2835_dma_pm_ops = { static int bcm2835_dma_probe(struct platform_device *pdev) { + const struct bcm2835_dma_cfg_data *cfg_data; + const struct of_device_id *of_id; struct bcm2835_dmadev *od; + struct resource *res; void __iomem *base; int rc; int i, j; @@ -906,11 +1284,20 @@ static int bcm2835_dma_probe(struct platform_device *pdev) int irq_flags; uint32_t chans_available; char chan_name[BCM2835_DMA_CHAN_NAME_SIZE]; + int chan_count, chan_start, chan_end; + + of_id = of_match_node(bcm2835_dma_of_match, pdev->dev.of_node); + if (!of_id) { + dev_err(&pdev->dev, "Failed to match compatible string\n"); + return -EINVAL; + } + + cfg_data = of_id->data; if (!pdev->dev.dma_mask) pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; - rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + rc = dma_set_mask_and_coherent(&pdev->dev, cfg_data->dma_mask); if (rc) { dev_err(&pdev->dev, "Unable to set DMA mask\n"); return rc; @@ -922,10 +1309,17 @@ static int bcm2835_dma_probe(struct platform_device *pdev) dma_set_max_seg_size(&pdev->dev, 0x3FFFFFFF); - base = devm_platform_ioremap_resource(pdev, 0); + base = devm_platform_get_and_ioremap_resource(pdev, 0, &res); if (IS_ERR(base)) return PTR_ERR(base); + /* The set of channels can be split across multiple instances. 
*/ + chan_start = ((u32)(uintptr_t)base / BCM2835_DMA_CHAN_SIZE) & 0xf; + base -= BCM2835_DMA_CHAN(chan_start); + chan_count = resource_size(res) / BCM2835_DMA_CHAN_SIZE; + chan_end = min(chan_start + chan_count, + BCM2835_DMA_MAX_DMA_CHAN_SUPPORTED + 1); + od->base = base; dma_cap_set(DMA_SLAVE, od->ddev.cap_mask); @@ -961,6 +1355,14 @@ static int bcm2835_dma_probe(struct platform_device *pdev) return -ENOMEM; } + of_id = of_match_node(bcm2835_dma_of_match, pdev->dev.of_node); + if (!of_id) { + dev_err(&pdev->dev, "Failed to match compatible string\n"); + return -EINVAL; + } + + od->cfg_data = cfg_data; + /* Request DMA channel mask from device tree */ if (of_property_read_u32(pdev->dev.of_node, "brcm,dma-channel-mask", @@ -970,8 +1372,36 @@ static int bcm2835_dma_probe(struct platform_device *pdev) goto err_no_dma; } +#ifdef CONFIG_DMA_BCM2708 + /* One channel is reserved for the legacy API */ + if (chans_available & BCM2835_DMA_BULK_MASK) { + rc = bcm_dmaman_probe(pdev, base, + chans_available & BCM2835_DMA_BULK_MASK); + if (rc) + dev_err(&pdev->dev, + "Failed to initialize the legacy API\n"); + + chans_available &= ~BCM2835_DMA_BULK_MASK; + } +#endif + + /* And possibly one for the 40-bit DMA memcpy API */ + if (chans_available & od->cfg_data->chan_40bit_mask & + BIT(BCM2711_DMA_MEMCPY_CHAN)) { + memcpy_parent = od; + memcpy_chan = BCM2835_DMA_CHANIO(base, BCM2711_DMA_MEMCPY_CHAN); + memcpy_scb = dma_alloc_coherent(memcpy_parent->ddev.dev, + sizeof(*memcpy_scb), + &memcpy_scb_dma, GFP_KERNEL); + if (!memcpy_scb) + dev_warn(&pdev->dev, + "Failed to allocated memcpy scb\n"); + + chans_available &= ~BIT(BCM2711_DMA_MEMCPY_CHAN); + } + /* get irqs for each channel that we support */ - for (i = 0; i <= BCM2835_DMA_MAX_DMA_CHAN_SUPPORTED; i++) { + for (i = chan_start; i < chan_end; i++) { /* skip masked out channels */ if (!(chans_available & (1 << i))) { irq[i] = -1; @@ -994,13 +1424,17 @@ static int bcm2835_dma_probe(struct platform_device *pdev) irq[i] = platform_get_irq(pdev, i < 11 ? 
i : 11); } + chan_count = 0; + /* get irqs for each channel */ - for (i = 0; i <= BCM2835_DMA_MAX_DMA_CHAN_SUPPORTED; i++) { + for (i = chan_start; i < chan_end; i++) { /* skip channels without irq */ if (irq[i] < 0) continue; /* check if there are other channels that also use this irq */ + /* FIXME: This will fail if interrupts are shared across + instances */ irq_flags = 0; for (j = 0; j <= BCM2835_DMA_MAX_DMA_CHAN_SUPPORTED; j++) if ((i != j) && (irq[j] == irq[i])) { @@ -1012,9 +1446,10 @@ static int bcm2835_dma_probe(struct platform_device *pdev) rc = bcm2835_dma_chan_init(od, i, irq[i], irq_flags); if (rc) goto err_no_dma; + chan_count++; } - dev_dbg(&pdev->dev, "Initialized %i DMA channels\n", i); + dev_dbg(&pdev->dev, "Initialized %i DMA channels\n", chan_count); /* Device-tree DMA controller registration */ rc = of_dma_controller_register(pdev->dev.of_node, @@ -1044,7 +1479,15 @@ static void bcm2835_dma_remove(struct platform_device *pdev) { struct bcm2835_dmadev *od = platform_get_drvdata(pdev); + bcm_dmaman_remove(pdev); dma_async_device_unregister(&od->ddev); + if (memcpy_parent == od) { + dma_free_coherent(&pdev->dev, sizeof(*memcpy_scb), memcpy_scb, + memcpy_scb_dma); + memcpy_parent = NULL; + memcpy_scb = NULL; + memcpy_chan = NULL; + } bcm2835_dma_free(od); } @@ -1058,7 +1501,22 @@ static struct platform_driver bcm2835_dma_driver = { }, }; -module_platform_driver(bcm2835_dma_driver); +static int bcm2835_dma_init(void) +{ + return platform_driver_register(&bcm2835_dma_driver); +} + +static void bcm2835_dma_exit(void) +{ + platform_driver_unregister(&bcm2835_dma_driver); +} + +/* + * Load after serial driver (arch_initcall) so we see the messages if it fails, + * but before drivers (module_init) that need a DMA channel. + */ +subsys_initcall(bcm2835_dma_init); +module_exit(bcm2835_dma_exit); MODULE_ALIAS("platform:bcm2835-dma"); MODULE_DESCRIPTION("BCM2835 DMA engine driver"); diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c index b23536645ff7..79b85c0f10d9 100644 --- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c +++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c @@ -12,6 +12,7 @@ #include <linux/device.h> #include <linux/dmaengine.h> #include <linux/dmapool.h> +#include <linux/dma-direct.h> #include <linux/dma-mapping.h> #include <linux/err.h> #include <linux/interrupt.h> @@ -95,6 +96,17 @@ axi_chan_iowrite64(struct axi_dma_chan *chan, u32 reg, u64 val) iowrite32(upper_32_bits(val), chan->chan_regs + reg + 4); } +static inline u64 +axi_chan_ioread64(struct axi_dma_chan *chan, u32 reg) +{ + /* + * We split one 64 bit read into two 32 bit reads as some HW doesn't + * support 64 bit access. 
+ */ + return ((u64)ioread32(chan->chan_regs + reg + 4) << 32) + + ioread32(chan->chan_regs + reg); +} + static inline void axi_chan_config_write(struct axi_dma_chan *chan, struct axi_dma_chan_config *config) { @@ -266,7 +278,18 @@ static void axi_dma_hw_init(struct axi_dma_chip *chip) { int ret; u32 i; + int retries = 1000; + axi_dma_iowrite32(chip, DMAC_RESET, 1); + while (axi_dma_ioread32(chip, DMAC_RESET)) { + retries--; + if (!retries) { + dev_err(chip->dev, "%s: DMAC failed to reset\n", + __func__); + return; + } + cpu_relax(); + } for (i = 0; i < chip->dw->hdata->nr_channels; i++) { axi_chan_irq_disable(&chip->dw->chan[i], DWAXIDMAC_IRQ_ALL); axi_chan_disable(&chip->dw->chan[i]); @@ -284,6 +307,15 @@ static u32 axi_chan_get_xfer_width(struct axi_dma_chan *chan, dma_addr_t src, return __ffs(src | dst | len | BIT(max_width)); } +static u32 axi_dma_encode_msize(u32 max_burst) +{ + if (max_burst <= 1) + return DWAXIDMAC_BURST_TRANS_LEN_1; + if (max_burst > 1024) + return DWAXIDMAC_BURST_TRANS_LEN_1024; + return fls(max_burst) - 2; +} + static inline const char *axi_chan_name(struct axi_dma_chan *chan) { return dma_chan_name(&chan->vc.chan); @@ -351,6 +383,48 @@ static void vchan_desc_put(struct virt_dma_desc *vdesc) axi_desc_put(vd_to_axi_desc(vdesc)); } +static u32 axi_dma_desc_src_pos(struct axi_dma_desc *desc, dma_addr_t addr) +{ + unsigned int idx = 0; + u32 pos = 0; + + while (pos < desc->length) { + struct axi_dma_hw_desc *hw_desc = &desc->hw_desc[idx++]; + u32 len = hw_desc->len; + dma_addr_t start = le64_to_cpu(hw_desc->lli->sar); + + if (addr >= start && addr <= (start + len)) { + pos += addr - start; + break; + } + + pos += len; + } + + return pos; +} + +static u32 axi_dma_desc_dst_pos(struct axi_dma_desc *desc, dma_addr_t addr) +{ + unsigned int idx = 0; + u32 pos = 0; + + while (pos < desc->length) { + struct axi_dma_hw_desc *hw_desc = &desc->hw_desc[idx++]; + u32 len = hw_desc->len; + dma_addr_t start = le64_to_cpu(hw_desc->lli->dar); + + if (addr >= start && addr <= (start + len)) { + pos += addr - start; + break; + } + + pos += len; + } + + return pos; +} + static enum dma_status dma_chan_tx_status(struct dma_chan *dchan, dma_cookie_t cookie, struct dma_tx_state *txstate) @@ -360,10 +434,7 @@ dma_chan_tx_status(struct dma_chan *dchan, dma_cookie_t cookie, enum dma_status status; u32 completed_length; unsigned long flags; - u32 completed_blocks; size_t bytes = 0; - u32 length; - u32 len; status = dma_cookie_status(dchan, cookie, txstate); if (status == DMA_COMPLETE || !txstate) @@ -372,16 +443,31 @@ dma_chan_tx_status(struct dma_chan *dchan, dma_cookie_t cookie, spin_lock_irqsave(&chan->vc.lock, flags); vdesc = vchan_find_desc(&chan->vc, cookie); - if (vdesc) { - length = vd_to_axi_desc(vdesc)->length; - completed_blocks = vd_to_axi_desc(vdesc)->completed_blocks; - len = vd_to_axi_desc(vdesc)->hw_desc[0].len; - completed_length = completed_blocks * len; - bytes = length - completed_length; + if (vdesc && vdesc == vchan_next_desc(&chan->vc)) { + /* This descriptor is in-progress */ + struct axi_dma_desc *desc = vd_to_axi_desc(vdesc); + dma_addr_t addr; + + if (chan->direction == DMA_MEM_TO_DEV) { + addr = axi_chan_ioread64(chan, CH_SAR); + completed_length = axi_dma_desc_src_pos(desc, addr); + } else if (chan->direction == DMA_DEV_TO_MEM) { + addr = axi_chan_ioread64(chan, CH_DAR); + completed_length = axi_dma_desc_dst_pos(desc, addr); + } else { + completed_length = 0; + } + bytes = desc->length - completed_length; + } else if (vdesc) { + /* Still in the queue so not 
started */ + bytes = vd_to_axi_desc(vdesc)->length; } - spin_unlock_irqrestore(&chan->vc.lock, flags); + if (chan->is_paused && status == DMA_IN_PROGRESS) + status = DMA_PAUSED; + dma_set_residue(txstate, bytes); + spin_unlock_irqrestore(&chan->vc.lock, flags); return status; } @@ -435,8 +521,6 @@ static void axi_chan_block_xfer_start(struct axi_dma_chan *chan, return; } - axi_dma_enable(chan->chip); - config.dst_multblk_type = DWAXIDMAC_MBLK_TYPE_LL; config.src_multblk_type = DWAXIDMAC_MBLK_TYPE_LL; config.tt_fc = DWAXIDMAC_TT_FC_MEM_TO_MEM_DMAC; @@ -499,9 +583,11 @@ static void dma_chan_issue_pending(struct dma_chan *dchan) { struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan); unsigned long flags; + bool was_empty; spin_lock_irqsave(&chan->vc.lock, flags); - if (vchan_issue_pending(&chan->vc)) + was_empty = list_empty(&chan->vc.desc_issued); + if (vchan_issue_pending(&chan->vc) && was_empty) axi_chan_start_first_queued(chan); spin_unlock_irqrestore(&chan->vc.lock, flags); } @@ -569,7 +655,7 @@ static void dw_axi_dma_set_hw_channel(struct axi_dma_chan *chan, bool set) unsigned long reg_value, val; if (!chip->apb_regs) { - dev_err(chip->dev, "apb_regs not initialized\n"); + dev_dbg(chip->dev, "apb_regs not initialized\n"); return; } @@ -657,34 +743,53 @@ static int dw_axi_dma_set_hw_desc(struct axi_dma_chan *chan, size_t axi_block_ts; size_t block_ts; u32 ctllo, ctlhi; - u32 burst_len; + u32 burst_len = 0, mem_burst_msize, reg_burst_msize; axi_block_ts = chan->chip->dw->hdata->block_size[chan->id]; mem_width = __ffs(data_width | mem_addr | len); - if (mem_width > DWAXIDMAC_TRANS_WIDTH_32) - mem_width = DWAXIDMAC_TRANS_WIDTH_32; if (!IS_ALIGNED(mem_addr, 4)) { dev_err(chan->chip->dev, "invalid buffer alignment\n"); return -EINVAL; } + /* Use a reasonable upper limit otherwise residue reporting granularity grows large */ + mem_burst_msize = axi_dma_encode_msize(16); + switch (chan->direction) { case DMA_MEM_TO_DEV: + reg_burst_msize = axi_dma_encode_msize(chan->config.dst_maxburst); reg_width = __ffs(chan->config.dst_addr_width); - device_addr = chan->config.dst_addr; + device_addr = phys_to_dma(chan->chip->dev, chan->config.dst_addr); ctllo = reg_width << CH_CTL_L_DST_WIDTH_POS | mem_width << CH_CTL_L_SRC_WIDTH_POS | + reg_burst_msize << CH_CTL_L_DST_MSIZE_POS | + mem_burst_msize << CH_CTL_L_SRC_MSIZE_POS | DWAXIDMAC_CH_CTL_L_NOINC << CH_CTL_L_DST_INC_POS | DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_SRC_INC_POS; block_ts = len >> mem_width; break; case DMA_DEV_TO_MEM: + reg_burst_msize = axi_dma_encode_msize(chan->config.src_maxburst); reg_width = __ffs(chan->config.src_addr_width); - device_addr = chan->config.src_addr; + /* + * For devices where transfer lengths are not known upfront, + * there is a danger when the destination is wider than the + * source that partial words can be lost at the end of a transfer. + * Ideally the controller would be able to flush the residue, but + * it can't - it's not even possible to tell that there is any. + * Instead, allow the client driver to avoid the problem by setting + * a smaller width. 
+ */ + if (chan->config.dst_addr_width && + (chan->config.dst_addr_width < mem_width)) + mem_width = chan->config.dst_addr_width; + device_addr = phys_to_dma(chan->chip->dev, chan->config.src_addr); ctllo = reg_width << CH_CTL_L_SRC_WIDTH_POS | mem_width << CH_CTL_L_DST_WIDTH_POS | + mem_burst_msize << CH_CTL_L_DST_MSIZE_POS | + reg_burst_msize << CH_CTL_L_SRC_MSIZE_POS | DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_DST_INC_POS | DWAXIDMAC_CH_CTL_L_NOINC << CH_CTL_L_SRC_INC_POS; block_ts = len >> reg_width; @@ -720,14 +825,17 @@ static int dw_axi_dma_set_hw_desc(struct axi_dma_chan *chan, } hw_desc->lli->block_ts_lo = cpu_to_le32(block_ts - 1); - - ctllo |= DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_DST_MSIZE_POS | - DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_SRC_MSIZE_POS; hw_desc->lli->ctl_lo = cpu_to_le32(ctllo); set_desc_src_master(hw_desc); hw_desc->len = len; + + if (burst_len && (chan->config.src_maxburst > burst_len)) + dev_warn_ratelimited(chan2dev(chan), + "%s: requested source burst length %u exceeds supported burst length %u - data may be lost\n", + axi_chan_name(chan), chan->config.src_maxburst, burst_len); + return 0; } @@ -744,9 +852,6 @@ static size_t calculate_block_len(struct axi_dma_chan *chan, case DMA_MEM_TO_DEV: data_width = BIT(chan->chip->dw->hdata->m_data_width); mem_width = __ffs(data_width | dma_addr | buf_len); - if (mem_width > DWAXIDMAC_TRANS_WIDTH_32) - mem_width = DWAXIDMAC_TRANS_WIDTH_32; - block_len = axi_block_ts << mem_width; break; case DMA_DEV_TO_MEM: @@ -817,6 +922,8 @@ dw_axi_dma_chan_prep_cyclic(struct dma_chan *dchan, dma_addr_t dma_addr, src_addr += segment_len; } + desc->nr_hw_descs = total_segments; + llp = desc->hw_desc[0].llp; /* Managed transfer list */ @@ -882,6 +989,9 @@ dw_axi_dma_chan_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl, mem = sg_dma_address(sg); len = sg_dma_len(sg); num_segments = DIV_ROUND_UP(sg_dma_len(sg), axi_block_len); + if (!num_segments) + continue; + segment_len = DIV_ROUND_UP(sg_dma_len(sg), num_segments); do { @@ -896,6 +1006,8 @@ dw_axi_dma_chan_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl, } while (len >= segment_len); } + desc->nr_hw_descs = loop; + /* Set end-of-link to the last link descriptor of list */ set_desc_last(&desc->hw_desc[num_sgs - 1]); @@ -1003,6 +1115,8 @@ dma_chan_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dst_adr, num++; } + desc->nr_hw_descs = num; + /* Set end-of-link to the last link descriptor of list */ set_desc_last(&desc->hw_desc[num - 1]); /* Managed transfer list */ @@ -1051,7 +1165,7 @@ static void axi_chan_dump_lli(struct axi_dma_chan *chan, static void axi_chan_list_dump_lli(struct axi_dma_chan *chan, struct axi_dma_desc *desc_head) { - int count = atomic_read(&chan->descs_allocated); + int count = desc_head->nr_hw_descs; int i; for (i = 0; i < count; i++) @@ -1094,11 +1208,11 @@ out: static void axi_chan_block_xfer_complete(struct axi_dma_chan *chan) { - int count = atomic_read(&chan->descs_allocated); struct axi_dma_hw_desc *hw_desc; struct axi_dma_desc *desc; struct virt_dma_desc *vd; unsigned long flags; + int count; u64 llp; int i; @@ -1120,6 +1234,7 @@ static void axi_chan_block_xfer_complete(struct axi_dma_chan *chan) if (chan->cyclic) { desc = vd_to_axi_desc(vd); if (desc) { + count = desc->nr_hw_descs; llp = lo_hi_readq(chan->chan_regs + CH_LLP); for (i = 0; i < count; i++) { hw_desc = &desc->hw_desc[i]; @@ -1397,6 +1512,10 @@ static int parse_device_properties(struct axi_dma_chip *chip) chip->dw->hdata->nr_masters = tmp; + ret = 
device_property_read_u32(dev, "snps,dma-targets", &tmp); + if (!ret && tmp > 16) + chip->dw->hdata->use_cfg2 = true; + ret = device_property_read_u32(dev, "snps,data-width", &tmp); if (ret) return ret; @@ -1467,6 +1586,7 @@ static int dw_probe(struct platform_device *pdev) struct dw_axi_dma *dw; struct dw_axi_dma_hcfg *hdata; struct reset_control *resets; + unsigned int max_seg_size; unsigned int flags; u32 i; int ret; @@ -1577,9 +1697,21 @@ static int dw_probe(struct platform_device *pdev) * Synopsis DesignWare AxiDMA datasheet mentioned Maximum * supported blocks is 1024. Device register width is 4 bytes. * Therefore, set constraint to 1024 * 4. + * However, if all channels specify a greater value, use that instead. */ + dw->dma.dev->dma_parms = &dw->dma_parms; - dma_set_max_seg_size(&pdev->dev, MAX_BLOCK_SIZE); + max_seg_size = UINT_MAX; + for (i = 0; i < dw->hdata->nr_channels; i++) { + unsigned int block_size = chip->dw->hdata->block_size[i]; + + if (!block_size) + block_size = MAX_BLOCK_SIZE; + max_seg_size = min(block_size, max_seg_size); + } + + dma_set_max_seg_size(&pdev->dev, max_seg_size); + platform_set_drvdata(pdev, chip); pm_runtime_enable(chip->dev); |
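For reference, the legacy interface exported by bcm2708-dmaengine.c above is meant to be driven roughly as follows. This is a minimal sketch, not part of the patch: the control block at `cb_handle` is assumed to have been built elsewhere in coherent memory, and the feature index is used exactly as vc_dmaman_chan_alloc() interprets it.

```c
/*
 * Hypothetical consumer of the legacy BCM2708 DMA API (sketch only).
 * Assumes a single control block has already been prepared at the bus
 * address cb_handle, 32-byte aligned as the hardware requires.
 */
#include <linux/platform_data/dma-bcm2708.h>

static int example_one_shot_dma(dma_addr_t cb_handle)
{
	void __iomem *chan_base;
	int chan, irq;

	/* request any channel offering the "normal" feature set */
	chan = bcm_dma_chan_alloc(1 << BCM_DMA_FEATURE_NORMAL_ORD,
				  &chan_base, &irq);
	if (chan < 0)
		return chan;

	/* load the control block address and set the ACTIVE bit */
	bcm_dma_start(chan_base, cb_handle);

	/* busy-wait until the CS register reports the channel idle */
	bcm_dma_wait_idle(chan_base);

	return bcm_dma_chan_free(chan);
}
```

A real client would normally also request the returned `irq` and call bcm_dma_abort() on its error path instead of waiting for idle.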

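The 40-bit ("DMA4") channels added for BCM2711/BCM2712 use the eight-word bcm2711_dma40_scb layout instead of the classic control block. The sketch below illustrates how the address fields are packed for the memcpy channel reserved in probe; the helper name is made up here (the actual memcpy routine is outside the hunks shown) and it simply reuses the BCM2711_DMA40_MEMCPY_* constants defined in the patch.

```c
/*
 * Illustration of the 40-bit scatter control block packing (sketch only;
 * the real memcpy helper is not part of the hunks above). The SCB lives
 * in coherent memory (memcpy_scb / memcpy_scb_dma from probe) and the
 * upper address bits share a word with the transfer-info flags.
 */
static void example_fill_dma40_memcpy_scb(struct bcm2711_dma40_scb *scb,
					  dma_addr_t dst, dma_addr_t src,
					  size_t len)
{
	scb->ti      = 0;			/* no DREQ, no interrupt */
	scb->src     = lower_32_bits(src);
	scb->srci    = upper_32_bits(src) | BCM2711_DMA40_MEMCPY_XFER_INFO;
	scb->dst     = lower_32_bits(dst);
	scb->dsti    = upper_32_bits(dst) | BCM2711_DMA40_MEMCPY_XFER_INFO;
	scb->len     = len;
	scb->next_cb = 0;			/* single block, no chain */
}
```

The channel would then be kicked by writing the 32-byte-aligned SCB bus address shifted right by 5 (see to_40bit_cbaddr()) to BCM2711_DMA40_CB and setting BCM2711_DMA40_ACTIVE | BCM2711_DMA40_PROT in BCM2711_DMA40_CS, mirroring what bcm2835_dma_start_desc() does for 40-bit channels.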