aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/spi
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/spi')
-rw-r--r--drivers/spi/Kconfig12
-rw-r--r--drivers/spi/Makefile1
-rw-r--r--drivers/spi/spi-bcm2835.c32
-rw-r--r--drivers/spi/spi-dw-core.c132
-rw-r--r--drivers/spi/spi-dw-dma.c45
-rw-r--r--drivers/spi/spi-dw-mmio.c8
-rw-r--r--drivers/spi/spi-dw.h3
-rw-r--r--drivers/spi/spi-gpio.c100
-rw-r--r--drivers/spi/spi-rp2040-gpio-bridge.c1244
-rw-r--r--drivers/spi/spi.c9
-rw-r--r--drivers/spi/spidev.c5
11 files changed, 1504 insertions, 87 deletions
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 55675750182e..c29e961d705d 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -931,6 +931,18 @@ config SPI_RB4XX
help
SPI controller driver for the Mikrotik RB4xx series boards.
+config SPI_RP2040_GPIO_BRIDGE
+ tristate "Raspberry Pi RP2040 GPIO Bridge"
+ depends on I2C && SPI && GPIOLIB
+ help
+ Support for the Raspberry Pi RP2040 GPIO bridge.
+
+ This driver provides support for the Raspberry Pi RP2040 GPIO bridge.
+ It can be used as a GPIO expander and a Tx-only SPI master.
+
+ Optionally, this driver is able to take advantage of Raspberry Pi RP1
+ GPIOs to achieve faster than I2C data transfer rates.
+
config SPI_RPCIF
tristate "Renesas RPC-IF SPI driver"
depends on RENESAS_RPCIF
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 8ff74a13faaa..ed8bd761fa9f 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -126,6 +126,7 @@ obj-$(CONFIG_SPI_ROCKCHIP_SFC) += spi-rockchip-sfc.o
obj-$(CONFIG_SPI_RB4XX) += spi-rb4xx.o
obj-$(CONFIG_MACH_REALTEK_RTL) += spi-realtek-rtl.o
obj-$(CONFIG_SPI_REALTEK_SNAND) += spi-realtek-rtl-snand.o
+obj-$(CONFIG_SPI_RP2040_GPIO_BRIDGE) += spi-rp2040-gpio-bridge.o
obj-$(CONFIG_SPI_RPCIF) += spi-rpc-if.o
obj-$(CONFIG_SPI_RSPI) += spi-rspi.o
obj-$(CONFIG_SPI_RZV2H_RSPI) += spi-rzv2h-rspi.o
diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
index 192cc5ef65fb..d034068220db 100644
--- a/drivers/spi/spi-bcm2835.c
+++ b/drivers/spi/spi-bcm2835.c
@@ -119,6 +119,7 @@ MODULE_PARM_DESC(polling_limit_us,
*/
struct bcm2835_spi {
void __iomem *regs;
+ phys_addr_t phys_addr;
struct clk *clk;
struct gpio_desc *cs_gpio;
unsigned long clk_hz;
@@ -891,19 +892,8 @@ static int bcm2835_dma_init(struct spi_controller *ctlr, struct device *dev,
struct bcm2835_spi *bs)
{
struct dma_slave_config slave_config;
- const __be32 *addr;
- dma_addr_t dma_reg_base;
int ret;
- /* base address in dma-space */
- addr = of_get_address(ctlr->dev.of_node, 0, NULL, NULL);
- if (!addr) {
- dev_err(dev, "could not get DMA-register address - not using dma mode\n");
- /* Fall back to interrupt mode */
- return 0;
- }
- dma_reg_base = be32_to_cpup(addr);
-
/* get tx/rx dma */
ctlr->dma_tx = dma_request_chan(dev, "tx");
if (IS_ERR(ctlr->dma_tx)) {
@@ -925,7 +915,7 @@ static int bcm2835_dma_init(struct spi_controller *ctlr, struct device *dev,
* or, in case of an RX-only transfer, cyclically copies from the zero
* page to the FIFO using a preallocated, reusable descriptor.
*/
- slave_config.dst_addr = (u32)(dma_reg_base + BCM2835_SPI_FIFO);
+ slave_config.dst_addr = bs->phys_addr + BCM2835_SPI_FIFO;
slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
ret = dmaengine_slave_config(ctlr->dma_tx, &slave_config);
@@ -964,9 +954,9 @@ static int bcm2835_dma_init(struct spi_controller *ctlr, struct device *dev,
* RX FIFO or, in case of a TX-only transfer, cyclically writes a
* precalculated value to the CS register to clear the RX FIFO.
*/
- slave_config.src_addr = (u32)(dma_reg_base + BCM2835_SPI_FIFO);
+ slave_config.src_addr = bs->phys_addr + BCM2835_SPI_FIFO;
slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
- slave_config.dst_addr = (u32)(dma_reg_base + BCM2835_SPI_CS);
+ slave_config.dst_addr = bs->phys_addr + BCM2835_SPI_CS;
slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
ret = dmaengine_slave_config(ctlr->dma_rx, &slave_config);
@@ -1059,6 +1049,16 @@ static int bcm2835_spi_transfer_one(struct spi_controller *ctlr,
unsigned long hz_per_byte, byte_limit;
u32 cs = target->prepare_cs;
+ if (unlikely(!tfr->len)) {
+ static int warned;
+
+ if (!warned)
+ dev_warn(&spi->dev,
+ "zero-length SPI transfer ignored\n");
+ warned = 1;
+ return 0;
+ }
+
/* set clock */
spi_hz = tfr->speed_hz;
@@ -1350,6 +1350,7 @@ static int bcm2835_spi_probe(struct platform_device *pdev)
{
struct spi_controller *ctlr;
struct bcm2835_spi *bs;
+ struct resource *iomem;
int err;
ctlr = devm_spi_alloc_host(&pdev->dev, sizeof(*bs));
@@ -1373,10 +1374,11 @@ static int bcm2835_spi_probe(struct platform_device *pdev)
bs = spi_controller_get_devdata(ctlr);
bs->ctlr = ctlr;
- bs->regs = devm_platform_ioremap_resource(pdev, 0);
+ bs->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &iomem);
if (IS_ERR(bs->regs))
return PTR_ERR(bs->regs);
+ bs->phys_addr = iomem->start;
bs->clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(bs->clk))
return dev_err_probe(&pdev->dev, PTR_ERR(bs->clk),
diff --git a/drivers/spi/spi-dw-core.c b/drivers/spi/spi-dw-core.c
index b3b883cb9541..fd000d131b48 100644
--- a/drivers/spi/spi-dw-core.c
+++ b/drivers/spi/spi-dw-core.c
@@ -100,7 +100,8 @@ void dw_spi_set_cs(struct spi_device *spi, bool enable)
* support active-high or active-low CS level.
*/
if (cs_high == enable)
- dw_writel(dws, DW_SPI_SER, BIT(spi_get_chipselect(spi, 0)));
+ dw_writel(dws, DW_SPI_SER,
+ BIT(spi_get_csgpiod(spi, 0) ? 0 : spi_get_chipselect(spi, 0)));
else
dw_writel(dws, DW_SPI_SER, 0);
}
@@ -201,7 +202,18 @@ int dw_spi_check_status(struct dw_spi *dws, bool raw)
/* Generically handle the erroneous situation */
if (ret) {
- dw_spi_reset_chip(dws);
+ /*
+ * Forcibly halting the controller can cause DMA to hang.
+ * Defer to dw_spi_handle_err outside of interrupt context
+ * and mask further interrupts for the current transfer.
+ */
+ if (dws->dma_mapped) {
+ dw_spi_mask_intr(dws, 0xff);
+ dw_readl(dws, DW_SPI_ICR);
+ } else {
+ dw_spi_reset_chip(dws);
+ }
+
if (dws->host->cur_msg)
dws->host->cur_msg->status = ret;
}
@@ -210,6 +222,32 @@ int dw_spi_check_status(struct dw_spi *dws, bool raw)
}
EXPORT_SYMBOL_NS_GPL(dw_spi_check_status, "SPI_DW_CORE");
+static inline bool dw_spi_ctlr_busy(struct dw_spi *dws)
+{
+ return dw_readl(dws, DW_SPI_SR) & DW_SPI_SR_BUSY;
+}
+
+static enum hrtimer_restart dw_spi_hrtimer_handler(struct hrtimer *hr)
+{
+ struct dw_spi *dws = container_of(hr, struct dw_spi, hrtimer);
+
+ if (!dw_spi_ctlr_busy(dws)) {
+ spi_finalize_current_transfer(dws->host);
+ return HRTIMER_NORESTART;
+ }
+
+ if (!dws->idle_wait_retries) {
+ dev_err(&dws->host->dev, "controller stuck at busy\n");
+ spi_finalize_current_transfer(dws->host);
+ return HRTIMER_NORESTART;
+ }
+
+ dws->idle_wait_retries--;
+ hrtimer_forward_now(hr, dws->idle_wait_interval);
+
+ return HRTIMER_RESTART;
+}
+
static irqreturn_t dw_spi_transfer_handler(struct dw_spi *dws)
{
u16 irq_status = dw_readl(dws, DW_SPI_ISR);
@@ -226,12 +264,32 @@ static irqreturn_t dw_spi_transfer_handler(struct dw_spi *dws)
* final stage of the transfer. By doing so we'll get the next IRQ
* right when the leftover incoming data is received.
*/
- dw_reader(dws);
- if (!dws->rx_len) {
- dw_spi_mask_intr(dws, 0xff);
- spi_finalize_current_transfer(dws->host);
- } else if (dws->rx_len <= dw_readl(dws, DW_SPI_RXFTLR)) {
- dw_writel(dws, DW_SPI_RXFTLR, dws->rx_len - 1);
+ if (dws->rx_len) {
+ dw_reader(dws);
+ if (!dws->rx_len) {
+ dw_spi_mask_intr(dws, 0xff);
+ spi_finalize_current_transfer(dws->host);
+ } else if (dws->rx_len <= dw_readl(dws, DW_SPI_RXFTLR)) {
+ dw_writel(dws, DW_SPI_RXFTLR, dws->rx_len - 1);
+ }
+ } else if (!dws->tx_len) {
+ dw_spi_mask_intr(dws, DW_SPI_INT_TXEI);
+ if (dw_spi_ctlr_busy(dws)) {
+ ktime_t period = ns_to_ktime(DIV_ROUND_UP(NSEC_PER_SEC, dws->current_freq));
+
+ /*
+ * Make the initial wait an underestimate of how long the transfer
+ * should take, then poll rapidly to reduce the delay
+ */
+ hrtimer_start(&dws->hrtimer,
+ period * (8 * dws->n_bytes - 1),
+ HRTIMER_MODE_REL);
+ dws->idle_wait_retries = 10;
+ dws->idle_wait_interval = period;
+ } else {
+ spi_finalize_current_transfer(dws->host);
+ }
+ return IRQ_HANDLED;
}
/*
@@ -241,8 +299,12 @@ static irqreturn_t dw_spi_transfer_handler(struct dw_spi *dws)
*/
if (irq_status & DW_SPI_INT_TXEI) {
dw_writer(dws);
- if (!dws->tx_len)
- dw_spi_mask_intr(dws, DW_SPI_INT_TXEI);
+ if (!dws->tx_len) {
+ if (dws->rx_len)
+ dw_spi_mask_intr(dws, DW_SPI_INT_TXEI);
+ else
+ dw_writel(dws, DW_SPI_TXFTLR, 0);
+ }
}
return IRQ_HANDLED;
@@ -337,7 +399,7 @@ void dw_spi_update_config(struct dw_spi *dws, struct spi_device *spi,
dw_writel(dws, DW_SPI_CTRLR1, cfg->ndf ? cfg->ndf - 1 : 0);
/* Note DW APB SSI clock divider doesn't support odd numbers */
- clk_div = (DIV_ROUND_UP(dws->max_freq, cfg->freq) + 1) & 0xfffe;
+ clk_div = min(DIV_ROUND_UP(dws->max_freq, cfg->freq) + 1, 0xfffe) & 0xfffe;
speed_hz = dws->max_freq / clk_div;
if (dws->current_freq != speed_hz) {
@@ -363,15 +425,18 @@ static void dw_spi_irq_setup(struct dw_spi *dws)
* will be adjusted at the final stage of the IRQ-based SPI transfer
* execution so not to lose the leftover of the incoming data.
*/
- level = min_t(unsigned int, dws->fifo_len / 2, dws->tx_len);
+ level = min_t(unsigned int, dws->fifo_len / 2, dws->tx_len ? dws->tx_len : dws->rx_len);
dw_writel(dws, DW_SPI_TXFTLR, level);
dw_writel(dws, DW_SPI_RXFTLR, level - 1);
dws->transfer_handler = dw_spi_transfer_handler;
- imask = DW_SPI_INT_TXEI | DW_SPI_INT_TXOI |
- DW_SPI_INT_RXUI | DW_SPI_INT_RXOI | DW_SPI_INT_RXFI;
+ imask = DW_SPI_INT_TXEI | DW_SPI_INT_TXOI;
+ if (dws->rx_len)
+ imask |= DW_SPI_INT_RXUI | DW_SPI_INT_RXOI | DW_SPI_INT_RXFI;
dw_spi_umask_intr(dws, imask);
+ if (!dws->tx_len)
+ dw_writel(dws, DW_SPI_DR, 0);
}
/*
@@ -394,18 +459,23 @@ static int dw_spi_poll_transfer(struct dw_spi *dws,
delay.unit = SPI_DELAY_UNIT_SCK;
nbits = dws->n_bytes * BITS_PER_BYTE;
+ if (!dws->tx_len)
+ dw_writel(dws, DW_SPI_DR, 0);
+
do {
- dw_writer(dws);
+ if (dws->tx_len)
+ dw_writer(dws);
delay.value = nbits * (dws->rx_len - dws->tx_len);
spi_delay_exec(&delay, transfer);
- dw_reader(dws);
+ if (dws->rx_len)
+ dw_reader(dws);
ret = dw_spi_check_status(dws, true);
if (ret)
return ret;
- } while (dws->rx_len);
+ } while (dws->rx_len || dws->tx_len || dw_spi_ctlr_busy(dws));
return 0;
}
@@ -420,6 +490,7 @@ static int dw_spi_transfer_one(struct spi_controller *host,
.dfs = transfer->bits_per_word,
.freq = transfer->speed_hz,
};
+ int buswidth;
int ret;
dws->dma_mapped = 0;
@@ -429,6 +500,23 @@ static int dw_spi_transfer_one(struct spi_controller *host,
dws->rx = transfer->rx_buf;
dws->rx_len = dws->tx_len;
+ if (!dws->rx) {
+ dws->rx_len = 0;
+ cfg.tmode = DW_SPI_CTRLR0_TMOD_TO;
+ }
+
+ if (!dws->rx) {
+ dws->rx_len = 0;
+ cfg.tmode = DW_SPI_CTRLR0_TMOD_TO;
+ }
+ if (!dws->tx) {
+ dws->tx_len = 0;
+ cfg.tmode = DW_SPI_CTRLR0_TMOD_RO;
+ cfg.ndf = dws->rx_len;
+ }
+ buswidth = transfer->rx_buf ? transfer->rx_nbits :
+ (transfer->tx_buf ? transfer->tx_nbits : 1);
+
/* Ensure the data above is visible for all CPUs */
smp_mb();
@@ -607,11 +695,6 @@ static int dw_spi_write_then_read(struct dw_spi *dws, struct spi_device *spi)
return 0;
}
-static inline bool dw_spi_ctlr_busy(struct dw_spi *dws)
-{
- return dw_readl(dws, DW_SPI_SR) & DW_SPI_SR_BUSY;
-}
-
static int dw_spi_wait_mem_op_done(struct dw_spi *dws)
{
int retry = DW_SPI_WAIT_RETRIES;
@@ -965,10 +1048,12 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
dev_warn(dev, "DMA init failed\n");
} else {
host->can_dma = dws->dma_ops->can_dma;
- host->flags |= SPI_CONTROLLER_MUST_TX;
}
}
+ hrtimer_init(&dws->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+ dws->hrtimer.function = dw_spi_hrtimer_handler;
+
ret = spi_register_controller(host);
if (ret) {
dev_err_probe(dev, ret, "problem registering spi host\n");
@@ -994,6 +1079,7 @@ void dw_spi_remove_host(struct dw_spi *dws)
{
dw_spi_debugfs_remove(dws);
+ hrtimer_cancel(&dws->hrtimer);
spi_unregister_controller(dws->host);
if (dws->dma_ops && dws->dma_ops->dma_exit)
diff --git a/drivers/spi/spi-dw-dma.c b/drivers/spi/spi-dw-dma.c
index b5bed02b7e50..c34c202d617e 100644
--- a/drivers/spi/spi-dw-dma.c
+++ b/drivers/spi/spi-dw-dma.c
@@ -6,6 +6,7 @@
*/
#include <linux/completion.h>
+#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/irqreturn.h>
@@ -329,7 +330,6 @@ static int dw_spi_dma_config_tx(struct dw_spi *dws)
txconf.direction = DMA_MEM_TO_DEV;
txconf.dst_addr = dws->dma_addr;
txconf.dst_maxburst = dws->txburst;
- txconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
txconf.dst_addr_width = dw_spi_dma_convert_width(dws->n_bytes);
txconf.device_fc = false;
@@ -430,7 +430,6 @@ static int dw_spi_dma_config_rx(struct dw_spi *dws)
rxconf.direction = DMA_DEV_TO_MEM;
rxconf.src_addr = dws->dma_addr;
rxconf.src_maxburst = dws->rxburst;
- rxconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
rxconf.src_addr_width = dw_spi_dma_convert_width(dws->n_bytes);
rxconf.device_fc = false;
@@ -470,13 +469,12 @@ static int dw_spi_dma_setup(struct dw_spi *dws, struct spi_transfer *xfer)
u16 imr, dma_ctrl;
int ret;
- if (!xfer->tx_buf)
- return -EINVAL;
-
/* Setup DMA channels */
- ret = dw_spi_dma_config_tx(dws);
- if (ret)
- return ret;
+ if (xfer->tx_buf) {
+ ret = dw_spi_dma_config_tx(dws);
+ if (ret)
+ return ret;
+ }
if (xfer->rx_buf) {
ret = dw_spi_dma_config_rx(dws);
@@ -485,13 +483,17 @@ static int dw_spi_dma_setup(struct dw_spi *dws, struct spi_transfer *xfer)
}
/* Set the DMA handshaking interface */
- dma_ctrl = DW_SPI_DMACR_TDMAE;
+ dma_ctrl = 0;
+ if (xfer->tx_buf)
+ dma_ctrl |= DW_SPI_DMACR_TDMAE;
if (xfer->rx_buf)
dma_ctrl |= DW_SPI_DMACR_RDMAE;
dw_writel(dws, DW_SPI_DMACR, dma_ctrl);
/* Set the interrupt mask */
- imr = DW_SPI_INT_TXOI;
+ imr = 0;
+ if (xfer->tx_buf)
+ imr |= DW_SPI_INT_TXOI;
if (xfer->rx_buf)
imr |= DW_SPI_INT_RXUI | DW_SPI_INT_RXOI;
dw_spi_umask_intr(dws, imr);
@@ -508,15 +510,16 @@ static int dw_spi_dma_transfer_all(struct dw_spi *dws,
{
int ret;
- /* Submit the DMA Tx transfer */
- ret = dw_spi_dma_submit_tx(dws, xfer->tx_sg.sgl, xfer->tx_sg.nents);
- if (ret)
- goto err_clear_dmac;
+ /* Submit the DMA Tx transfer if required */
+ if (xfer->tx_buf) {
+ ret = dw_spi_dma_submit_tx(dws, xfer->tx_sg.sgl, xfer->tx_sg.nents);
+ if (ret)
+ goto err_clear_dmac;
+ }
/* Submit the DMA Rx transfer if required */
if (xfer->rx_buf) {
- ret = dw_spi_dma_submit_rx(dws, xfer->rx_sg.sgl,
- xfer->rx_sg.nents);
+ ret = dw_spi_dma_submit_rx(dws, xfer->rx_sg.sgl, xfer->rx_sg.nents);
if (ret)
goto err_clear_dmac;
@@ -524,7 +527,15 @@ static int dw_spi_dma_transfer_all(struct dw_spi *dws,
dma_async_issue_pending(dws->rxchan);
}
- dma_async_issue_pending(dws->txchan);
+ if (xfer->tx_buf) {
+ dma_async_issue_pending(dws->txchan);
+ } else {
+ /* Pause to allow DMA channel to fetch RX descriptor */
+ usleep_range(5, 10);
+
+ /* Write something to the TX FIFO to start the transfer */
+ dw_writel(dws, DW_SPI_DR, 0);
+ }
ret = dw_spi_dma_wait(dws, xfer->len, xfer->effective_speed_hz);
diff --git a/drivers/spi/spi-dw-mmio.c b/drivers/spi/spi-dw-mmio.c
index 7a5197586919..8ab9bc488b4e 100644
--- a/drivers/spi/spi-dw-mmio.c
+++ b/drivers/spi/spi-dw-mmio.c
@@ -20,6 +20,7 @@
#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/reset.h>
+#include <linux/interrupt.h>
#include "spi-dw.h"
@@ -341,8 +342,11 @@ static int dw_spi_mmio_probe(struct platform_device *pdev)
dws->paddr = mem->start;
dws->irq = platform_get_irq(pdev, 0);
- if (dws->irq < 0)
- return dws->irq; /* -ENXIO */
+ if (dws->irq < 0) {
+ if (dws->irq != -ENXIO)
+ return dws->irq; /* -ENXIO */
+ dws->irq = IRQ_NOTCONNECTED;
+ }
dwsmmio->clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(dwsmmio->clk))
diff --git a/drivers/spi/spi-dw.h b/drivers/spi/spi-dw.h
index fc267c6437ae..92e54a51fc46 100644
--- a/drivers/spi/spi-dw.h
+++ b/drivers/spi/spi-dw.h
@@ -180,6 +180,9 @@ struct dw_spi {
u32 current_freq; /* frequency in hz */
u32 cur_rx_sample_dly;
u32 def_rx_sample_dly_ns;
+ struct hrtimer hrtimer;
+ ktime_t idle_wait_interval;
+ int idle_wait_retries;
/* Custom memory operations */
struct spi_controller_mem_ops mem_ops;
diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c
index c8dadb532c40..4a0497f0ba0a 100644
--- a/drivers/spi/spi-gpio.c
+++ b/drivers/spi/spi-gpio.c
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/property.h>
+#include <linux/delay.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
@@ -35,6 +36,8 @@ struct spi_gpio {
struct gpio_desc *miso;
struct gpio_desc *mosi;
struct gpio_desc **cs_gpios;
+ bool sck_idle_input;
+ bool cs_dont_invert;
};
/*----------------------------------------------------------------------*/
@@ -80,12 +83,18 @@ static inline int getmiso(const struct spi_device *spi)
}
/*
- * NOTE: this clocks "as fast as we can". It "should" be a function of the
- * requested device clock. Software overhead means we usually have trouble
- * reaching even one Mbit/sec (except when we can inline bitops), so for now
- * we'll just assume we never need additional per-bit slowdowns.
+ * Generic bit-banged GPIO SPI might free-run at something in the range
+ * 1Mbps ~ 10Mbps (depending on the platform), and some SPI devices may
+ * need to be clocked at a lower rate. ndelay() is often implemented by
+ * udelay() with rounding up, so do the delay only for nsecs >= 500
+ * (<= 1Mbps). The conditional test adds a small overhead.
*/
-#define spidelay(nsecs) do {} while (0)
+
+static inline void spidelay(unsigned long nsecs)
+{
+ if (nsecs >= 500)
+ ndelay(nsecs);
+}
#include "spi-bitbang-txrx.h"
@@ -196,16 +205,29 @@ static void spi_gpio_chipselect(struct spi_device *spi, int is_active)
struct spi_gpio *spi_gpio = spi_to_spi_gpio(spi);
/* set initial clock line level */
- if (is_active)
- gpiod_set_value_cansleep(spi_gpio->sck, spi->mode & SPI_CPOL);
+ if (is_active) {
+ if (spi_gpio->sck_idle_input)
+ gpiod_direction_output(spi_gpio->sck, spi->mode & SPI_CPOL);
+ else
+ gpiod_set_value_cansleep(spi_gpio->sck, spi->mode & SPI_CPOL);
+ }
- /* Drive chip select line, if we have one */
+ /*
+ * Drive chip select line, if we have one.
+ * SPI chip selects are normally active-low, but when
+ * cs_dont_invert is set, we assume their polarity is
+ * controlled by the GPIO, and write '1' to assert.
+ */
if (spi_gpio->cs_gpios) {
struct gpio_desc *cs = spi_gpio->cs_gpios[spi_get_chipselect(spi, 0)];
+ int val = ((spi->mode & SPI_CS_HIGH) || spi_gpio->cs_dont_invert) ?
+ is_active : !is_active;
- /* SPI chip selects are normally active-low */
- gpiod_set_value_cansleep(cs, (spi->mode & SPI_CS_HIGH) ? is_active : !is_active);
+ gpiod_set_value_cansleep(cs, val);
}
+
+ if (spi_gpio->sck_idle_input && !is_active)
+ gpiod_direction_input(spi_gpio->sck);
}
static void spi_gpio_set_mosi_idle(struct spi_device *spi)
@@ -225,11 +247,14 @@ static int spi_gpio_setup(struct spi_device *spi)
/*
* The CS GPIOs have already been
* initialized from the descriptor lookup.
+ * Here we set them to the non-asserted state.
*/
if (spi_gpio->cs_gpios) {
cs = spi_gpio->cs_gpios[spi_get_chipselect(spi, 0)];
if (!spi->controller_state && cs) {
- ret = gpiod_direction_output(cs, !(spi->mode & SPI_CS_HIGH));
+ ret = gpiod_direction_output(cs,
+ !((spi->mode & SPI_CS_HIGH) ||
+ spi_gpio->cs_dont_invert));
if (ret)
return ret;
}
@@ -301,35 +326,36 @@ static int spi_gpio_request(struct device *dev, struct spi_gpio *spi_gpio)
if (IS_ERR(spi_gpio->miso))
return PTR_ERR(spi_gpio->miso);
+ spi_gpio->sck_idle_input = device_property_read_bool(dev, "sck-idle-input");
spi_gpio->sck = devm_gpiod_get(dev, "sck", GPIOD_OUT_LOW);
return PTR_ERR_OR_ZERO(spi_gpio->sck);
}
-static int spi_gpio_probe_pdata(struct platform_device *pdev,
- struct spi_controller *host)
+/*
+ * In order to implement "sck-idle-input" (which requires SCK
+ * direction and CS level to be switched in a particular order),
+ * we need to control GPIO chip selects from within this driver.
+ */
+
+static int spi_gpio_probe_get_cs_gpios(struct device *dev,
+ struct spi_controller *master,
+ bool gpio_defines_polarity)
{
- struct device *dev = &pdev->dev;
- struct spi_gpio_platform_data *pdata = dev_get_platdata(dev);
- struct spi_gpio *spi_gpio = spi_controller_get_devdata(host);
int i;
+ struct spi_gpio *spi_gpio = spi_controller_get_devdata(master);
- if (!pdata)
- return -ENODEV;
-
- /* It's just one always-selected device, fine to continue */
- if (!pdata->num_chipselect)
- return 0;
-
- host->num_chipselect = pdata->num_chipselect;
- spi_gpio->cs_gpios = devm_kcalloc(dev, host->num_chipselect,
+ spi_gpio->cs_dont_invert = gpio_defines_polarity;
+ spi_gpio->cs_gpios = devm_kcalloc(dev, master->num_chipselect,
sizeof(*spi_gpio->cs_gpios),
GFP_KERNEL);
if (!spi_gpio->cs_gpios)
return -ENOMEM;
- for (i = 0; i < host->num_chipselect; i++) {
- spi_gpio->cs_gpios[i] = devm_gpiod_get_index(dev, "cs", i,
- GPIOD_OUT_HIGH);
+ for (i = 0; i < master->num_chipselect; i++) {
+ spi_gpio->cs_gpios[i] =
+ devm_gpiod_get_index(dev, "cs", i,
+ gpio_defines_polarity ?
+ GPIOD_OUT_LOW : GPIOD_OUT_HIGH);
if (IS_ERR(spi_gpio->cs_gpios[i]))
return PTR_ERR(spi_gpio->cs_gpios[i]);
}
@@ -337,6 +363,24 @@ static int spi_gpio_probe_pdata(struct platform_device *pdev,
return 0;
}
+static int spi_gpio_probe_pdata(struct platform_device *pdev,
+ struct spi_controller *host)
+{
+ struct device *dev = &pdev->dev;
+ struct spi_gpio_platform_data *pdata = dev_get_platdata(dev);
+
+ if (!pdata)
+ return -ENODEV;
+
+ /*
+ * The host needs to think there is a chipselect even if not
+ * connected
+ */
+ host->num_chipselect = pdata->num_chipselect ?: 1;
+
+ return spi_gpio_probe_get_cs_gpios(dev, host, false);
+}
+
static int spi_gpio_probe(struct platform_device *pdev)
{
int status;
diff --git a/drivers/spi/spi-rp2040-gpio-bridge.c b/drivers/spi/spi-rp2040-gpio-bridge.c
new file mode 100644
index 000000000000..6d7ce5a4a11d
--- /dev/null
+++ b/drivers/spi/spi-rp2040-gpio-bridge.c
@@ -0,0 +1,1244 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * RP2040 GPIO Bridge
+ *
+ * Copyright (C) 2023, 2024, Raspberry Pi Ltd
+ */
+
+#include <crypto/hash.h>
+#include <linux/debugfs.h>
+#include <linux/crypto.h>
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/gpio/driver.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/minmax.h>
+#include <linux/of_address.h>
+#include <linux/pm_runtime.h>
+#include <linux/spi/spi.h>
+#include <linux/stddef.h>
+#include <linux/types.h>
+
+#define MODULE_NAME "rp2040-gpio-bridge"
+
+#define I2C_RETRIES 4U
+
+#define ONE_KIB 1024U
+#define MD5_SUFFIX_SIZE 9U
+
+#define RP2040_GBDG_FLASH_BLOCK_SIZE (8U * ONE_KIB)
+#define RP2040_GBDG_BLOCK_SIZE (RP2040_GBDG_FLASH_BLOCK_SIZE - MD5_SUFFIX_SIZE)
+
+/*
+ * 1MiB transfer size is an arbitrary limit
+ * Max value is 4173330 (using a single manifest)
+ */
+#define MAX_TRANSFER_SIZE (1024U * ONE_KIB)
+
+#define HALF_BUFFER (4U * ONE_KIB)
+
+#define STATUS_SIZE 4
+#define MD5_DIGEST_SIZE 16
+#define VERSION_SIZE 4
+#define ID_SIZE 8
+#define TOTAL_RD_HDR_SIZE \
+ (STATUS_SIZE + MD5_DIGEST_SIZE + VERSION_SIZE + ID_SIZE)
+
+struct rp2040_gbdg_device_info {
+ u8 md5[MD5_DIGEST_SIZE];
+ u64 id;
+ u32 version;
+ u32 status;
+};
+
+static_assert(sizeof(struct rp2040_gbdg_device_info) == TOTAL_RD_HDR_SIZE);
+
+#define MANIFEST_UNIT_SIZE 16
+static_assert(MD5_DIGEST_SIZE == MANIFEST_UNIT_SIZE);
+#define MANIFEST_HEADER_UNITS 1
+#define MANIFEST_DATA_UNITS \
+ DIV_ROUND_UP(MAX_TRANSFER_SIZE, RP2040_GBDG_BLOCK_SIZE)
+
+#define STATUS_BUSY 0x01
+
+#define DIRECT_PREFIX 0x00
+#define DIRECT_CMD_CS 0x07
+#define DIRECT_CMD_EMIT 0x08
+
+#define WRITE_DATA_PREFIX 0x80
+#define WRITE_DATA_PREFIX_SIZE 1
+
+#define FIXED_SIZE_CMD_PREFIX 0x81
+
+#define WRITE_DATA_UPPER_PREFIX 0x82
+#define WRITE_DATA_UPPER_PREFIX_SIZE 1
+
+#define NUM_GPIO 24
+
+enum rp2040_gbdg_fixed_size_commands {
+ /* 10-byte commands */
+ CMD_SAVE_CACHE = 0x07,
+ CMD_SEND_RB = 0x08,
+ CMD_GPIO_ST_CL = 0x0b,
+ CMD_GPIO_OE = 0x0c,
+ CMD_DAT_RECV = 0x0d,
+ CMD_DAT_EMIT = 0x0e,
+ /* 18-byte commands */
+ CMD_READ_CSUM = 0x11,
+ CMD_SEND_MANI = 0x13,
+};
+
+struct rp2040_gbdg {
+ struct spi_controller *controller;
+
+ struct dentry *debugfs;
+ size_t transfer_progress;
+
+ struct i2c_client *client;
+ struct crypto_shash *shash;
+ struct shash_desc *shash_desc;
+
+ struct regulator *regulator;
+
+ struct gpio_chip gc;
+ u32 gpio_requested;
+ u32 gpio_direction;
+
+ bool fast_xfer_requires_i2c_lock;
+ struct gpio_descs *fast_xfer_gpios;
+ u32 fast_xfer_recv_gpio_base;
+ u8 fast_xfer_data_index;
+ u8 fast_xfer_clock_index;
+ void __iomem *gpio_base;
+ void __iomem *rio_base;
+
+ bool bypass_cache;
+
+ u8 buffer[2 + HALF_BUFFER];
+ u8 manifest_prep[(MANIFEST_HEADER_UNITS + MANIFEST_DATA_UNITS) *
+ MANIFEST_UNIT_SIZE];
+};
+
+static int rp2040_gbdg_gpio_dir_in(struct gpio_chip *gc, unsigned int offset);
+static void rp2040_gbdg_gpio_set(struct gpio_chip *gc, unsigned int offset,
+ int value);
+static int rp2040_gbdg_fast_xfer(struct rp2040_gbdg *priv_data, const u8 *data,
+ size_t len);
+
+static int rp2040_gbdg_rp1_calc_offsets(u8 gpio, size_t *bank_offset,
+ u8 *shift_offset)
+{
+ if (!bank_offset || !shift_offset || gpio >= 54)
+ return -EINVAL;
+ if (gpio < 28) {
+ *bank_offset = 0x0000;
+ *shift_offset = gpio;
+ } else if (gpio < 34) {
+ *bank_offset = 0x4000;
+ *shift_offset = gpio - 28;
+ } else {
+ *bank_offset = 0x8000;
+ *shift_offset = gpio - 34;
+ }
+
+ return 0;
+}
+
+static int rp2040_gbdg_calc_mux_offset(u8 gpio, size_t *offset)
+{
+ size_t bank_offset;
+ u8 shift_offset;
+ int ret;
+
+ ret = rp2040_gbdg_rp1_calc_offsets(gpio, &bank_offset, &shift_offset);
+ if (ret)
+ return ret;
+ *offset = bank_offset + shift_offset * 8 + 0x4;
+
+ return 0;
+}
+
+static int rp2040_gbdg_rp1_read_mux(struct rp2040_gbdg *priv_data, u8 gpio,
+ u32 *data)
+{
+ size_t offset;
+ int ret;
+
+ ret = rp2040_gbdg_calc_mux_offset(gpio, &offset);
+ if (ret)
+ return ret;
+
+ *data = readl(priv_data->gpio_base + offset);
+
+ return 0;
+}
+
+static int rp2040_gbdg_rp1_write_mux(struct rp2040_gbdg *priv_data, u8 gpio,
+ u32 val)
+{
+ size_t offset;
+ int ret;
+
+ ret = rp2040_gbdg_calc_mux_offset(gpio, &offset);
+ if (ret)
+ return ret;
+
+ writel(val, priv_data->gpio_base + offset);
+
+ return 0;
+}
+
+static size_t rp2040_gbdg_max_transfer_size(struct spi_device *spi)
+{
+ return MAX_TRANSFER_SIZE;
+}
+
+static int rp2040_gbdg_get_device_info(struct i2c_client *client,
+ struct rp2040_gbdg_device_info *info)
+{
+ u8 buf[TOTAL_RD_HDR_SIZE];
+ u8 retries = I2C_RETRIES;
+ u8 *read_pos = buf;
+ size_t field_size;
+ int ret;
+
+ do {
+ ret = i2c_master_recv(client, buf, sizeof(buf));
+ if (!retries--)
+ break;
+ } while (ret == -ETIMEDOUT);
+
+ if (ret != sizeof(buf))
+ return ret < 0 ? ret : -EIO;
+
+ field_size = sizeof_field(struct rp2040_gbdg_device_info, status);
+ memcpy(&info->status, read_pos, field_size);
+ read_pos += field_size;
+
+ field_size = sizeof_field(struct rp2040_gbdg_device_info, md5);
+ memcpy(&info->md5, read_pos, field_size);
+ read_pos += field_size;
+
+ field_size = sizeof_field(struct rp2040_gbdg_device_info, version);
+ memcpy(&info->version, read_pos, field_size);
+ read_pos += field_size;
+
+ field_size = sizeof_field(struct rp2040_gbdg_device_info, id);
+ memcpy(&info->id, read_pos, field_size);
+
+ return 0;
+}
+
+static int rp2040_gbdg_poll_device_info(struct i2c_client *client,
+ struct rp2040_gbdg_device_info *info)
+{
+ struct rp2040_gbdg_device_info itnl;
+ int ret;
+
+ itnl.status = STATUS_BUSY;
+
+ while (itnl.status & STATUS_BUSY) {
+ ret = rp2040_gbdg_get_device_info(client, &itnl);
+ if (ret)
+ return ret;
+ }
+ memcpy(info, &itnl, sizeof(itnl));
+
+ return 0;
+}
+
+static int rp2040_gbdg_get_buffer_hash(struct i2c_client *client, u8 *md5)
+{
+ struct rp2040_gbdg_device_info info;
+ int ret;
+
+ ret = rp2040_gbdg_poll_device_info(client, &info);
+ if (ret)
+ return ret;
+
+ memcpy(md5, info.md5, MD5_DIGEST_SIZE);
+
+ return 0;
+}
+
+static int rp2040_gbdg_wait_until_free(struct i2c_client *client, u8 *status)
+{
+ struct rp2040_gbdg_device_info info;
+ int ret;
+
+ ret = rp2040_gbdg_poll_device_info(client, &info);
+ if (ret)
+ return ret;
+
+ if (status)
+ *status = info.status;
+
+ return 0;
+}
+
+static int rp2040_gbdg_i2c_send(struct i2c_client *client, const u8 *buf,
+ size_t len)
+{
+ u8 retries = I2C_RETRIES;
+ int ret;
+
+ ret = rp2040_gbdg_wait_until_free(client, NULL);
+ if (ret) {
+ dev_err(&client->dev,
+ "%s() rp2040_gbdg_wait_until_free failed\n", __func__);
+ return ret;
+ }
+
+ do {
+ ret = i2c_master_send(client, buf, len);
+ if (!retries--)
+ break;
+ } while (ret == -ETIMEDOUT);
+
+ if (ret != len) {
+ dev_err(&client->dev, "%s() i2c_master_send returned %d\n",
+ __func__, ret);
+ return ret < 0 ? ret : -EIO;
+ }
+
+ return 0;
+}
+
+static int rp2040_gbdg_10byte_cmd(struct i2c_client *client, u8 cmd, u32 addr,
+ u32 len)
+{
+ u8 buffer[10];
+
+ buffer[0] = FIXED_SIZE_CMD_PREFIX;
+ buffer[1] = cmd;
+ memcpy(&buffer[2], &addr, sizeof(addr));
+ memcpy(&buffer[6], &len, sizeof(len));
+
+ return rp2040_gbdg_i2c_send(client, buffer, sizeof(buffer));
+}
+
+static int rp2040_gbdg_18byte_cmd(struct i2c_client *client, u8 cmd,
+ const u8 *digest)
+{
+ u8 buffer[18];
+
+ buffer[0] = FIXED_SIZE_CMD_PREFIX;
+ buffer[1] = cmd;
+ memcpy(&buffer[2], digest, MD5_DIGEST_SIZE);
+
+ return rp2040_gbdg_i2c_send(client, buffer, sizeof(buffer));
+}
+
+static int rp2040_gbdg_block_hash(struct rp2040_gbdg *priv_data, const u8 *data,
+ size_t len, u8 *out)
+{
+ size_t remaining = RP2040_GBDG_BLOCK_SIZE;
+ size_t pad;
+ int ret;
+
+ static const u8 padding[64] = {
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ };
+
+ if (len > RP2040_GBDG_BLOCK_SIZE) {
+ return -EMSGSIZE;
+ } else if (len == RP2040_GBDG_BLOCK_SIZE) {
+ return crypto_shash_digest(priv_data->shash_desc, data, len,
+ out);
+ } else {
+ ret = crypto_shash_init(priv_data->shash_desc);
+ if (ret)
+ return ret;
+
+ ret = crypto_shash_update(priv_data->shash_desc, data, len);
+ if (ret)
+ return ret;
+ remaining -= len;
+
+ /* Pad up-to a 64-byte boundary, unless that takes us over. */
+ pad = round_up(len, 64);
+ if (pad != len && pad < RP2040_GBDG_BLOCK_SIZE) {
+ ret = crypto_shash_update(priv_data->shash_desc,
+ padding, pad - len);
+ if (ret)
+ return ret;
+ remaining -= (pad - len);
+ }
+
+ /* Pad up-to RP2040_GBDG_BLOCK_SIZE in, preferably, 64-byte chunks */
+ while (remaining) {
+ pad = min_t(size_t, remaining, (size_t)64U);
+ ret = crypto_shash_update(priv_data->shash_desc,
+ padding, pad);
+ if (ret)
+ return ret;
+ remaining -= pad;
+ }
+ return crypto_shash_final(priv_data->shash_desc, out);
+ }
+}
+
+static int rp2040_gbdg_set_remote_buffer_fast(struct rp2040_gbdg *priv_data,
+ const u8 *data, unsigned int len)
+{
+ struct i2c_client *client = priv_data->client;
+ int ret;
+
+ if (len > RP2040_GBDG_BLOCK_SIZE)
+ return -EMSGSIZE;
+ if (!priv_data->fast_xfer_gpios)
+ return -EIO;
+
+ ret = rp2040_gbdg_10byte_cmd(client, CMD_DAT_RECV,
+ priv_data->fast_xfer_recv_gpio_base, len);
+ if (ret) {
+ dev_err(&client->dev, "%s() failed to enter fast data mode\n",
+ __func__);
+ return ret;
+ }
+
+ return rp2040_gbdg_fast_xfer(priv_data, data, len);
+}
+
+/*
+ * Load the device's rambuffer over plain I2C. The buffer is addressed in
+ * two halves: bytes up to HALF_BUFFER are written with WRITE_DATA_PREFIX,
+ * any remainder with WRITE_DATA_UPPER_PREFIX.
+ *
+ * Returns 0 on success, -EMSGSIZE if @len exceeds one block, or a
+ * negative errno from the I2C transfer.
+ */
+static int rp2040_gbdg_set_remote_buffer_i2c(struct rp2040_gbdg *priv_data,
+					     const u8 *data, unsigned int len)
+{
+	struct i2c_client *client = priv_data->client;
+	unsigned int write_len;
+	int ret;
+
+	if (len > RP2040_GBDG_BLOCK_SIZE)
+		return -EMSGSIZE;
+
+	/* Lower half: one prefix byte followed by up to HALF_BUFFER bytes. */
+	priv_data->buffer[0] = WRITE_DATA_PREFIX;
+	write_len = min(len, HALF_BUFFER);
+	memcpy(&priv_data->buffer[1], data, write_len);
+
+	ret = rp2040_gbdg_i2c_send(client, priv_data->buffer, write_len + 1);
+	if (ret)
+		return ret;
+
+	len -= write_len;
+	data += write_len;
+
+	if (!len)
+		return 0;
+
+	/* Upper half, if the payload spilled past HALF_BUFFER. */
+	priv_data->buffer[0] = WRITE_DATA_UPPER_PREFIX;
+	memcpy(&priv_data->buffer[1], data, len);
+	ret = rp2040_gbdg_i2c_send(client, priv_data->buffer, len + 1);
+
+	return ret;
+}
+
+/* Load the device's rambuffer, preferring the fast GPIO path when present. */
+static int rp2040_gbdg_set_remote_buffer(struct rp2040_gbdg *priv_data,
+					 const u8 *data, unsigned int len)
+{
+	if (!priv_data->fast_xfer_gpios)
+		return rp2040_gbdg_set_remote_buffer_i2c(priv_data, data, len);
+
+	return rp2040_gbdg_set_remote_buffer_fast(priv_data, data, len);
+}
+
+/* Loads data by checksum if available or resorts to sending byte-by-byte */
+/*
+ * Ensure the device's rambuffer holds @data (at most one block).
+ *
+ * The block's MD5 is computed locally and compared against the device's
+ * cache; the payload is only transmitted when the device does not already
+ * hold it. When @digest is non-NULL the local MD5 is copied out for the
+ * caller. When @persist is true the device is told to save the block to
+ * its cache after a successful upload.
+ *
+ * Returns 0 on success, -EMSGSIZE for oversized input, -EREMOTEIO if the
+ * upload could not be verified, or a negative errno from I2C/crypto.
+ */
+static int rp2040_gbdg_load_block_remote(struct rp2040_gbdg *priv_data,
+					 const void *data, unsigned int len,
+					 u8 *digest, bool persist)
+{
+	u8 ascii_digest[MD5_DIGEST_SIZE * 2 + 1] = { 0 };
+	struct i2c_client *client = priv_data->client;
+	u8 remote_digest[MD5_DIGEST_SIZE];
+	u8 local_digest[MD5_DIGEST_SIZE];
+	int ret;
+
+	if (len > RP2040_GBDG_BLOCK_SIZE)
+		return -EMSGSIZE;
+
+	ret = rp2040_gbdg_block_hash(priv_data, data, len, local_digest);
+	if (ret)
+		return ret;
+
+	if (digest)
+		memcpy(digest, local_digest, MD5_DIGEST_SIZE);
+
+	/* Check if the RP2040 has the data already */
+	ret = rp2040_gbdg_18byte_cmd(client, CMD_READ_CSUM, local_digest);
+	if (ret)
+		return ret;
+
+	ret = rp2040_gbdg_get_buffer_hash(client, remote_digest);
+	if (ret)
+		return ret;
+
+	if (memcmp(local_digest, remote_digest, MD5_DIGEST_SIZE)) {
+		bin2hex(ascii_digest, local_digest, MD5_DIGEST_SIZE);
+		dev_info(&client->dev, "%s() device missing data: %s\n",
+			 __func__, ascii_digest);
+		/*
+		 * N.B. We're fine to send (the potentially shorter) transfer->len
+		 * number of bytes here as the RP2040 will pad with 0xFF up to buffer
+		 * size once we stop sending.
+		 */
+		ret = rp2040_gbdg_set_remote_buffer(priv_data, data, len);
+		if (ret)
+			return ret;
+
+		/* Make sure the data actually arrived. */
+		ret = rp2040_gbdg_get_buffer_hash(client, remote_digest);
+		if (ret)
+			return ret;
+		if (memcmp(local_digest, remote_digest, MD5_DIGEST_SIZE)) {
+			dev_err(&priv_data->client->dev,
+				"%s() unable to send data to device\n",
+				__func__);
+			return -EREMOTEIO;
+		}
+
+		if (persist) {
+			dev_info(&client->dev,
+				 "%s() sent missing data to device, saving\n",
+				 __func__);
+			ret = rp2040_gbdg_10byte_cmd(client, CMD_SAVE_CACHE, 0,
+						     0);
+			if (ret)
+				return ret;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Push one block (at most RP2040_GBDG_BLOCK_SIZE bytes) out over SPI:
+ * make sure the remote rambuffer holds @data, command the device to emit
+ * it, then wait until the device reports idle.
+ */
+static int rp2040_gbdg_transfer_block(struct rp2040_gbdg *priv_data,
+				      const void *data, unsigned int len)
+{
+	struct i2c_client *client = priv_data->client;
+	int ret;
+
+	if (len > RP2040_GBDG_BLOCK_SIZE)
+		return -EMSGSIZE;
+
+	ret = rp2040_gbdg_load_block_remote(priv_data, data, len, NULL, true);
+	if (ret)
+		return ret;
+
+	/* Remote rambuffer now has correct contents, send it */
+	ret = rp2040_gbdg_10byte_cmd(client, CMD_SEND_RB, 0, len);
+	if (ret)
+		return ret;
+
+	/*
+	 * Wait for data to have actually completed sending as we may be
+	 * de-asserting CS too quickly otherwise.
+	 */
+	return rp2040_gbdg_wait_until_free(client, NULL);
+}
+
+/*
+ * Transfer @len bytes by manifest: build a "DATA_MANFST" header (magic +
+ * total length + per-block MD5 digests), upload the manifest itself as a
+ * cached block, then issue CMD_SEND_MANI so the device streams the blocks
+ * it already holds.
+ *
+ * Returns 0 on success, a negative errno on communication failure, or the
+ * device's positive SEND_MANI status byte on remote failure — callers
+ * treat any non-zero return as "fall back to per-block transfer".
+ */
+static int rp2040_gbdg_transfer_manifest(struct rp2040_gbdg *priv_data,
+					 const u8 *data, unsigned int len)
+{
+	struct i2c_client *client = priv_data->client;
+	static const char magic[] = "DATA_MANFST";
+	unsigned int remaining = len;
+	const u32 data_length = len;
+	u8 digest[MD5_DIGEST_SIZE];
+	u8 *digest_write_pos;
+	u8 status;
+	int ret;
+
+	/* Header: magic (incl. NUL) followed by the 32-bit payload length. */
+	memcpy(priv_data->manifest_prep, magic, sizeof(magic));
+	memcpy(priv_data->manifest_prep + sizeof(magic), &data_length,
+	       sizeof(data_length));
+	digest_write_pos =
+		priv_data->manifest_prep + sizeof(magic) + sizeof(data_length);
+
+	/* Append one MD5 digest per RP2040_GBDG_BLOCK_SIZE block of @data. */
+	while (remaining) {
+		unsigned int size = min(remaining, RP2040_GBDG_BLOCK_SIZE);
+
+		ret = rp2040_gbdg_block_hash(priv_data, data, size,
+					     digest_write_pos);
+		if (ret)
+			return ret;
+
+		remaining -= size;
+		data += size;
+		digest_write_pos += MD5_DIGEST_SIZE;
+	}
+
+	/* The manifest itself is uploaded (and persisted) as a cached block. */
+	ret = rp2040_gbdg_load_block_remote(
+		priv_data, priv_data->manifest_prep,
+		digest_write_pos - priv_data->manifest_prep, digest, true);
+	if (ret)
+		return ret;
+
+	dev_info(&client->dev, "%s() issue CMD_SEND_MANI\n", __func__);
+	ret = rp2040_gbdg_18byte_cmd(client, CMD_SEND_MANI, digest);
+	if (ret)
+		return ret;
+
+	ret = rp2040_gbdg_wait_until_free(client, &status);
+	if (ret)
+		return ret;
+
+	dev_info(&client->dev, "%s() SEND_MANI response: %02x\n", __func__,
+		 status);
+
+	/* 0 == device accepted the whole manifest; non-zero is its error code. */
+	return status;
+}
+
+/* Precondition: correctly initialised fast_xfer_*, gpio_base, rio_base */
+/* Precondition: correctly initialised fast_xfer_*, gpio_base, rio_base */
+static int rp2040_gbdg_fast_xfer(struct rp2040_gbdg *priv_data, const u8 *data,
+				 size_t len)
+{
+	struct i2c_client *client = priv_data->client;
+	void __iomem *clock_toggle;
+	void __iomem *data_set;
+	size_t clock_bank;
+	size_t data_bank;
+	u8 clock_offset;
+	u8 data_offset;
+	u32 clock_mux;
+	u32 data_mux;
+
+	/*
+	 * Optionally hold the I2C bus for the duration of the bit-bang so
+	 * no other client can talk while the pins are repurposed.
+	 */
+	if (priv_data->fast_xfer_requires_i2c_lock)
+		i2c_lock_bus(client->adapter, I2C_LOCK_ROOT_ADAPTER);
+
+	/* Save the current pin muxing so it can be restored afterwards. */
+	rp2040_gbdg_rp1_read_mux(priv_data, priv_data->fast_xfer_data_index,
+				 &data_mux);
+	rp2040_gbdg_rp1_read_mux(priv_data, priv_data->fast_xfer_clock_index,
+				 &clock_mux);
+
+	/* Idle levels: data high, clock low. */
+	gpiod_direction_output(priv_data->fast_xfer_gpios->desc[0], 1);
+	gpiod_direction_output(priv_data->fast_xfer_gpios->desc[1], 0);
+
+	rp2040_gbdg_rp1_calc_offsets(priv_data->fast_xfer_data_index,
+				     &data_bank, &data_offset);
+	rp2040_gbdg_rp1_calc_offsets(priv_data->fast_xfer_clock_index,
+				     &clock_bank, &clock_offset);
+
+	data_set = priv_data->rio_base + data_bank + 0x2000; /* SET offset */
+	clock_toggle =
+		priv_data->rio_base + clock_bank + 0x1000; /* XOR offset */
+
+	while (len--) {
+		/* MSB first ordering */
+		u32 d = ~(*data++) << 4U;
+		/*
+		 * Clock out each bit of data, LSB first
+		 * (DDR, achieves approx 5 Mbps)
+		 */
+		for (size_t i = 0; i < 8; i++) {
+			/*
+			 * Branchless set/clr data: the inverted, pre-shifted
+			 * byte steers each write to either the RIO SET
+			 * register (data_set) or, +0x1000 further on, the
+			 * CLR register, depending on the current bit.
+			 */
+			writel(1 << data_offset,
+			       data_set + ((d <<= 1) & 0x1000) /* CLR offset */
+			);
+
+			/* Toggle the clock: data is sampled on both edges (DDR). */
+			writel(1 << clock_offset, clock_toggle);
+		}
+	}
+
+	/* Restore the original pin muxing. */
+	rp2040_gbdg_rp1_write_mux(priv_data, priv_data->fast_xfer_data_index,
+				  data_mux);
+	rp2040_gbdg_rp1_write_mux(priv_data, priv_data->fast_xfer_clock_index,
+				  clock_mux);
+
+	if (priv_data->fast_xfer_requires_i2c_lock)
+		i2c_unlock_bus(client->adapter, I2C_LOCK_ROOT_ADAPTER);
+
+	return 0;
+}
+
+/*
+ * Uncached transfer path: stream @length bytes straight to the SPI output
+ * without the checksum/cache machinery. Uses the fast GPIO path when
+ * available, otherwise chunks the data over I2C with DIRECT_CMD_EMIT.
+ */
+static int rp2040_gbdg_transfer_bypass(struct rp2040_gbdg *priv_data,
+				       const u8 *data, unsigned int length)
+{
+	int ret;
+	u8 *buf;
+
+	if (priv_data->fast_xfer_gpios) {
+		/* Put the device into emit mode, then bit-bang the payload. */
+		ret = rp2040_gbdg_10byte_cmd(
+			priv_data->client, CMD_DAT_EMIT,
+			priv_data->fast_xfer_recv_gpio_base, length);
+		return ret ? ret :
+			     rp2040_gbdg_fast_xfer(priv_data, data, length);
+	}
+
+	buf = priv_data->buffer;
+
+	/* I2C fallback: at most HALF_BUFFER payload bytes per command. */
+	while (length) {
+		unsigned int xfer = min(length, HALF_BUFFER);
+
+		buf[0] = DIRECT_PREFIX;
+		buf[1] = DIRECT_CMD_EMIT;
+		memcpy(&buf[2], data, xfer);
+		ret = rp2040_gbdg_i2c_send(priv_data->client, buf, xfer + 2);
+		if (ret)
+			return ret;
+		length -= xfer;
+		data += xfer;
+	}
+
+	return 0;
+}
+
+/*
+ * Cached transfer path: send @length bytes block-by-block via the device's
+ * MD5-keyed cache, updating transfer_progress (exposed via debugfs) as
+ * blocks complete.
+ */
+static int rp2040_gbdg_transfer_cached(struct rp2040_gbdg *priv_data,
+				       const u8 *data, unsigned int length)
+{
+	int ret;
+
+	/*
+	 * Caching mechanism divides data into '8KiB - 9' (8183 byte)
+	 * 'RP2040_GBDG_BLOCK_SIZE' blocks.
+	 *
+	 * If there's a large amount of data to send, instead, attempt to make use
+	 * of a manifest.
+	 */
+	if (length > (2 * RP2040_GBDG_BLOCK_SIZE)) {
+		/* Manifest failure is non-fatal; fall through to blocks. */
+		if (!rp2040_gbdg_transfer_manifest(priv_data, data, length))
+			return 0;
+	}
+
+	priv_data->transfer_progress = 0;
+	while (length) {
+		unsigned int xfer = min(length, RP2040_GBDG_BLOCK_SIZE);
+
+		ret = rp2040_gbdg_transfer_block(priv_data, data, xfer);
+		if (ret)
+			return ret;
+		length -= xfer;
+		data += xfer;
+		priv_data->transfer_progress += xfer;
+	}
+	priv_data->transfer_progress = 0;
+
+	return 0;
+}
+
+static int rp2040_gbdg_transfer_one(struct spi_controller *ctlr,
+				    struct spi_device *spi,
+				    struct spi_transfer *transfer)
+{
+	struct rp2040_gbdg *priv_data = spi_controller_get_devdata(ctlr);
+
+	/*
+	 * All transfers are performed in a synchronous manner. As such,
+	 * return '0' on success or -ve on failure. (Returning +ve
+	 * indicates async xfer)
+	 */
+	if (priv_data->bypass_cache)
+		return rp2040_gbdg_transfer_bypass(priv_data, transfer->tx_buf,
+						   transfer->len);
+
+	return rp2040_gbdg_transfer_cached(priv_data, transfer->tx_buf,
+					   transfer->len);
+}
+
+/*
+ * Drive the (active-low) chip-select line via an I2C direct command.
+ * The set_cs callback is void, so an I2C failure cannot be propagated;
+ * log it instead of dropping it silently, as a missed CS transition
+ * corrupts the surrounding SPI message.
+ */
+static void rp2040_gbdg_set_cs(struct spi_device *spi, bool enable)
+{
+	static const char disable_cs[] = { DIRECT_PREFIX, DIRECT_CMD_CS, 0x00 };
+	static const char enable_cs[] = { DIRECT_PREFIX, DIRECT_CMD_CS, 0x10 };
+	struct rp2040_gbdg *p_data;
+	int ret;
+
+	p_data = spi_controller_get_devdata(spi->controller);
+
+	/*
+	 * 'enable' is inverted and instead describes the logic level of an
+	 * active-low CS.
+	 */
+	ret = rp2040_gbdg_i2c_send(p_data->client,
+				   enable ? disable_cs : enable_cs, 3);
+	if (ret)
+		dev_err(&p_data->client->dev, "%s(%d) failed to set CS\n",
+			__func__, enable);
+}
+
+/*
+ * gpio_chip .request: mark GPIO @offset as in use. Device GPIOs map to
+ * bits 8..(8 + NUM_GPIO - 1) of the gpio_requested bitmask. The first
+ * requested GPIO takes a runtime-PM reference to keep the bridge powered.
+ */
+static int rp2040_gbdg_gpio_request(struct gpio_chip *gc, unsigned int offset)
+{
+	struct rp2040_gbdg *priv_data = gpiochip_get_data(gc);
+	u32 pattern;
+	int ret;
+
+	if (offset >= NUM_GPIO)
+		return -EINVAL;
+
+	pattern = (1 << (offset + 8));
+	if (pattern & priv_data->gpio_requested)
+		return -EBUSY;
+
+	/* Resume if previously no gpio requested */
+	if (!priv_data->gpio_requested) {
+		ret = pm_runtime_resume_and_get(&priv_data->client->dev);
+		if (ret) {
+			dev_err(&priv_data->client->dev,
+				"%s(%u) unable to resume\n", __func__, offset);
+			return ret;
+		}
+	}
+
+	priv_data->gpio_requested |= pattern;
+
+	return 0;
+}
+
+/*
+ * gpio_chip .free: release GPIO @offset. The line is returned to its
+ * reset state (input, output latch cleared); dropping the last requested
+ * GPIO releases the runtime-PM reference taken in .request.
+ */
+static void rp2040_gbdg_gpio_free(struct gpio_chip *gc, unsigned int offset)
+{
+	struct rp2040_gbdg *priv_data = gpiochip_get_data(gc);
+	u32 pattern;
+	int ret;
+
+	if (offset >= NUM_GPIO || !priv_data->gpio_requested)
+		return;
+
+	pattern = (1 << (offset + 8));
+
+	priv_data->gpio_requested &= ~pattern;
+	rp2040_gbdg_gpio_dir_in(gc, offset);
+	rp2040_gbdg_gpio_set(gc, offset, 0);
+
+	if (!priv_data->gpio_requested) {
+		ret = pm_runtime_put_autosuspend(&priv_data->client->dev);
+		if (ret) {
+			dev_err(&priv_data->client->dev,
+				"%s(%u) unable to put_autosuspend\n", __func__,
+				offset);
+		}
+	}
+}
+
+/* gpio_chip .get_direction: a set bit in gpio_direction means input. */
+static int rp2040_gbdg_gpio_get_direction(struct gpio_chip *gc,
+					  unsigned int offset)
+{
+	struct rp2040_gbdg *priv_data = gpiochip_get_data(gc);
+
+	if (offset >= NUM_GPIO)
+		return -EINVAL;
+
+	if (priv_data->gpio_direction & (1 << (offset + 8)))
+		return GPIO_LINE_DIRECTION_IN;
+
+	return GPIO_LINE_DIRECTION_OUT;
+}
+
+/*
+ * gpio_chip .direction_input: mark the line as input locally, then push
+ * the full output-enable mask to the device (CMD_GPIO_OE takes both the
+ * enable and disable patterns).
+ */
+static int rp2040_gbdg_gpio_dir_in(struct gpio_chip *gc, unsigned int offset)
+{
+	struct rp2040_gbdg *priv_data = gpiochip_get_data(gc);
+	struct i2c_client *client = priv_data->client;
+
+	if (offset >= NUM_GPIO)
+		return -EINVAL;
+
+	priv_data->gpio_direction |= (1 << (offset + 8));
+
+	return rp2040_gbdg_10byte_cmd(client, CMD_GPIO_OE,
+				      ~priv_data->gpio_direction,
+				      priv_data->gpio_direction);
+}
+
+/*
+ * gpio_chip .direction_output: latch the requested level first (so the
+ * line does not glitch when the output is enabled), then switch the line
+ * to output via CMD_GPIO_OE.
+ */
+static int rp2040_gbdg_gpio_dir_out(struct gpio_chip *gc, unsigned int offset,
+				    int value)
+{
+	struct rp2040_gbdg *priv_data = gpiochip_get_data(gc);
+	struct i2c_client *client = priv_data->client;
+	u32 pattern;
+	int ret;
+
+	if (offset >= NUM_GPIO)
+		return -EINVAL;
+
+	pattern = (1 << (offset + 8));
+
+	/* CMD_GPIO_ST_CL takes (set_mask, clear_mask). */
+	ret = rp2040_gbdg_10byte_cmd(client, CMD_GPIO_ST_CL,
+				     value ? pattern : 0, !value ? pattern : 0);
+	if (ret) {
+		dev_err(&client->dev, "%s(%u, %d) could not ST_CL\n", __func__,
+			offset, value);
+		return ret;
+	}
+
+	priv_data->gpio_direction &= ~pattern;
+	ret = rp2040_gbdg_10byte_cmd(client, CMD_GPIO_OE,
+				     ~priv_data->gpio_direction,
+				     priv_data->gpio_direction);
+
+	return ret;
+}
+
+/*
+ * gpio_chip .get: read the line level from the device's status word
+ * (GPIO @offset occupies bit offset+8). Returns 0/1 or a negative errno.
+ */
+static int rp2040_gbdg_gpio_get(struct gpio_chip *gc, unsigned int offset)
+{
+	struct rp2040_gbdg *priv_data = gpiochip_get_data(gc);
+	struct i2c_client *client = priv_data->client;
+	struct rp2040_gbdg_device_info info;
+	int ret;
+
+	if (offset >= NUM_GPIO)
+		return -EINVAL;
+
+	ret = rp2040_gbdg_get_device_info(client, &info);
+	if (ret)
+		return ret;
+
+	return info.status & (1 << (offset + 8)) ? 1 : 0;
+}
+
+/*
+ * gpio_chip .set: drive the output latch via CMD_GPIO_ST_CL
+ * (set_mask, clear_mask). The callback is void, so I2C errors cannot be
+ * reported to the caller.
+ */
+static void rp2040_gbdg_gpio_set(struct gpio_chip *gc, unsigned int offset,
+				 int value)
+{
+	struct rp2040_gbdg *priv_data = gpiochip_get_data(gc);
+	struct i2c_client *client = priv_data->client;
+	u32 pattern;
+
+	if (offset >= NUM_GPIO)
+		return;
+
+	pattern = (1 << (offset + 8));
+	rp2040_gbdg_10byte_cmd(client, CMD_GPIO_ST_CL, value ? pattern : 0,
+			       !value ? pattern : 0);
+}
+
+/* Acquire the device's "power" supply; returns 0 or a negative errno. */
+static int rp2040_gbdg_get_regulator(struct device *dev,
+				     struct rp2040_gbdg *rp2040_gbdg)
+{
+	struct regulator *reg;
+
+	reg = devm_regulator_get(dev, "power");
+	if (IS_ERR(reg))
+		return PTR_ERR(reg);
+
+	rp2040_gbdg->regulator = reg;
+
+	return 0;
+}
+
+/*
+ * Parse optional DT properties. The fast-xfer path is best-effort: any
+ * failure below simply leaves fast_xfer_gpios NULL and the driver falls
+ * back to plain I2C, so all error paths log at info level and return.
+ */
+static void rp2040_gbdg_parse_dt(struct rp2040_gbdg *rp2040_gbdg)
+{
+	struct i2c_client *client = rp2040_gbdg->client;
+	struct of_phandle_args of_args[2] = { 0 };
+	struct device *dev = &client->dev;
+	struct device_node *dn;
+
+	rp2040_gbdg->bypass_cache =
+		of_property_read_bool(client->dev.of_node, "bypass-cache");
+
+	/* Optionally configure fast_xfer if RP1 is being used */
+	if (of_parse_phandle_with_args(client->dev.of_node, "fast_xfer-gpios",
+				       "#gpio-cells", 0, &of_args[0]) ||
+	    of_parse_phandle_with_args(client->dev.of_node, "fast_xfer-gpios",
+				       "#gpio-cells", 1, &of_args[1])) {
+		dev_info(dev, "Could not parse fast_xfer-gpios phandles\n");
+		goto node_put;
+	}
+
+	/* Both data and clock must come from the same RP1 GPIO controller. */
+	if (of_args[0].np != of_args[1].np) {
+		dev_info(
+			dev,
+			"fast_xfer-gpios are not provided by the same controller\n");
+		goto node_put;
+	}
+	dn = of_args[0].np;
+	if (!of_device_is_compatible(dn, "raspberrypi,rp1-gpio")) {
+		dev_info(dev, "fast_xfer-gpios controller is not an rp1\n");
+		goto node_put;
+	}
+	if (of_args[0].args_count != 2 || of_args[1].args_count != 2) {
+		dev_info(dev, "of_args count is %d\n", of_args[0].args_count);
+		goto node_put;
+	}
+
+	if (of_property_read_u32_index(
+		    client->dev.of_node, "fast_xfer_recv_gpio_base", 0,
+		    &rp2040_gbdg->fast_xfer_recv_gpio_base)) {
+		dev_info(dev, "Could not read fast_xfer_recv_gpio_base\n");
+		goto node_put;
+	}
+
+	rp2040_gbdg->fast_xfer_gpios =
+		devm_gpiod_get_array_optional(dev, "fast_xfer", GPIOD_ASIS);
+	if (!rp2040_gbdg->fast_xfer_gpios) {
+		dev_info(dev, "Could not acquire fast_xfer-gpios\n");
+		goto node_put;
+	}
+
+	rp2040_gbdg->fast_xfer_data_index = of_args[0].args[0];
+	rp2040_gbdg->fast_xfer_clock_index = of_args[1].args[0];
+	rp2040_gbdg->fast_xfer_requires_i2c_lock = of_property_read_bool(
+		client->dev.of_node, "fast_xfer_requires_i2c_lock");
+
+	/*
+	 * Map the RP1 GPIO and RIO register banks for direct MMIO
+	 * bit-banging; on failure, unwind everything acquired so far so
+	 * fast_xfer_gpios NULL remains the single "fast path off" signal.
+	 */
+	rp2040_gbdg->gpio_base = of_iomap(dn, 0);
+	if (IS_ERR_OR_NULL(rp2040_gbdg->gpio_base)) {
+		dev_info(&client->dev, "%s() unable to map gpio_base\n",
+			 __func__);
+		rp2040_gbdg->gpio_base = NULL;
+		devm_gpiod_put_array(dev, rp2040_gbdg->fast_xfer_gpios);
+		rp2040_gbdg->fast_xfer_gpios = NULL;
+		goto node_put;
+	}
+
+	rp2040_gbdg->rio_base = of_iomap(dn, 1);
+	if (IS_ERR_OR_NULL(rp2040_gbdg->rio_base)) {
+		dev_info(&client->dev, "%s() unable to map rio_base\n",
+			 __func__);
+		rp2040_gbdg->rio_base = NULL;
+		iounmap(rp2040_gbdg->gpio_base);
+		rp2040_gbdg->gpio_base = NULL;
+		devm_gpiod_put_array(dev, rp2040_gbdg->fast_xfer_gpios);
+		rp2040_gbdg->fast_xfer_gpios = NULL;
+		goto node_put;
+	}
+
+node_put:
+	if (of_args[0].np)
+		of_node_put(of_args[0].np);
+	if (of_args[1].np)
+		of_node_put(of_args[1].np);
+}
+
+/* Cut power to the bridge by disabling its regulator. */
+static int rp2040_gbdg_power_off(struct rp2040_gbdg *rp2040_gbdg)
+{
+	struct device *dev = &rp2040_gbdg->client->dev;
+	int ret;
+
+	ret = regulator_disable(rp2040_gbdg->regulator);
+	if (!ret)
+		return 0;
+
+	dev_err(dev, "%s: Could not disable regulator\n", __func__);
+	return ret;
+}
+
+/* Power the bridge by enabling its regulator. */
+static int rp2040_gbdg_power_on(struct rp2040_gbdg *rp2040_gbdg)
+{
+	struct device *dev = &rp2040_gbdg->client->dev;
+	int ret;
+
+	ret = regulator_enable(rp2040_gbdg->regulator);
+	if (!ret)
+		return 0;
+
+	dev_err(dev, "%s: Could not enable regulator\n", __func__);
+	return ret;
+}
+
+/* debugfs: report bytes completed of the current cached transfer. */
+static int transfer_progress_show(struct seq_file *s, void *data)
+{
+	struct rp2040_gbdg *rp2040_gbdg = s->private;
+
+	seq_printf(s, "%zu\n", rp2040_gbdg->transfer_progress);
+	return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(transfer_progress);
+
+static int rp2040_gbdg_probe(struct i2c_client *client)
+{
+	struct rp2040_gbdg_device_info info;
+	struct spi_controller *controller;
+	struct device *dev = &client->dev;
+	struct rp2040_gbdg *rp2040_gbdg;
+	struct device_node *np;
+	char debugfs_name[128];
+	int ret;
+
+	np = dev->of_node;
+
+	controller = devm_spi_alloc_host(dev, sizeof(struct rp2040_gbdg));
+	if (!controller)
+		return dev_err_probe(dev, -ENOMEM,
+				     "could not alloc spi controller\n");
+
+	rp2040_gbdg = spi_controller_get_devdata(controller);
+	i2c_set_clientdata(client, rp2040_gbdg);
+	rp2040_gbdg->controller = controller;
+	rp2040_gbdg->client = client;
+
+	ret = rp2040_gbdg_get_regulator(dev, rp2040_gbdg);
+	if (ret < 0)
+		return dev_err_probe(dev, ret, "Cannot get regulator\n");
+
+	ret = rp2040_gbdg_power_on(rp2040_gbdg);
+	if (ret)
+		return dev_err_probe(dev, ret, "Could not power on device\n");
+
+	/* Device is powered; mirror that state into runtime PM. */
+	pm_runtime_set_active(dev);
+	pm_runtime_get_noresume(dev);
+	pm_runtime_enable(dev);
+	pm_runtime_set_autosuspend_delay(dev, 1000);
+	pm_runtime_use_autosuspend(dev);
+
+	ret = rp2040_gbdg_get_device_info(client, &info);
+	if (ret) {
+		dev_err(dev, "Could not get device info\n");
+		goto err_pm;
+	}
+
+	dev_info(dev, "%s() found dev ID: %llx, fw ver. %u\n", __func__,
+		 info.id, info.version);
+
+	/* MD5 is used to key the device's block cache. */
+	rp2040_gbdg->shash = crypto_alloc_shash("md5", 0, 0);
+	if (IS_ERR(rp2040_gbdg->shash)) {
+		ret = PTR_ERR(rp2040_gbdg->shash);
+		dev_err(dev, "Could not allocate shash\n");
+		goto err_pm;
+	}
+
+	if (crypto_shash_digestsize(rp2040_gbdg->shash) != MD5_DIGEST_SIZE) {
+		ret = -EINVAL;
+		dev_err(dev, "error: Unexpected hash digest size\n");
+		goto err_shash;
+	}
+
+	rp2040_gbdg->shash_desc =
+		devm_kmalloc(dev,
+			     sizeof(struct shash_desc) +
+				     crypto_shash_descsize(rp2040_gbdg->shash),
+			     0);
+
+	if (!rp2040_gbdg->shash_desc) {
+		ret = -ENOMEM;
+		dev_err(dev,
+			"error: Could not allocate memory for shash_desc\n");
+		goto err_shash;
+	}
+	rp2040_gbdg->shash_desc->tfm = rp2040_gbdg->shash;
+
+	controller->bus_num = -1;
+	controller->num_chipselect = 1;
+	controller->mode_bits = SPI_CPOL | SPI_CPHA;
+	controller->bits_per_word_mask = SPI_BPW_MASK(8);
+	controller->min_speed_hz = 35000000;
+	controller->max_speed_hz = 35000000;
+	controller->max_transfer_size = rp2040_gbdg_max_transfer_size;
+	controller->max_message_size = rp2040_gbdg_max_transfer_size;
+	controller->transfer_one = rp2040_gbdg_transfer_one;
+	controller->set_cs = rp2040_gbdg_set_cs;
+
+	controller->dev.of_node = np;
+	controller->auto_runtime_pm = true;
+
+	ret = devm_spi_register_controller(dev, controller);
+	if (ret) {
+		dev_err(dev, "error: Could not register SPI controller\n");
+		goto err_shash;
+	}
+
+	memset(&rp2040_gbdg->gc, 0, sizeof(struct gpio_chip));
+	rp2040_gbdg->gc.parent = dev;
+	rp2040_gbdg->gc.label = MODULE_NAME;
+	rp2040_gbdg->gc.owner = THIS_MODULE;
+	rp2040_gbdg->gc.base = -1;
+	rp2040_gbdg->gc.ngpio = NUM_GPIO;
+
+	rp2040_gbdg->gc.request = rp2040_gbdg_gpio_request;
+	rp2040_gbdg->gc.free = rp2040_gbdg_gpio_free;
+	rp2040_gbdg->gc.get_direction = rp2040_gbdg_gpio_get_direction;
+	rp2040_gbdg->gc.direction_input = rp2040_gbdg_gpio_dir_in;
+	rp2040_gbdg->gc.direction_output = rp2040_gbdg_gpio_dir_out;
+	rp2040_gbdg->gc.get = rp2040_gbdg_gpio_get;
+	rp2040_gbdg->gc.set = rp2040_gbdg_gpio_set;
+	rp2040_gbdg->gc.can_sleep = true;
+
+	rp2040_gbdg->gpio_requested = 0;
+
+	/* Coming out of reset, all GPIOs are inputs */
+	rp2040_gbdg->gpio_direction = ~0;
+
+	ret = devm_gpiochip_add_data(dev, &rp2040_gbdg->gc, rp2040_gbdg);
+	if (ret) {
+		dev_err(dev, "error: Could not add data to gpiochip\n");
+		goto err_shash;
+	}
+
+	rp2040_gbdg_parse_dt(rp2040_gbdg);
+
+	snprintf(debugfs_name, sizeof(debugfs_name), "rp2040-spi:%s",
+		 dev_name(dev));
+	rp2040_gbdg->debugfs = debugfs_create_dir(debugfs_name, NULL);
+	debugfs_create_file("transfer_progress", 0444, rp2040_gbdg->debugfs,
+			    rp2040_gbdg, &transfer_progress_fops);
+
+	pm_runtime_mark_last_busy(dev);
+	pm_runtime_put_autosuspend(dev);
+
+	return 0;
+
+err_shash:
+	crypto_free_shash(rp2040_gbdg->shash);
+err_pm:
+	pm_runtime_disable(dev);
+	pm_runtime_put_noidle(dev);
+	rp2040_gbdg_power_off(rp2040_gbdg);
+
+	return ret;
+}
+
+/*
+ * i2c .remove: release the hash transform and RP1 mappings, then power
+ * the device down unless runtime PM already suspended (and powered off)
+ * it.
+ */
+static void rp2040_gbdg_remove(struct i2c_client *client)
+{
+	struct rp2040_gbdg *priv_data = i2c_get_clientdata(client);
+
+	crypto_free_shash(priv_data->shash);
+
+	if (priv_data->gpio_base) {
+		iounmap(priv_data->gpio_base);
+		priv_data->gpio_base = NULL;
+	}
+	if (priv_data->rio_base) {
+		iounmap(priv_data->rio_base);
+		priv_data->rio_base = NULL;
+	}
+
+	pm_runtime_disable(&client->dev);
+	if (!pm_runtime_status_suspended(&client->dev))
+		rp2040_gbdg_power_off(priv_data);
+	pm_runtime_set_suspended(&client->dev);
+}
+
+/* Legacy (non-DT) I2C device ID table. */
+static const struct i2c_device_id rp2040_gbdg_id[] = {
+	{ "rp2040-gpio-bridge", 0 },
+	{},
+};
+MODULE_DEVICE_TABLE(i2c, rp2040_gbdg_id);
+
+/* Device tree match table. */
+static const struct of_device_id rp2040_gbdg_of_match[] = {
+	{ .compatible = "raspberrypi,rp2040-gpio-bridge" },
+	{},
+};
+MODULE_DEVICE_TABLE(of, rp2040_gbdg_of_match);
+
+/* Runtime PM: cut power when the device goes idle. */
+static int rp2040_gbdg_runtime_suspend(struct device *dev)
+{
+	struct rp2040_gbdg *priv_data = i2c_get_clientdata(to_i2c_client(dev));
+
+	return rp2040_gbdg_power_off(priv_data);
+}
+
+/* Runtime PM: restore power before the device is used again. */
+static int rp2040_gbdg_runtime_resume(struct device *dev)
+{
+	struct rp2040_gbdg *priv_data = i2c_get_clientdata(to_i2c_client(dev));
+
+	return rp2040_gbdg_power_on(priv_data);
+}
+
+/* Runtime PM only; no system sleep callbacks are provided. */
+static const struct dev_pm_ops rp2040_gbdg_pm_ops = { SET_RUNTIME_PM_OPS(
+	rp2040_gbdg_runtime_suspend, rp2040_gbdg_runtime_resume, NULL) };
+
+static struct i2c_driver rp2040_gbdg_driver = {
+	.driver = {
+		.name = MODULE_NAME,
+		.of_match_table = of_match_ptr(rp2040_gbdg_of_match),
+		.pm = &rp2040_gbdg_pm_ops,
+	},
+	.probe = rp2040_gbdg_probe,
+	.remove = rp2040_gbdg_remove,
+	.id_table = rp2040_gbdg_id,
+};
+
+module_i2c_driver(rp2040_gbdg_driver);
+
+MODULE_AUTHOR("Richard Oliver <[email protected]>");
+MODULE_DESCRIPTION("Raspberry Pi RP2040 GPIO Bridge");
+MODULE_LICENSE("GPL");
+MODULE_SOFTDEP("pre: md5");
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index e25df9990f82..71f30e94a68f 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -3882,6 +3882,7 @@ static int spi_set_cs_timing(struct spi_device *spi)
*/
int spi_setup(struct spi_device *spi)
{
+ struct spi_controller *ctlr = spi->controller;
unsigned bad_bits, ugly_bits;
int status;
@@ -3908,6 +3909,14 @@ int spi_setup(struct spi_device *spi)
"setup: MOSI configured to idle low and high at the same time.\n");
return -EINVAL;
}
+
+ if (ctlr->use_gpio_descriptors && ctlr->cs_gpiods &&
+ ctlr->cs_gpiods[spi->chip_select[0]] && !(spi->mode & SPI_CS_HIGH)) {
+ dev_dbg(&spi->dev,
+ "setup: forcing CS_HIGH (use_gpio_descriptors)\n");
+ spi->mode |= SPI_CS_HIGH;
+ }
+
/*
* Help drivers fail *cleanly* when they need options
* that aren't supported with their current controller.
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index 5300c942a2a4..748cb6acf16e 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -428,7 +428,7 @@ spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
}
if (ctlr->use_gpio_descriptors && spi_get_csgpiod(spi, 0))
- tmp |= SPI_CS_HIGH;
+ { /*tmp |= SPI_CS_HIGH;*/ }
tmp |= spi->mode & ~SPI_MODE_MASK;
spi->mode = tmp & SPI_MODE_USER_MASK;
@@ -718,6 +718,7 @@ static const struct spi_device_id spidev_spi_ids[] = {
{ .name = /* semtech */ "sx1301" },
{ .name = /* silabs */ "em3581" },
{ .name = /* silabs */ "si3210" },
+ { .name = "spidev" },
{},
};
MODULE_DEVICE_TABLE(spi, spidev_spi_ids);
@@ -728,7 +729,7 @@ MODULE_DEVICE_TABLE(spi, spidev_spi_ids);
*/
static int spidev_of_check(struct device *dev)
{
- if (device_property_match_string(dev, "compatible", "spidev") < 0)
+ if (1 || device_property_match_string(dev, "compatible", "spidev") < 0)
return 0;
dev_err(dev, "spidev listed directly in DT is not supported\n");