about summary refs log tree commit diff stats
path: root/drivers/spi/spi-dw-core.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/spi/spi-dw-core.c')
-rw-r--r--  drivers/spi/spi-dw-core.c  132
1 file changed, 109 insertions, 23 deletions
diff --git a/drivers/spi/spi-dw-core.c b/drivers/spi/spi-dw-core.c
index ea517af9435f..a0965f7f3607 100644
--- a/drivers/spi/spi-dw-core.c
+++ b/drivers/spi/spi-dw-core.c
@@ -100,7 +100,8 @@ void dw_spi_set_cs(struct spi_device *spi, bool enable)
* support active-high or active-low CS level.
*/
if (cs_high == enable)
- dw_writel(dws, DW_SPI_SER, BIT(spi_get_chipselect(spi, 0)));
+ dw_writel(dws, DW_SPI_SER,
+ BIT(spi_get_csgpiod(spi, 0) ? 0 : spi_get_chipselect(spi, 0)));
else
dw_writel(dws, DW_SPI_SER, 0);
}
@@ -201,7 +202,18 @@ int dw_spi_check_status(struct dw_spi *dws, bool raw)
/* Generically handle the erroneous situation */
if (ret) {
- dw_spi_reset_chip(dws);
+ /*
+ * Forcibly halting the controller can cause DMA to hang.
+ * Defer to dw_spi_handle_err outside of interrupt context
+ * and mask further interrupts for the current transfer.
+ */
+ if (dws->dma_mapped) {
+ dw_spi_mask_intr(dws, 0xff);
+ dw_readl(dws, DW_SPI_ICR);
+ } else {
+ dw_spi_reset_chip(dws);
+ }
+
if (dws->host->cur_msg)
dws->host->cur_msg->status = ret;
}
@@ -210,6 +222,32 @@ int dw_spi_check_status(struct dw_spi *dws, bool raw)
}
EXPORT_SYMBOL_NS_GPL(dw_spi_check_status, "SPI_DW_CORE");
+static inline bool dw_spi_ctlr_busy(struct dw_spi *dws)
+{
+ return dw_readl(dws, DW_SPI_SR) & DW_SPI_SR_BUSY;
+}
+
+static enum hrtimer_restart dw_spi_hrtimer_handler(struct hrtimer *hr)
+{
+ struct dw_spi *dws = container_of(hr, struct dw_spi, hrtimer);
+
+ if (!dw_spi_ctlr_busy(dws)) {
+ spi_finalize_current_transfer(dws->host);
+ return HRTIMER_NORESTART;
+ }
+
+ if (!dws->idle_wait_retries) {
+ dev_err(&dws->host->dev, "controller stuck at busy\n");
+ spi_finalize_current_transfer(dws->host);
+ return HRTIMER_NORESTART;
+ }
+
+ dws->idle_wait_retries--;
+ hrtimer_forward_now(hr, dws->idle_wait_interval);
+
+ return HRTIMER_RESTART;
+}
+
static irqreturn_t dw_spi_transfer_handler(struct dw_spi *dws)
{
u16 irq_status = dw_readl(dws, DW_SPI_ISR);
@@ -226,12 +264,32 @@ static irqreturn_t dw_spi_transfer_handler(struct dw_spi *dws)
* final stage of the transfer. By doing so we'll get the next IRQ
* right when the leftover incoming data is received.
*/
- dw_reader(dws);
- if (!dws->rx_len) {
- dw_spi_mask_intr(dws, 0xff);
- spi_finalize_current_transfer(dws->host);
- } else if (dws->rx_len <= dw_readl(dws, DW_SPI_RXFTLR)) {
- dw_writel(dws, DW_SPI_RXFTLR, dws->rx_len - 1);
+ if (dws->rx_len) {
+ dw_reader(dws);
+ if (!dws->rx_len) {
+ dw_spi_mask_intr(dws, 0xff);
+ spi_finalize_current_transfer(dws->host);
+ } else if (dws->rx_len <= dw_readl(dws, DW_SPI_RXFTLR)) {
+ dw_writel(dws, DW_SPI_RXFTLR, dws->rx_len - 1);
+ }
+ } else if (!dws->tx_len) {
+ dw_spi_mask_intr(dws, DW_SPI_INT_TXEI);
+ if (dw_spi_ctlr_busy(dws)) {
+ ktime_t period = ns_to_ktime(DIV_ROUND_UP(NSEC_PER_SEC, dws->current_freq));
+
+ /*
+ * Make the initial wait an underestimate of how long the transfer
+ * should take, then poll rapidly to reduce the delay
+ */
+ dws->idle_wait_retries = 10;
+ dws->idle_wait_interval = period;
+ hrtimer_start(&dws->hrtimer,
+ period * (8 * dws->n_bytes - 1),
+ HRTIMER_MODE_REL);
+ } else {
+ spi_finalize_current_transfer(dws->host);
+ }
+ return IRQ_HANDLED;
}
/*
@@ -241,8 +299,12 @@ static irqreturn_t dw_spi_transfer_handler(struct dw_spi *dws)
*/
if (irq_status & DW_SPI_INT_TXEI) {
dw_writer(dws);
- if (!dws->tx_len)
- dw_spi_mask_intr(dws, DW_SPI_INT_TXEI);
+ if (!dws->tx_len) {
+ if (dws->rx_len)
+ dw_spi_mask_intr(dws, DW_SPI_INT_TXEI);
+ else
+ dw_writel(dws, DW_SPI_TXFTLR, 0);
+ }
}
return IRQ_HANDLED;
@@ -337,7 +399,7 @@ void dw_spi_update_config(struct dw_spi *dws, struct spi_device *spi,
dw_writel(dws, DW_SPI_CTRLR1, cfg->ndf ? cfg->ndf - 1 : 0);
/* Note DW APB SSI clock divider doesn't support odd numbers */
- clk_div = (DIV_ROUND_UP(dws->max_freq, cfg->freq) + 1) & 0xfffe;
+ clk_div = min(DIV_ROUND_UP(dws->max_freq, cfg->freq) + 1, 0xfffe) & 0xfffe;
speed_hz = dws->max_freq / clk_div;
if (dws->current_freq != speed_hz) {
@@ -363,15 +425,18 @@ static void dw_spi_irq_setup(struct dw_spi *dws)
* will be adjusted at the final stage of the IRQ-based SPI transfer
* execution so not to lose the leftover of the incoming data.
*/
- level = min_t(unsigned int, dws->fifo_len / 2, dws->tx_len);
+ level = min_t(unsigned int, dws->fifo_len / 2, dws->tx_len ? dws->tx_len : dws->rx_len);
dw_writel(dws, DW_SPI_TXFTLR, level);
dw_writel(dws, DW_SPI_RXFTLR, level - 1);
dws->transfer_handler = dw_spi_transfer_handler;
- imask = DW_SPI_INT_TXEI | DW_SPI_INT_TXOI |
- DW_SPI_INT_RXUI | DW_SPI_INT_RXOI | DW_SPI_INT_RXFI;
+ imask = DW_SPI_INT_TXEI | DW_SPI_INT_TXOI;
+ if (dws->rx_len)
+ imask |= DW_SPI_INT_RXUI | DW_SPI_INT_RXOI | DW_SPI_INT_RXFI;
dw_spi_umask_intr(dws, imask);
+ if (!dws->tx_len)
+ dw_writel(dws, DW_SPI_DR, 0);
}
/*
@@ -394,18 +459,23 @@ static int dw_spi_poll_transfer(struct dw_spi *dws,
delay.unit = SPI_DELAY_UNIT_SCK;
nbits = dws->n_bytes * BITS_PER_BYTE;
+ if (!dws->tx_len)
+ dw_writel(dws, DW_SPI_DR, 0);
+
do {
- dw_writer(dws);
+ if (dws->tx_len)
+ dw_writer(dws);
delay.value = nbits * (dws->rx_len - dws->tx_len);
spi_delay_exec(&delay, transfer);
- dw_reader(dws);
+ if (dws->rx_len)
+ dw_reader(dws);
ret = dw_spi_check_status(dws, true);
if (ret)
return ret;
- } while (dws->rx_len);
+ } while (dws->rx_len || dws->tx_len || dw_spi_ctlr_busy(dws));
return 0;
}
@@ -420,6 +490,7 @@ static int dw_spi_transfer_one(struct spi_controller *host,
.dfs = transfer->bits_per_word,
.freq = transfer->speed_hz,
};
+ int buswidth;
int ret;
dws->dma_mapped = 0;
@@ -429,6 +500,19 @@ static int dw_spi_transfer_one(struct spi_controller *host,
dws->rx = transfer->rx_buf;
dws->rx_len = dws->tx_len;
+ if (!dws->rx) {
+ dws->rx_len = 0;
+ cfg.tmode = DW_SPI_CTRLR0_TMOD_TO;
+ }
+
+ if (!dws->tx) {
+ dws->tx_len = 0;
+ cfg.tmode = DW_SPI_CTRLR0_TMOD_RO;
+ cfg.ndf = dws->rx_len;
+ }
+ buswidth = transfer->rx_buf ? transfer->rx_nbits :
+ (transfer->tx_buf ? transfer->tx_nbits : 1);
+
/* Ensure the data above is visible for all CPUs */
smp_mb();
@@ -607,11 +695,6 @@ static int dw_spi_write_then_read(struct dw_spi *dws, struct spi_device *spi)
return 0;
}
-static inline bool dw_spi_ctlr_busy(struct dw_spi *dws)
-{
- return dw_readl(dws, DW_SPI_SR) & DW_SPI_SR_BUSY;
-}
-
static int dw_spi_wait_mem_op_done(struct dw_spi *dws)
{
int retry = DW_SPI_WAIT_RETRIES;
@@ -959,10 +1042,12 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
dev_warn(dev, "DMA init failed\n");
} else {
host->can_dma = dws->dma_ops->can_dma;
- host->flags |= SPI_CONTROLLER_MUST_TX;
}
}
+ hrtimer_init(&dws->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ dws->hrtimer.function = dw_spi_hrtimer_handler;
+
ret = spi_register_controller(host);
if (ret) {
dev_err_probe(dev, ret, "problem registering spi host\n");
@@ -988,6 +1073,7 @@ void dw_spi_remove_host(struct dw_spi *dws)
{
dw_spi_debugfs_remove(dws);
+ hrtimer_cancel(&dws->hrtimer);
spi_unregister_controller(dws->host);
if (dws->dma_ops && dws->dma_ops->dma_exit)