aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/scsi/a3000.c
blob: c3028726bbe4651e30fbdccbd33f5548765892f2 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>

#include <asm/page.h>
#include <asm/amigaints.h>
#include <asm/amigahw.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_tcq.h>
#include "wd33c93.h"
#include "a3000.h"


/* Per-host private data, allocated with the Scsi_Host (see shost_priv()). */
struct a3000_hostdata {
	struct WD33C93_hostdata wh;	/* generic WD33C93 chip-driver state */
	struct a3000_scsiregs *regs;	/* mapped A3000 DMA/SCSI register block */
	struct device *dev;		/* platform device, used for DMA mapping */
};

/* Translate a wd33c93 transfer direction into a DMA API direction.
 * Argument fully parenthesized so expressions expand safely. */
#define DMA_DIR(d)   (((d) == DATA_OUT_DIR) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)

/*
 * Interrupt handler for the A3000 built-in SCSI.
 * The IRQ line (Amiga "ports" interrupt) is shared, so bail out with
 * IRQ_NONE unless the controller actually flags an interrupt.
 */
static irqreturn_t a3000_intr(int irq, void *data)
{
	struct Scsi_Host *host = data;
	struct a3000_hostdata *hd = shost_priv(host);
	unsigned int istr = hd->regs->ISTR;
	unsigned long flags;

	/* Nothing pending from this controller: not ours. */
	if (!(istr & ISTR_INT_P))
		return IRQ_NONE;

	/* Pending but not a SCSI-chip interrupt: report and decline. */
	if (!(istr & ISTR_INTS)) {
		pr_warn("Non-serviced A3000 SCSI-interrupt? ISTR = %02x\n", istr);
		return IRQ_NONE;
	}

	spin_lock_irqsave(host->host_lock, flags);
	wd33c93_intr(host);
	spin_unlock_irqrestore(host->host_lock, flags);
	return IRQ_HANDLED;
}

/*
 * dma_setup - map the current data block and start the A3000 DMA engine
 * @cmd:    command whose scsi_pointer describes the transfer
 * @dir_in: non-zero for a read from the device, zero for a write to it
 *
 * Returns 0 on success, 1 on failure (the wd33c93 core then falls back
 * to PIO for this block).  If the mapped address violates the A3000
 * alignment constraint, the data is staged through a bounce buffer.
 */
static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
{
	struct scsi_pointer *scsi_pointer = WD33C93_scsi_pointer(cmd);
	unsigned long len = scsi_pointer->this_residual;
	struct Scsi_Host *instance = cmd->device->host;
	struct a3000_hostdata *hdata = shost_priv(instance);
	struct WD33C93_hostdata *wh = &hdata->wh;
	struct a3000_scsiregs *regs = hdata->regs;
	unsigned short cntr = CNTR_PDMD | CNTR_INTEN;
	dma_addr_t addr;

	addr = dma_map_single(hdata->dev, scsi_pointer->ptr,
			      len, DMA_DIR(dir_in));
	if (dma_mapping_error(hdata->dev, addr)) {
		dev_warn(hdata->dev, "cannot map SCSI data block %p\n",
			 scsi_pointer->ptr);
		return 1;
	}
	scsi_pointer->dma_handle = addr;

	/*
	 * if the physical address has the wrong alignment, or if
	 * physical address is bad, or if it is a write and at the
	 * end of a physical memory chunk, then allocate a bounce
	 * buffer
	 * MSch 20220629 - only wrong alignment tested - bounce
	 * buffer returned by kmalloc is guaranteed to be aligned
	 */
	if (addr & A3000_XFER_MASK) {
		WARN_ONCE(1, "Invalid alignment for DMA!");
		/* drop useless mapping and forget the stale handle */
		dma_unmap_single(hdata->dev, scsi_pointer->dma_handle,
				 scsi_pointer->this_residual,
				 DMA_DIR(dir_in));
		scsi_pointer->dma_handle = (dma_addr_t) NULL;

		/* round up to the 512-byte DMA granularity */
		wh->dma_bounce_len = (scsi_pointer->this_residual + 511) & ~0x1ff;
		wh->dma_bounce_buffer = kmalloc(wh->dma_bounce_len,
						GFP_KERNEL);

		/* can't allocate memory; use PIO */
		if (!wh->dma_bounce_buffer) {
			wh->dma_bounce_len = 0;
			return 1;
		}

		if (!dir_in) {
			/* copy to bounce buffer for a write */
			memcpy(wh->dma_bounce_buffer, scsi_pointer->ptr,
			       scsi_pointer->this_residual);
		}

		/*
		 * Map the bounce buffer, NOT scsi_pointer->ptr: mapping the
		 * original buffer again would return the same misaligned
		 * address, and write data just copied into the bounce
		 * buffer would never reach the device.
		 */
		addr = dma_map_single(hdata->dev, wh->dma_bounce_buffer,
				      len, DMA_DIR(dir_in));
		if (dma_mapping_error(hdata->dev, addr)) {
			dev_warn(hdata->dev,
				 "cannot map SCSI data block %p\n",
				 wh->dma_bounce_buffer);
			/* don't leak the bounce buffer on failure */
			kfree(wh->dma_bounce_buffer);
			wh->dma_bounce_buffer = NULL;
			wh->dma_bounce_len = 0;
			return 1;
		}
		scsi_pointer->dma_handle = addr;
	}

	/* setup dma direction */
	if (!dir_in)
		cntr |= CNTR_DDIR;

	/* remember direction */
	wh->dma_dir = dir_in;

	regs->CNTR = cntr;

	/* setup DMA *physical* address */
	regs->ACR = addr;

	/* no more cache flush here - dma_map_single() takes care */

	/* start DMA */
	mb();			/* make sure setup is completed */
	regs->ST_DMA = 1;
	mb();			/* make sure DMA has started before next IO */

	/* return success */
	return 0;
}

/*
 * dma_stop - stop the A3000 DMA engine and tear down the mapping
 * @instance: host the transfer ran on
 * @SCpnt:    command the transfer belonged to (may be NULL on abort paths)
 * @status:   non-zero if the transfer completed successfully
 *
 * Mirrors dma_setup(): stops/flushes the DMA engine, unmaps the buffer,
 * and copies read data back out of the bounce buffer if one was used.
 */
static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
		     int status)
{
	struct scsi_pointer *scsi_pointer = WD33C93_scsi_pointer(SCpnt);
	struct a3000_hostdata *hdata = shost_priv(instance);
	struct WD33C93_hostdata *wh = &hdata->wh;
	struct a3000_scsiregs *regs = hdata->regs;

	/* disable SCSI interrupts */
	unsigned short cntr = CNTR_PDMD;

	if (!wh->dma_dir)
		cntr |= CNTR_DDIR;

	regs->CNTR = cntr;
	mb();			/* make sure CNTR is updated before next IO */

	/* flush if we were reading */
	if (wh->dma_dir) {
		regs->FLUSH = 1;
		mb();		/* don't allow prefetch */
		while (!(regs->ISTR & ISTR_FE_FLG))
			barrier();
		mb();		/* no IO until FLUSH is done */
	}

	/* clear a possible interrupt */
	/* I think that this CINT is only necessary if you are
	 * using the terminal count features.   HM 7 Mar 1994
	 */
	regs->CINT = 1;

	/* stop DMA */
	regs->SP_DMA = 1;
	mb();			/* make sure DMA is stopped before next IO */

	/* restore the CONTROL bits (minus the direction flag) */
	regs->CNTR = CNTR_PDMD | CNTR_INTEN;
	mb();			/* make sure CNTR is updated before next IO */

	dma_unmap_single(hdata->dev, scsi_pointer->dma_handle,
			 scsi_pointer->this_residual,
			 DMA_DIR(wh->dma_dir));

	/*
	 * Copy read data out of the bounce buffer, if one was used, then
	 * release it.  (The original code duplicated the kfree/reset in
	 * two identical branches; only the memcpy depends on SCpnt.)
	 */
	if (status && wh->dma_bounce_buffer) {
		if (SCpnt && wh->dma_dir)
			memcpy(scsi_pointer->ptr, wh->dma_bounce_buffer,
			       scsi_pointer->this_residual);
		kfree(wh->dma_bounce_buffer);
		wh->dma_bounce_buffer = NULL;
		wh->dma_bounce_len = 0;
	}
}

/* SCSI host template: all command handling is delegated to the shared
 * wd33c93 core; this driver only supplies the A3000 DMA glue above. */
static const struct scsi_host_template amiga_a3000_scsi_template = {
	.module			= THIS_MODULE,
	.name			= "Amiga 3000 built-in SCSI",
	.show_info		= wd33c93_show_info,
	.write_info		= wd33c93_write_info,
	.proc_name		= "A3000",
	.queuecommand		= wd33c93_queuecommand,
	.eh_abort_handler	= wd33c93_abort,
	.eh_host_reset_handler	= wd33c93_host_reset,
	.can_queue		= CAN_QUEUE,
	.this_id		= 7,	/* host adapter's own SCSI ID */
	.sg_tablesize		= SG_ALL,
	.cmd_per_lun		= CMD_PER_LUN,
	.cmd_size		= sizeof(struct scsi_pointer),	/* per-command private data for wd33c93 */
};

/*
 * Probe the A3000 built-in SCSI: claim the register region, allocate a
 * Scsi_Host, initialize the wd33c93 core, hook the shared ports IRQ,
 * enable controller interrupts, and register/scan the host.
 * Error paths unwind in strict reverse order via the fail_* labels.
 */
static int __init amiga_a3000_scsi_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct Scsi_Host *instance;
	int error;
	struct a3000_scsiregs *regs;
	wd33c93_regs wdregs;
	struct a3000_hostdata *hdata;

	/* DMA engine addresses are programmed as 32-bit (regs->ACR) */
	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
		dev_warn(&pdev->dev, "cannot use 32 bit DMA\n");
		return -ENODEV;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	if (!request_mem_region(res->start, resource_size(res), "wd33c93"))
		return -EBUSY;

	instance = scsi_host_alloc(&amiga_a3000_scsi_template,
				   sizeof(struct a3000_hostdata));
	if (!instance) {
		error = -ENOMEM;
		goto fail_alloc;
	}

	instance->irq = IRQ_AMIGA_PORTS;

	/* Zorro II space is statically mapped; convert to a virtual address */
	regs = ZTWO_VADDR(res->start);
	regs->DAWR = DAWR_A3000;

	/* register pair the wd33c93 core uses to talk to the chip */
	wdregs.SASR = &regs->SASR;
	wdregs.SCMD = &regs->SCMD;

	hdata = shost_priv(instance);
	hdata->dev = &pdev->dev;
	hdata->wh.no_sync = 0xff;	/* no synchronous transfers by default */
	hdata->wh.fast = 0;
	hdata->wh.dma_mode = CTRL_DMA;
	hdata->regs = regs;

	/* core must be initialized before the (shared) IRQ can fire */
	wd33c93_init(instance, wdregs, dma_setup, dma_stop, WD33C93_FS_12_15);
	error = request_irq(IRQ_AMIGA_PORTS, a3000_intr, IRQF_SHARED,
			    "A3000 SCSI", instance);
	if (error)
		goto fail_irq;

	/* enable controller interrupts only once the handler is in place */
	regs->CNTR = CNTR_PDMD | CNTR_INTEN;

	error = scsi_add_host(instance, NULL);
	if (error)
		goto fail_host;

	platform_set_drvdata(pdev, instance);

	scsi_scan_host(instance);
	return 0;

fail_host:
	free_irq(IRQ_AMIGA_PORTS, instance);
fail_irq:
	scsi_host_put(instance);
fail_alloc:
	release_mem_region(res->start, resource_size(res));
	return error;
}

/*
 * Tear down in reverse order of probe: silence the controller first
 * (CNTR = 0 disables interrupts) before unregistering the host and
 * releasing the IRQ and register region.
 */
static int __exit amiga_a3000_scsi_remove(struct platform_device *pdev)
{
	struct Scsi_Host *instance = platform_get_drvdata(pdev);
	struct a3000_hostdata *hdata = shost_priv(instance);
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	hdata->regs->CNTR = 0;	/* mask controller interrupts */
	scsi_remove_host(instance);
	free_irq(IRQ_AMIGA_PORTS, instance);
	scsi_host_put(instance);
	release_mem_region(res->start, resource_size(res));
	return 0;
}

static struct platform_driver amiga_a3000_scsi_driver = {
	/* .probe is intentionally absent: the probe routine is __init and
	 * is passed to module_platform_driver_probe() below instead. */
	.remove = __exit_p(amiga_a3000_scsi_remove),
	.driver   = {
		.name	= "amiga-a3000-scsi",
	},
};

module_platform_driver_probe(amiga_a3000_scsi_driver, amiga_a3000_scsi_probe);

MODULE_DESCRIPTION("Amiga 3000 built-in SCSI");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:amiga-a3000-scsi");
n> 32; #endif if (size) *size = mc_ch_readl(mc, MC_BROADCAST_CHANNEL, offset + 0x8) << 17; return 0; } EXPORT_SYMBOL_GPL(tegra_mc_get_carveout_info); static int tegra_mc_block_dma_common(struct tegra_mc *mc, const struct tegra_mc_reset *rst) { unsigned long flags; u32 value; spin_lock_irqsave(&mc->lock, flags); value = mc_readl(mc, rst->control) | BIT(rst->bit); mc_writel(mc, value, rst->control); spin_unlock_irqrestore(&mc->lock, flags); return 0; } static bool tegra_mc_dma_idling_common(struct tegra_mc *mc, const struct tegra_mc_reset *rst) { return (mc_readl(mc, rst->status) & BIT(rst->bit)) != 0; } static int tegra_mc_unblock_dma_common(struct tegra_mc *mc, const struct tegra_mc_reset *rst) { unsigned long flags; u32 value; spin_lock_irqsave(&mc->lock, flags); value = mc_readl(mc, rst->control) & ~BIT(rst->bit); mc_writel(mc, value, rst->control); spin_unlock_irqrestore(&mc->lock, flags); return 0; } static int tegra_mc_reset_status_common(struct tegra_mc *mc, const struct tegra_mc_reset *rst) { return (mc_readl(mc, rst->control) & BIT(rst->bit)) != 0; } const struct tegra_mc_reset_ops tegra_mc_reset_ops_common = { .block_dma = tegra_mc_block_dma_common, .dma_idling = tegra_mc_dma_idling_common, .unblock_dma = tegra_mc_unblock_dma_common, .reset_status = tegra_mc_reset_status_common, }; static inline struct tegra_mc *reset_to_mc(struct reset_controller_dev *rcdev) { return container_of(rcdev, struct tegra_mc, reset); } static const struct tegra_mc_reset *tegra_mc_reset_find(struct tegra_mc *mc, unsigned long id) { unsigned int i; for (i = 0; i < mc->soc->num_resets; i++) if (mc->soc->resets[i].id == id) return &mc->soc->resets[i]; return NULL; } static int tegra_mc_hotreset_assert(struct reset_controller_dev *rcdev, unsigned long id) { struct tegra_mc *mc = reset_to_mc(rcdev); const struct tegra_mc_reset_ops *rst_ops; const struct tegra_mc_reset *rst; int retries = 500; int err; rst = tegra_mc_reset_find(mc, id); if (!rst) return -ENODEV; rst_ops = 
mc->soc->reset_ops; if (!rst_ops) return -ENODEV; /* DMA flushing will fail if reset is already asserted */ if (rst_ops->reset_status) { /* check whether reset is asserted */ if (rst_ops->reset_status(mc, rst)) return 0; } if (rst_ops->block_dma) { /* block clients DMA requests */ err = rst_ops->block_dma(mc, rst); if (err) { dev_err(mc->dev, "failed to block %s DMA: %d\n", rst->name, err); return err; } } if (rst_ops->dma_idling) { /* wait for completion of the outstanding DMA requests */ while (!rst_ops->dma_idling(mc, rst)) { if (!retries--) { dev_err(mc->dev, "failed to flush %s DMA\n", rst->name); return -EBUSY; } usleep_range(10, 100); } } if (rst_ops->hotreset_assert) { /* clear clients DMA requests sitting before arbitration */ err = rst_ops->hotreset_assert(mc, rst); if (err) { dev_err(mc->dev, "failed to hot reset %s: %d\n", rst->name, err); return err; } } return 0; } static int tegra_mc_hotreset_deassert(struct reset_controller_dev *rcdev, unsigned long id) { struct tegra_mc *mc = reset_to_mc(rcdev); const struct tegra_mc_reset_ops *rst_ops; const struct tegra_mc_reset *rst; int err; rst = tegra_mc_reset_find(mc, id); if (!rst) return -ENODEV; rst_ops = mc->soc->reset_ops; if (!rst_ops) return -ENODEV; if (rst_ops->hotreset_deassert) { /* take out client from hot reset */ err = rst_ops->hotreset_deassert(mc, rst); if (err) { dev_err(mc->dev, "failed to deassert hot reset %s: %d\n", rst->name, err); return err; } } if (rst_ops->unblock_dma) { /* allow new DMA requests to proceed to arbitration */ err = rst_ops->unblock_dma(mc, rst); if (err) { dev_err(mc->dev, "failed to unblock %s DMA : %d\n", rst->name, err); return err; } } return 0; } static int tegra_mc_hotreset_status(struct reset_controller_dev *rcdev, unsigned long id) { struct tegra_mc *mc = reset_to_mc(rcdev); const struct tegra_mc_reset_ops *rst_ops; const struct tegra_mc_reset *rst; rst = tegra_mc_reset_find(mc, id); if (!rst) return -ENODEV; rst_ops = mc->soc->reset_ops; if (!rst_ops) return 
-ENODEV; return rst_ops->reset_status(mc, rst); } static const struct reset_control_ops tegra_mc_reset_ops = { .assert = tegra_mc_hotreset_assert, .deassert = tegra_mc_hotreset_deassert, .status = tegra_mc_hotreset_status, }; static int tegra_mc_reset_setup(struct tegra_mc *mc) { int err; mc->reset.ops = &tegra_mc_reset_ops; mc->reset.owner = THIS_MODULE; mc->reset.of_node = mc->dev->of_node; mc->reset.of_reset_n_cells = 1; mc->reset.nr_resets = mc->soc->num_resets; err = reset_controller_register(&mc->reset); if (err < 0) return err; return 0; } int tegra_mc_write_emem_configuration(struct tegra_mc *mc, unsigned long rate) { unsigned int i; struct tegra_mc_timing *timing = NULL; for (i = 0; i < mc->num_timings; i++) { if (mc->timings[i].rate == rate) { timing = &mc->timings[i]; break; } } if (!timing) { dev_err(mc->dev, "no memory timing registered for rate %lu\n", rate); return -EINVAL; } for (i = 0; i < mc->soc->num_emem_regs; ++i) mc_writel(mc, timing->emem_data[i], mc->soc->emem_regs[i]); return 0; } EXPORT_SYMBOL_GPL(tegra_mc_write_emem_configuration); unsigned int tegra_mc_get_emem_device_count(struct tegra_mc *mc) { u8 dram_count; dram_count = mc_readl(mc, MC_EMEM_ADR_CFG); dram_count &= MC_EMEM_ADR_CFG_EMEM_NUMDEV; dram_count++; return dram_count; } EXPORT_SYMBOL_GPL(tegra_mc_get_emem_device_count); #if defined(CONFIG_ARCH_TEGRA_3x_SOC) || \ defined(CONFIG_ARCH_TEGRA_114_SOC) || \ defined(CONFIG_ARCH_TEGRA_124_SOC) || \ defined(CONFIG_ARCH_TEGRA_132_SOC) || \ defined(CONFIG_ARCH_TEGRA_210_SOC) static int tegra_mc_setup_latency_allowance(struct tegra_mc *mc) { unsigned long long tick; unsigned int i; u32 value; /* compute the number of MC clock cycles per tick */ tick = (unsigned long long)mc->tick * clk_get_rate(mc->clk); do_div(tick, NSEC_PER_SEC); value = mc_readl(mc, MC_EMEM_ARB_CFG); value &= ~MC_EMEM_ARB_CFG_CYCLES_PER_UPDATE_MASK; value |= MC_EMEM_ARB_CFG_CYCLES_PER_UPDATE(tick); mc_writel(mc, value, MC_EMEM_ARB_CFG); /* write latency allowance 
defaults */ for (i = 0; i < mc->soc->num_clients; i++) { const struct tegra_mc_client *client = &mc->soc->clients[i]; u32 value; value = mc_readl(mc, client->regs.la.reg); value &= ~(client->regs.la.mask << client->regs.la.shift); value |= (client->regs.la.def & client->regs.la.mask) << client->regs.la.shift; mc_writel(mc, value, client->regs.la.reg); } /* latch new values */ mc_writel(mc, MC_TIMING_UPDATE, MC_TIMING_CONTROL); return 0; } static int load_one_timing(struct tegra_mc *mc, struct tegra_mc_timing *timing, struct device_node *node) { int err; u32 tmp; err = of_property_read_u32(node, "clock-frequency", &tmp); if (err) { dev_err(mc->dev, "timing %pOFn: failed to read rate\n", node); return err; } timing->rate = tmp; timing->emem_data = devm_kcalloc(mc->dev, mc->soc->num_emem_regs, sizeof(u32), GFP_KERNEL); if (!timing->emem_data) return -ENOMEM; err = of_property_read_u32_array(node, "nvidia,emem-configuration", timing->emem_data, mc->soc->num_emem_regs); if (err) { dev_err(mc->dev, "timing %pOFn: failed to read EMEM configuration\n", node); return err; } return 0;