// SPDX-License-Identifier: GPL-2.0
/*
 * ESP front-end for Amiga ZORRO SCSI systems.
 *
 * Copyright (C) 1996 Jesper Skov (jskov@cygnus.co.uk)
 *
 * Copyright (C) 2011,2018 Michael Schmitz (schmitz@debian.org) for
 *               migration to ESP SCSI core
 *
 * Copyright (C) 2013 Tuomas Vainikka (tuomas.vainikka@aalto.fi) for
 *               Blizzard 1230 DMA and probe function fixes
 */
/*
 * ZORRO bus code from:
 */
/*
 * Detection routine for the NCR53c710 based Amiga SCSI Controllers for Linux.
 *		Amiga MacroSystemUS WarpEngine SCSI controller.
 *		Amiga Technologies/DKB A4091 SCSI controller.
 *
 * Written 1997 by Alan Hourihane <alanh@fairlite.demon.co.uk>
 * plus modifications of the 53c7xx.c driver to support the Amiga.
 *
 * Rewritten to use 53c700.c by Kars de Jong <jongk@linux-m68k.org>
 */

#define pr_fmt(fmt)        KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/delay.h>
#include <linux/zorro.h>
#include <linux/slab.h>
#include <linux/pgtable.h>

#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/amigahw.h>
#include <asm/amigaints.h>

#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_spi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_tcq.h>

#include "esp_scsi.h"

MODULE_AUTHOR("Michael Schmitz <schmitz@debian.org>");
MODULE_DESCRIPTION("Amiga Zorro NCR53C9x (ESP) driver");
MODULE_LICENSE("GPL");

/* per-board register layout definitions */

/* Blizzard 1230 DMA interface */

struct blz1230_dma_registers {
	unsigned char dma_addr;		/* DMA address      [0x0000] */
	unsigned char dmapad2[0x7fff];
	unsigned char dma_latch;	/* DMA latch        [0x8000] */
};

/* Blizzard 1230II DMA interface */

struct blz1230II_dma_registers {
	unsigned char dma_addr;		/* DMA address      [0x0000] */
	unsigned char dmapad2[0xf];
	unsigned char dma_latch;	/* DMA latch        [0x0010] */
};

/* Blizzard 2060 DMA interface */

struct blz2060_dma_registers {
	unsigned char dma_led_ctrl;	/* DMA led control   [0x000] */
	unsigned char dmapad1[0x0f];
	unsigned char dma_addr0;	/* DMA address (MSB) [0x010] */
	unsigned char dmapad2[0x03];
	unsigned char dma_addr1;	/* DMA address       [0x014] */
	unsigned char dmapad3[0x03];
	unsigned char dma_addr2;	/* DMA address       [0x018] */
	unsigned char dmapad4[0x03];
	unsigned char dma_addr3;	/* DMA address (LSB) [0x01c] */
};

/* DMA control bits */
#define DMA_WRITE 0x80000000

/* Cyberstorm DMA interface */

struct cyber_dma_registers {
	unsigned char dma_addr0;	/* DMA address (MSB) [0x000] */
	unsigned char dmapad1[1];
	unsigned char dma_addr1;	/* DMA address       [0x002] */
	unsigned char dmapad2[1];
	unsigned char dma_addr2;	/* DMA address       [0x004] */
	unsigned char dmapad3[1];
	unsigned char dma_addr3;	/* DMA address (LSB) [0x006] */
	unsigned char dmapad4[0x3fb];
	unsigned char cond_reg;		/* DMA cond    (ro)  [0x402] */
#define ctrl_reg  cond_reg		/* DMA control (wo)  [0x402] */
};

/* DMA control bits */
#define CYBER_DMA_WRITE  0x40	/* DMA direction. 1 = write */
#define CYBER_DMA_Z3     0x20	/* 16 (Z2) or 32 (CHIP/Z3) bit DMA transfer */

/* DMA status bits */
#define CYBER_DMA_HNDL_INTR 0x80	/* DMA IRQ pending? */

/* The CyberStorm II DMA interface */
struct cyberII_dma_registers {
	unsigned char cond_reg;		/* DMA cond    (ro)  [0x000] */
#define ctrl_reg  cond_reg		/* DMA control (wo)  [0x000] */
	unsigned char dmapad4[0x3f];
	unsigned char dma_addr0;	/* DMA address (MSB) [0x040] */
	unsigned char dmapad1[3];
	unsigned char dma_addr1;	/* DMA address       [0x044] */
	unsigned char dmapad2[3];
	unsigned char dma_addr2;	/* DMA address       [0x048] */
	unsigned char dmapad3[3];
	unsigned char dma_addr3;	/* DMA address (LSB) [0x04c] */
};

/* Fastlane DMA interface */

struct fastlane_dma_registers {
	unsigned char cond_reg;		/* DMA status  (ro) [0x0000] */
#define ctrl_reg  cond_reg		/* DMA control (wo) [0x0000] */
	char dmapad1[0x3f];
	unsigned char clear_strobe;	/* DMA clear   (wo) [0x0040] */
};

/*
 * The controller registers can be found in the Z2 config area at these
 * offsets:
 */
#define FASTLANE_ESP_ADDR	0x1000001

/* DMA status bits */
#define FASTLANE_DMA_MINT	0x80
#define FASTLANE_DMA_IACT	0x40
#define FASTLANE_DMA_CREQ	0x20

/* DMA control bits */
#define FASTLANE_DMA_FCODE	0xa0
#define FASTLANE_DMA_MASK	0xf3
#define FASTLANE_DMA_WRITE	0x08	/* 1 = write */
#define FASTLANE_DMA_ENABLE	0x04	/* Enable DMA */
#define FASTLANE_DMA_EDI	0x02	/* Enable DMA IRQ ? */
#define FASTLANE_DMA_ESI	0x01	/* Enable SCSI IRQ */

/*
 * private data used for driver
 */
struct zorro_esp_priv {
	struct esp *esp;		/* our ESP instance - for Scsi_host* */
	void __iomem *board_base;	/* virtual address (Zorro III board) */
	int zorro3;			/* board is Zorro III */
	unsigned char ctrl_data;	/* shadow copy of ctrl_reg */
};

/*
 * On all implementations except for the Oktagon, padding between ESP
 * registers is three bytes.
 * On Oktagon, it is one byte - use a different accessor there.
 *
 * Oktagon needs PDMA - currently unsupported!
 */

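/*
 * ESP registers sit on 32-bit boundaries on these boards (3 pad bytes),
 * hence the "reg * 4" stride used by the accessors below.
 */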
static void zorro_esp_write8(struct esp *esp, u8 val, unsigned long reg)
{
	writeb(val, esp->regs + (reg * 4UL));
}

static u8 zorro_esp_read8(struct esp *esp, unsigned long reg)
{
	return readb(esp->regs + (reg * 4UL));
}

static int zorro_esp_irq_pending(struct esp *esp)
{
	/* check ESP status register; DMA has no status reg. */
	if (zorro_esp_read8(esp, ESP_STATUS) & ESP_STAT_INTR)
		return 1;

	return 0;
}

static int cyber_esp_irq_pending(struct esp *esp)
{
	struct cyber_dma_registers __iomem *dregs = esp->dma_regs;
	unsigned char dma_status = readb(&dregs->cond_reg);

	/* Both the ESP interrupt bit and the DMA handler's IRQ bit must be set */
	return ((zorro_esp_read8(esp, ESP_STATUS) & ESP_STAT_INTR) &&
		(dma_status & CYBER_DMA_HNDL_INTR));
}

static int fastlane_esp_irq_pending(struct esp *esp)
{
	struct fastlane_dma_registers __iomem *dregs = esp->dma_regs;
	unsigned char dma_status;

	dma_status = readb(&dregs->cond_reg);

	if (dma_status & FASTLANE_DMA_IACT)
		return 0;	/* not our IRQ */

	/* Return non-zero if ESP requested IRQ */
	return (
	   (dma_status & FASTLANE_DMA_CREQ) &&
	   (!(dma_status & FASTLANE_DMA_MINT)) &&
	   (zorro_esp_read8(esp, ESP_STATUS) & ESP_STAT_INTR));
}

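/*
 * The ESP transfer counter (ESP_TCLOW/ESP_TCMED) is 16 bits wide, so cap
 * generic DMA transfers at 64K per request.
 */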
static u32 zorro_esp_dma_length_limit(struct esp *esp, u32 dma_addr,
					u32 dma_len)
{
	return dma_len > (1U << 16) ? (1U << 16) : dma_len;
}

static u32 fastlane_esp_dma_length_limit(struct esp *esp, u32 dma_addr,
					u32 dma_len)
{
	/* The old driver used 0xfffc as limit, so do that here too */
	return dma_len > 0xfffc ? 0xfffc : dma_len;
}

static void zorro_esp_reset_dma(struct esp *esp)
{
	/* nothing to do here */
}

static void zorro_esp_dma_drain(struct esp *esp)
{
	/* nothing to do here */
}

static void zorro_esp_dma_invalidate(struct esp *esp)
{
	/* nothing to do here */
}

static void fastlane_esp_dma_invalidate(struct esp *esp)
{
	struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
	struct fastlane_dma_registers __iomem *dregs = esp->dma_regs;
	unsigned char *ctrl_data = &zep->ctrl_data;

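	/*
	 * Drop the DMA enable and direction bits from the shadow control
	 * byte, pulse the clear strobe and write zero through the board's
	 * Zorro III address window.
	 */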
	*ctrl_data = (*ctrl_data & FASTLANE_DMA_MASK);
	writeb(0, &dregs->clear_strobe);
	z_writel(0, zep->board_base);
}

/* Blizzard 1230/60 SCSI-IV DMA */

static void zorro_esp_send_blz1230_dma_cmd(struct esp *esp, u32 addr,
			u32 esp_count, u32 dma_count, int write, u8 cmd)
{
	struct blz1230_dma_registers __iomem *dregs = esp->dma_regs;
	u8 phase = esp->sreg & ESP_STAT_PMASK;

	/*
	 * Use PIO if transferring message bytes to esp->command_block_dma.
	 * PIO requires a virtual address, so substitute esp->command_block
	 * for addr.
	 */
	if (phase == ESP_MIP && addr == esp->command_block_dma) {
		esp_send_pio_cmd(esp, (u32)esp->command_block, esp_count,
				 dma_count, write, cmd);
		return;
	}

	/* Clear the results of a possible prior esp->ops->send_dma_cmd() */
	esp->send_cmd_error = 0;
	esp->send_cmd_residual = 0;

	if (write)
		/* DMA receive */
		dma_sync_single_for_device(esp->dev, addr, esp_count,
				DMA_FROM_DEVICE);
	else
		/* DMA send */
		dma_sync_single_for_device(esp->dev, addr, esp_count,
				DMA_TO_DEVICE);

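	/*
	 * The Blizzard DMA engine is given the address shifted right by one
	 * bit, with the top bit (DMA_WRITE) selecting the transfer direction
	 * (cleared when reading into memory). The MSB goes to the latch
	 * first, then all four address bytes are written to dma_addr.
	 */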
	addr >>= 1;
	if (write)
		addr &= ~(DMA_WRITE);
	else
		addr |= DMA_WRITE;

	writeb((addr >> 24) & 0xff, &dregs->dma_latch);
	writeb((addr >> 24) & 0xff, &dregs->dma_addr);
	writeb((addr >> 16) & 0xff, &dregs->dma_addr);
	writeb((addr >>  8) & 0xff, &dregs->dma_addr);
	writeb(addr & 0xff, &dregs->dma_addr);

	scsi_esp_cmd(esp, ESP_CMD_DMA);
	zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
	zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);

	scsi_esp_cmd(esp, cmd);
}

/* Blizzard 1230-II DMA */

static void zorro_esp_send_blz1230II_dma_cmd(struct esp *esp, u32 addr,
			u32 esp_count, u32 dma_count, int write, u8 cmd)
{
	struct blz1230II_dma_registers __iomem *dregs = esp->dma_regs;
	u8 phase = esp->sreg & ESP_STAT_PMASK;

	/* Use PIO if transferring message bytes to esp->command_block_dma */
	if (phase == ESP_MIP && addr == esp->command_block_dma) {
		esp_send_pio_cmd(esp, (u32)esp->command_block, esp_count,
				 dma_count, write, cmd);
		return;
	}

	esp->send_cmd_error = 0;
	esp->send_cmd_residual = 0;

	if (write)
		/* DMA receive */
		dma_sync_single_for_device(esp->dev, addr, esp_count,
				DMA_FROM_DEVICE);
	else
		/* DMA send */
		dma_sync_single_for_device(esp->dev, addr, esp_count,
				DMA_TO_DEVICE);

	addr >>= 1;
	if (write)
		addr &= ~(DMA_WRITE);
	else
		addr |= DMA_WRITE;

	writeb((addr >> 24) & 0xff, &dregs->dma_latch);
	writeb((addr >> 16) & 0xff, &dregs->dma_addr);
	writeb((addr >>  8) & 0xff, &dregs->dma_addr);
	writeb(addr & 0xff, &dregs->dma_addr);

	scsi_esp_cmd(esp, ESP_CMD_DMA);
	zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
	zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);

	scsi_esp_cmd(esp, cmd);
}

/* Blizzard 2060 DMA */

static void zorro_esp_send_blz2060_dma_cmd(struct esp *esp, u32 addr,
			u32 esp_count, u32 dma_count, int write, u8 cmd)
{
	struct blz2060_dma_registers __iomem *dregs = esp->dma_regs;
	u8 phase = esp->sreg & ESP_STAT_PMASK;

	/* Use PIO if transferring message bytes to esp->command_block_dma */
	if (phase == ESP_MIP && addr == esp->command_block_dma) {
		esp_send_pio_cmd(esp, (u32)esp->command_block, esp_count,
				 dma_count, write, cmd);
		return;
	}

	esp->send_cmd_error = 0;
	esp->send_cmd_residual = 0;

	if (write)
		/* DMA receive */
		dma_sync_single_for_device(esp->dev, addr, esp_count,
				DMA_FROM_DEVICE);
	else
		/* DMA send */
		dma_sync_single_for_device(esp->dev, addr, esp_count,
				DMA_TO_DEVICE);

	addr >>= 1;
	if (write)
		addr &= ~(DMA_WRITE);
	else
		addr |= DMA_WRITE;

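	/* The Blizzard 2060 takes the address bytes LSB first */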
	writeb(addr & 0xff, &dregs->dma_addr3);
	writeb((addr >>  8) & 0xff, &dregs->dma_addr2);
	writeb((addr >> 16) & 0xff, &dregs->dma_addr1);
	writeb((addr >> 24) & 0xff, &dregs->dma_addr0);

	scsi_esp_cmd(esp, ESP_CMD_DMA);
	zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
	zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);

	scsi_esp_cmd(esp, cmd);
}

/* Cyberstorm I DMA */

static void zorro_esp_send_cyber_dma_cmd(struct esp *esp, u32 addr,
			u32 esp_count, u32 dma_count, int write, u8 cmd)
{
	struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
	struct cyber_dma_registers __iomem *dregs = esp->dma_regs;
	u8 phase = esp->sreg & ESP_STAT_PMASK;
	unsigned char *ctrl_data = &zep->ctrl_data;

	/* Use PIO if transferring message bytes to esp->command_block_dma */
	if (phase == ESP_MIP && addr == esp->command_block_dma) {
		esp_send_pio_cmd(esp, (u32)esp->command_block, esp_count,
				 dma_count, write, cmd);
		return;
	}

	esp->send_cmd_error = 0;
	esp->send_cmd_residual = 0;

	zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
	zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);

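	/*
	 * Sync the buffer for DMA and fold the transfer direction into the
	 * low address bit: cleared when receiving into memory, set when
	 * sending (the direction is also set in ctrl_reg below).
	 */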
	if (write) {
		/* DMA receive */
		dma_sync_single_for_device(esp->dev, addr, esp_count,
				DMA_FROM_DEVICE);
		addr &= ~(1);
	} else {
		/* DMA send */
		dma_sync_single_for_device(esp->dev, addr, esp_count,
				DMA_TO_DEVICE);
		addr |= 1;
	}

	writeb((addr >> 24) & 0xff, &dregs->dma_addr0);
	writeb((addr >> 16) & 0xff, &dregs->dma_addr1);
	writeb((addr >>  8) & 0xff, &dregs->dma_addr2);
	writeb(addr & 0xff, &dregs->dma_addr3);

	if (write)
		*ctrl_data &= ~(CYBER_DMA_WRITE);
	else
		*ctrl_data |= CYBER_DMA_WRITE;

	*ctrl_data &= ~(CYBER_DMA_Z3);	/* Z2, do 16 bit DMA */

	writeb(*ctrl_data, &dregs->ctrl_reg);

	scsi_esp_cmd(esp, cmd);
}

/* Cyberstorm II DMA */

static void zorro_esp_send_cyberII_dma_cmd(struct esp *esp, u32 addr,
			u32 esp_count, u32 dma_count, int write, u8 cmd)
{
	struct cyberII_dma_registers __iomem *dregs = esp->dma_regs;
	u8 phase = esp->sreg & ESP_STAT_PMASK;

	/* Use PIO if transferring message bytes to esp->command_block_dma */
	if (phase == ESP_MIP && addr == esp->command_block_dma) {
		esp_send_pio_cmd(esp, (u32)esp->command_block, esp_count,
				 dma_count, write, cmd);
		return;
	}

	esp->send_cmd_error = 0;
	esp->send_cmd_residual = 0;

	zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
	zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);

	if (write) {
		/* DMA receive */
		dma_sync_single_for_device(esp->dev, addr, esp_count,
				DMA_FROM_DEVICE);
		addr &= ~(1);
	} else {
		/* DMA send */
		dma_sync_single_for_device(esp->dev, addr, esp_count,
				DMA_TO_DEVICE);
		addr |= 1;
	}

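	/*
	 * Unlike the Cyberstorm I path above, no control register is
	 * written here; only the DMA address (with the direction folded
	 * into its low bit) is set up.
	 */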
	writeb((addr >> 24) & 0xff, &dregs->dma_addr0);
	writeb((addr >> 16) & 0xff, &dregs->dma_addr1);
	writeb((addr >>  8) & 0xff, &dregs->dma_addr2);
	writeb(addr & 0xff, &dregs->dma_addr3);

	scsi_esp_cmd(esp, cmd);
}

/* Fastlane DMA */

static void zorro_esp_send_fastlane_dma_cmd(struct esp *esp, u32 addr,
			u32 esp_count, u32 dma_count, int write, u8 cmd)
{
	struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
	struct fastlane_dma_registers __iomem *dregs = esp->dma_regs;
	u8 phase = esp->sreg & ESP_STAT_PMASK;
	unsigned char *ctrl_data = &zep->ctrl_data;

	/* Use PIO if transferring message bytes to esp->command_block_dma */
	if (phase == ESP_MIP && addr == esp->command_block_dma) {
		esp_send_pio_cmd(esp, (u32)esp->command_block, esp_count,
				 dma_count, write, cmd);
		return;
	}

	esp->send_cmd_error = 0;
	esp->send_cmd_residual = 0;

	zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
	zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);

	if (write) {
		/* DMA receive */
		dma_sync_single_for_device(esp->dev, addr, esp_count,
				DMA_FROM_DEVICE);
		addr &= ~(1);
	} else {
		/* DMA send */
		dma_sync_single_for_device(esp->dev, addr, esp_count,
				DMA_TO_DEVICE);
		addr |= 1;
	}

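	/*
	 * Pulse the clear strobe, then write the DMA address through the
	 * board's Zorro III window (offset by the low 24 address bits)
	 * before enabling DMA via the control register below.
	 */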
	writeb(0, &dregs->clear_strobe);
	z_writel(addr, ((addr & 0x00ffffff) + zep->board_base));

	if (write) {
		*ctrl_data = (*ctrl_data & FASTLANE_DMA_MASK) |
				FASTLANE_DMA_ENABLE;
	} else {
		*ctrl_data = ((*ctrl_data & FASTLANE_DMA_MASK) |
				FASTLANE_DMA_ENABLE |
				FASTLANE_DMA_WRITE);
	}

	writeb(*ctrl_data, &dregs->ctrl_reg);

	scsi_esp_cmd(esp, cmd);
}

static int zorro_esp_dma_error(struct esp *esp)
{
	return esp->send_cmd_error;
}

/* per-board ESP driver ops */

static const struct esp_driver_ops blz1230_esp_ops = {
	.esp_write8		= zorro_esp_write8,
	.esp_read8		= zorro_esp_read8,
	.irq_pending		= zorro_esp_irq_pending,
	.dma_length_limit	= zorro_esp_dma_length_limit,
	.reset_dma		= zorro_esp_reset_dma,
	.dma_drain		= zorro_esp_dma_drain,
	.dma_invalidate		= zorro_esp_dma_invalidate,
	.send_dma_cmd		= zorro_esp_send_blz1230_dma_cmd,
	.dma_error		= zorro_esp_dma_error,
};

static const struct esp_driver_ops blz1230II_esp_ops = {
	.esp_write8		= zorro_esp_write8,
	.esp_read8		= zorro_esp_read8,
	.irq_pending		= zorro_esp_irq_pending,
	.dma_length_limit	= zorro_esp_dma_length_limit,
	.reset_dma		= zorro_esp_reset_dma,
	.dma_drain		= zorro_esp_dma_drain,
	.dma_invalidate		= zorro_esp_dma_invalidate,
	.send_dma_cmd		= zorro_esp_send_blz1230II_dma_cmd,
	.dma_error		= zorro_esp_dma_error,
};

static const struct esp_driver_ops blz2060_esp_ops = {
	.esp_write8		= zorro_esp_write8,
	.esp_read8		= zorro_esp_read8,
	.irq_pending		= zorro_esp_irq_pending,
	.dma_length_limit	= zorro_esp_dma_length_limit,
	.reset_dma		= zorro_esp_reset_dma,
	.dma_drain		= zorro_esp_dma_drain,
	.dma_invalidate		= zorro_esp_dma_invalidate,
	.send_dma_cmd		= zorro_esp_send_blz2060_dma_cmd,
	.dma_error		= zorro_esp_dma_error,
};

static const struct esp_driver_ops cyber_esp_ops = {
	.esp_write8		= zorro_esp_write8,
	.esp_read8		= zorro_esp_read8,
	.irq_pending		= cyber_esp_irq_pending,
	.dma_length_limit	= zorro_esp_dma_length_limit,
	.reset_dma		= zorro_esp_reset_dma,
	.dma_drain		= zorro_esp_dma_drain,
	.dma_invalidate		= zorro_esp_dma_invalidate,
	.send_dma_cmd		= zorro_esp_send_cyber_dma_cmd,
	.dma_error		= zorro_esp_dma_error,
};

static const struct esp_driver_ops cyberII_esp_ops = {
	.esp_write8		= zorro_esp_write8,
	.esp_read8		= zorro_esp_read8,
	.irq_pending		= zorro_esp_irq_pending,
	.dma_length_limit	= zorro_esp_dma_length_limit,
	.reset_dma		= zorro_esp_reset_dma,
	.dma_drain		= zorro_esp_dma_drain,
	.dma_invalidate		= zorro_esp_dma_invalidate,
	.send_dma_cmd		= zorro_esp_send_cyberII_dma_cmd,
	.dma_error		= zorro_esp_dma_error,
};

static const struct esp_driver_ops fastlane_esp_ops = {
	.esp_write8		= zorro_esp_write8,
	.esp_read8		= zorro_esp_read8,
	.irq_pending		= fastlane_esp_irq_pending,
	.dma_length_limit	= fastlane_esp_dma_length_limit,
	.reset_dma		= zorro_esp_reset_dma,
	.dma_drain		= zorro_esp_dma_drain,
	.dma_invalidate		= fastlane_esp_dma_invalidate,
	.send_dma_cmd		= zorro_esp_send_fastlane_dma_cmd,
	.dma_error		= zorro_esp_dma_error,
};

/* Zorro driver config data */

struct zorro_driver_data {
	const char *name;
	unsigned long offset;
	unsigned long dma_offset;
	int absolute;	/* offset is absolute address */
	int scsi_option;
	const struct esp_driver_ops *esp_ops;
};

/* board types */

enum {
	ZORRO_BLZ1230,
	ZORRO_BLZ1230II,
	ZORRO_BLZ2060,
	ZORRO_CYBER,
	ZORRO_CYBERII,
	ZORRO_FASTLANE,
};

/* per-board config data */

static const struct zorro_driver_data zorro_esp_boards[] = {
	[ZORRO_BLZ1230] = {
				.name		= "Blizzard 1230",
				.offset		= 0x8000,
				.dma_offset	= 0x10000,
				.scsi_option	= 1,
				.esp_ops	= &blz1230_esp_ops,
	},
	[ZORRO_BLZ1230II] = {
				.name		= "Blizzard 1230II",
				.offset		= 0x10000,
				.dma_offset	= 0x10021,
				.scsi_option	= 1,
				.esp_ops	= &blz1230II_esp_ops,
	},
	[ZORRO_BLZ2060] = {
				.name		= "Blizzard 2060",
				.offset		= 0x1ff00,
				.dma_offset	= 0x1ffe0,
				.esp_ops	= &blz2060_esp_ops,
	},
	[ZORRO_CYBER] = {
				.name		= "CyberStormI",
				.offset		= 0xf400,
				.dma_offset	= 0xf800,
				.esp_ops	= &cyber_esp_ops,
	},
	[ZORRO_CYBERII] = {
				.name		= "CyberStormII",
				.offset		= 0x1ff03,
				.dma_offset	= 0x1ff43,
				.scsi_option	= 1,
				.esp_ops	= &cyberII_esp_ops,
	},
	[ZORRO_FASTLANE] = {
				.name		= "Fastlane",
				.offset		= 0x1000001,
				.dma_offset	= 0x1000041,
				.esp_ops	= &fastlane_esp_ops,
	},
};

static const struct zorro_device_id zorro_esp_zorro_tbl[] = {
	{	/* Blizzard 1230 IV */
		.id = ZORRO_ID(PHASE5, 0x11, 0),
		.driver_data = ZORRO_BLZ1230,
	},
	{	/* Blizzard 1230 II (Zorro II) or Fastlane (Zorro III) */
		.id = ZORRO_ID(PHASE5, 0x0B, 0),
		.driver_data = ZORRO_BLZ1230II,
	},
	{	/* Blizzard 2060 */
		.id = ZORRO_ID(PHASE5, 0x18, 0),
		.driver_data = ZORRO_BLZ2060,
	},
	{	/* Cyberstorm */
		.id = ZORRO_ID(PHASE5, 0x0C, 0),
		.driver_data = ZORRO_CYBER,
	},
	{	/* Cyberstorm II */
		.id = ZORRO_ID(PHASE5, 0x19, 0),
		.driver_data = ZORRO_CYBERII,
	},
	{ 0 }
};
MODULE_DEVICE_TABLE(zorro, zorro_esp_zorro_tbl);

static int zorro_esp_probe(struct zorro_dev *z,
				       const struct zorro_device_id *ent)
{
	const struct scsi_host_template *tpnt = &scsi_esp_template;
	struct Scsi_Host *host;
	struct esp *esp;
	const struct zorro_driver_data *zdd;
	struct zorro_esp_priv *zep;
	unsigned long board, ioaddr, dmaaddr;
	int err;

	board = zorro_resource_start(z);
	zdd = &zorro_esp_boards[ent->driver_data];

	pr_info("%s found at address 0x%lx.\n", zdd->name, board);

	zep = kzalloc(sizeof(*zep), GFP_KERNEL);
	if (!zep) {
		pr_err("Can't allocate device private data!\n");
		return -ENOMEM;
	}

	/* let's figure out whether we have a Zorro II or Zorro III board */
	if ((z->rom.er_Type & ERT_TYPEMASK) == ERT_ZORROIII) {
		if (board > 0xffffff)
			zep->zorro3 = 1;
	} else {
		/*
		 * Even though most of these boards identify as Zorro II,
		 * they are in fact CPU expansion slot boards and have full
		 * access to all of memory. Fix up DMA bitmask here.
		 */
		z->dev.coherent_dma_mask = DMA_BIT_MASK(32);
	}

	/*
	 * If Zorro III and ID matches Fastlane, our device table entry
	 * contains data for the Blizzard 1230 II board which does share the
	 * same ID. Fix up device table entry here.
	 * TODO: Some Cyberstorm 060 boards also share this ID but would need
	 * to use the Cyberstorm I driver data ... we catch this by checking
	 * for presence of ESP chip later, but don't try to fix up yet.
	 */
	if (zep->zorro3 && ent->driver_data == ZORRO_BLZ1230II) {
		pr_info("%s at address 0x%lx is Fastlane Z3, fixing data!\n",
			zdd->name, board);
		zdd = &zorro_esp_boards[ZORRO_FASTLANE];
	}

	if (zdd->absolute) {
		ioaddr  = zdd->offset;
		dmaaddr = zdd->dma_offset;
	} else {
		ioaddr  = board + zdd->offset;
		dmaaddr = board + zdd->dma_offset;
	}

	if (!zorro_request_device(z, zdd->name)) {
		pr_err("cannot reserve region 0x%lx, abort\n",
		       board);
		err = -EBUSY;
		goto fail_free_zep;
	}

	host = scsi_host_alloc(tpnt, sizeof(struct esp));

	if (!host) {
		pr_err("No host detected; board configuration problem?\n");
		err = -ENOMEM;
		goto fail_release_device;
	}

	host->base		= ioaddr;
	host->this_id		= 7;

	esp			= shost_priv(host);
	esp->host		= host;
	esp->dev		= &z->dev;

	esp->scsi_id		= host->this_id;
	esp->scsi_id_mask	= (1 << esp->scsi_id);

	esp->cfreq = 40000000;

	zep->esp = esp;

	dev_set_drvdata(esp->dev, zep);

	/* additional setup required for Fastlane */
	if (zep->zorro3 && ent->driver_data == ZORRO_BLZ1230II) {
		/* map full address space up to ESP base for DMA */
		zep->board_base = ioremap(board, FASTLANE_ESP_ADDR - 1);
		if (!zep->board_base) {
			pr_err("Cannot allocate board address space\n");
			err = -ENOMEM;
			goto fail_free_host;
		}
		/* initialize DMA control shadow register */
		zep->ctrl_data = (FASTLANE_DMA_FCODE |
				  FASTLANE_DMA_EDI | FASTLANE_DMA_ESI);
	}

	esp->ops = zdd->esp_ops;

	if (ioaddr > 0xffffff)
		esp->regs = ioremap(ioaddr, 0x20);
	else
		/* ZorroII address space remapped nocache by early startup */
		esp->regs = ZTWO_VADDR(ioaddr);

	if (!esp->regs) {
		err = -ENOMEM;
		goto fail_unmap_fastlane;
	}

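	/* The FIFO data register uses the same 4-byte register stride */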
	esp->fifo_reg = esp->regs + ESP_FDATA * 4;

	/* Check whether a Blizzard 12x0 or CyberstormII really has SCSI */
	if (zdd->scsi_option) {
		zorro_esp_write8(esp, (ESP_CONFIG1_PENABLE | 7), ESP_CFG1);
		if (zorro_esp_read8(esp, ESP_CFG1) != (ESP_CONFIG1_PENABLE|7)) {
			err = -ENODEV;
			goto fail_unmap_regs;
		}
	}

	if (zep->zorro3) {
		/*
		 * Only Fastlane Z3 for now - add switch for correct struct
		 * dma_registers size if adding any more
		 */
		esp->dma_regs = ioremap(dmaaddr,
					sizeof(struct fastlane_dma_registers));
	} else
		/* ZorroII address space remapped nocache by early startup */
		esp->dma_regs = ZTWO_VADDR(dmaaddr);

	if (!esp->dma_regs) {
		err = -ENOMEM;
		goto fail_unmap_regs;
	}

	esp->command_block = dma_alloc_coherent(esp->dev, 16,
						&esp->command_block_dma,
						GFP_KERNEL);

	if (!esp->command_block) {
		err = -ENOMEM;
		goto fail_unmap_dma_regs;
	}

	host->irq = IRQ_AMIGA_PORTS;
	err = request_irq(host->irq, scsi_esp_intr, IRQF_SHARED,
			  "Amiga Zorro ESP", esp);
	if (err < 0) {
		err = -ENODEV;
		goto fail_free_command_block;
	}

	/* register the chip */
	err = scsi_esp_register(esp);

	if (err) {
		err = -ENOMEM;
		goto fail_free_irq;
	}

	return 0;

fail_free_irq:
	free_irq(host->irq, esp);

fail_free_command_block:
	dma_free_coherent(esp->dev, 16,
			  esp->command_block,
			  esp->command_block_dma);

fail_unmap_dma_regs:
	if (zep->zorro3)
		iounmap(esp->dma_regs);

fail_unmap_regs:
	if (ioaddr > 0xffffff)
		iounmap(esp->regs);

fail_unmap_fastlane:
	if (zep->zorro3)
		iounmap(zep->board_base);

fail_free_host:
	scsi_host_put(host);

fail_release_device:
	zorro_release_device(z);

fail_free_zep:
	kfree(zep);

	return err;
}

static void zorro_esp_remove(struct zorro_dev *z)
{
	struct zorro_esp_priv *zep = dev_get_drvdata(&z->dev);
	struct esp *esp	= zep->esp;
	struct Scsi_Host *host = esp->host;

	scsi_esp_unregister(esp);

	free_irq(host->irq, esp);
	dma_free_coherent(esp->dev, 16,
			  esp->command_block,
			  esp->command_block_dma);

	if (zep->zorro3) {
		iounmap(zep->board_base);
		iounmap(esp->dma_regs);
	}

	if (host->base > 0xffffff)
		iounmap(esp->regs);

	scsi_host_put(host);

	zorro_release_device(z);

	kfree(zep);
}

static struct zorro_driver zorro_esp_driver = {
	.name	  = KBUILD_MODNAME,
	.id_table = zorro_esp_zorro_tbl,
	.probe	  = zorro_esp_probe,
	.remove	  = zorro_esp_remove,
};

static int __init zorro_esp_scsi_init(void)
{
	return zorro_register_driver(&zorro_esp_driver);
}

static void __exit zorro_esp_scsi_exit(void)
{
	zorro_unregister_driver(&zorro_esp_driver);
}

module_init(zorro_esp_scsi_init);
module_exit(zorro_esp_scsi_exit);