/* sun_esp.c: ESP front-end for Sparc SBUS systems.
 *
 * Copyright (C) 2007 David S. Miller (davem@davemloft.net)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/init.h>

#include <asm/irq.h>
#include <asm/io.h>
#include <asm/dma.h>

#include <asm/sbus.h>

#include <scsi/scsi_host.h>

#include "esp_scsi.h"

#define DRV_MODULE_NAME		"sun_esp"
#define PFX DRV_MODULE_NAME	": "
#define DRV_VERSION		"1.000"
#define DRV_MODULE_RELDATE	"April 19, 2007"

#define dma_read32(REG) \
	sbus_readl(esp->dma_regs + (REG))
#define dma_write32(VAL, REG) \
	sbus_writel((VAL), esp->dma_regs + (REG))

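/* Find the DVMA controller that fronts this ESP.  If the probe code
 * handed us an explicit DMA node we match on it directly; otherwise we
 * walk the global DVMA list looking for a "dma" or "espdma" node on
 * the same bus and slot as the ESP itself.
 */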
static int __devinit esp_sbus_find_dma(struct esp *esp, struct sbus_dev *dma_sdev)
{
	struct sbus_dev *sdev = esp->dev;
	struct sbus_dma *dma;

	if (dma_sdev != NULL) {
		for_each_dvma(dma) {
			if (dma->sdev == dma_sdev)
				break;
		}
	} else {
		for_each_dvma(dma) {
			if (dma->sdev == NULL)
				break;

			/* If bus + slot are the same and it has the
			 * correct OBP name, it's ours.
			 */
			if (sdev->bus == dma->sdev->bus &&
			    sdev->slot == dma->sdev->slot &&
			    (!strcmp(dma->sdev->prom_name, "dma") ||
			     !strcmp(dma->sdev->prom_name, "espdma")))
				break;
		}
	}

	if (dma == NULL) {
		printk(KERN_ERR PFX "[%s] Cannot find dma.\n",
		       sdev->ofdev.node->full_name);
		return -ENODEV;
	}
	esp->dma = dma;
	esp->dma_regs = dma->regs;

	return 0;
}

static int __devinit esp_sbus_map_regs(struct esp *esp, int hme)
{
	struct sbus_dev *sdev = esp->dev;
	struct resource *res;

	/* On HME, two reg sets exist, first is DVMA,
	 * second is ESP registers.
	 */
	if (hme)
		res = &sdev->resource[1];
	else
		res = &sdev->resource[0];

	esp->regs = sbus_ioremap(res, 0, SBUS_ESP_REG_SIZE, "ESP");
	if (!esp->regs)
		return -ENOMEM;

	return 0;
}

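/* A small DMA-consistent buffer that the esp_scsi core issues its
 * select/command bytes from; 16 bytes is what is allocated for it here.
 */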
static int __devinit esp_sbus_map_command_block(struct esp *esp)
{
	struct sbus_dev *sdev = esp->dev;

	esp->command_block = sbus_alloc_consistent(sdev, 16,
						   &esp->command_block_dma);
	if (!esp->command_block)
		return -ENOMEM;
	return 0;
}

static int __devinit esp_sbus_register_irq(struct esp *esp)
{
	struct Scsi_Host *host = esp->host;
	struct sbus_dev *sdev = esp->dev;

	host->irq = sdev->irqs[0];
	return request_irq(host->irq, scsi_esp_intr, IRQF_SHARED, "ESP", esp);
}

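/* Determine our initiator ID from OBP: try "initiator-id" and then
 * "scsi-initiator-id" on the device node, then "scsi-initiator-id" on
 * the parent bus node, and finally fall back to the traditional ID 7.
 */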
static void __devinit esp_get_scsi_id(struct esp *esp)
{
	struct sbus_dev *sdev = esp->dev;
	struct device_node *dp = sdev->ofdev.node;

	esp->scsi_id = of_getintprop_default(dp, "initiator-id", 0xff);
	if (esp->scsi_id != 0xff)
		goto done;

	esp->scsi_id = of_getintprop_default(dp, "scsi-initiator-id", 0xff);
	if (esp->scsi_id != 0xff)
		goto done;

	if (!sdev->bus) {
		/* SUN4 */
		esp->scsi_id = 7;
		goto done;
	}

	esp->scsi_id = of_getintprop_default(sdev->bus->ofdev.node,
					     "scsi-initiator-id", 7);

done:
	esp->host->this_id = esp->scsi_id;
	esp->scsi_id_mask = (1 << esp->scsi_id);
}

static void __devinit esp_get_differential(struct esp *esp)
{
	struct sbus_dev *sdev = esp->dev;
	struct device_node *dp = sdev->ofdev.node;

	if (of_find_property(dp, "differential", NULL))
		esp->flags |= ESP_FLAG_DIFFERENTIAL;
	else
		esp->flags &= ~ESP_FLAG_DIFFERENTIAL;
}

static void __devinit esp_get_clock_params(struct esp *esp)
{
	struct sbus_dev *sdev = esp->dev;
	struct device_node *dp = sdev->ofdev.node;
	struct device_node *bus_dp;
	int fmhz;

	bus_dp = NULL;
	if (sdev != NULL && sdev->bus != NULL)
		bus_dp = sdev->bus->ofdev.node;

	fmhz = of_getintprop_default(dp, "clock-frequency", 0);
	if (fmhz == 0)
		fmhz = (!bus_dp) ? 0 :
			of_getintprop_default(bus_dp, "clock-frequency", 0);

	esp->cfreq = fmhz;
}

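/* Compute the usable burst sizes as the intersection of the
 * "burst-sizes" masks advertised by the ESP node, the DVMA node and the
 * parent SBUS node, falling back to a conservative default when the
 * properties are missing or 16/32-byte bursts are unsupported.
 */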
static void __devinit esp_get_bursts(struct esp *esp, struct sbus_dev *dma)
{
	struct sbus_dev *sdev = esp->dev;
	struct device_node *dp = sdev->ofdev.node;
	u8 bursts;

	bursts = of_getintprop_default(dp, "burst-sizes", 0xff);
	if (dma) {
		struct device_node *dma_dp = dma->ofdev.node;
		u8 val = of_getintprop_default(dma_dp, "burst-sizes", 0xff);
		if (val != 0xff)
			bursts &= val;
	}

	if (sdev->bus) {
		u8 val = of_getintprop_default(sdev->bus->ofdev.node,
					       "burst-sizes", 0xff);
		if (val != 0xff)
			bursts &= val;
	}

	if (bursts == 0xff ||
	    (bursts & DMA_BURST16) == 0 ||
	    (bursts & DMA_BURST32) == 0)
		bursts = (DMA_BURST32 - 1);

	esp->bursts = bursts;
}

static void __devinit esp_sbus_get_props(struct esp *esp, struct sbus_dev *espdma)
{
	esp_get_scsi_id(esp);
	esp_get_differential(esp);
	esp_get_clock_params(esp);
	esp_get_bursts(esp, espdma);
}

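/* The byte-wide ESP registers are spaced four bytes apart on SBUS,
 * hence the "reg * 4UL" scaling in the accessors below.
 */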
static void sbus_esp_write8(struct esp *esp, u8 val, unsigned long reg)
{
	sbus_writeb(val, esp->regs + (reg * 4UL));
}

static u8 sbus_esp_read8(struct esp *esp, unsigned long reg)
{
	return sbus_readb(esp->regs + (reg * 4UL));
}

static dma_addr_t sbus_esp_map_single(struct esp *esp, void *buf,
				      size_t sz, int dir)
{
	return sbus_map_single(esp->dev, buf, sz, dir);
}

static int sbus_esp_map_sg(struct esp *esp, struct scatterlist *sg,
			   int num_sg, int dir)
{
	return sbus_map_sg(esp->dev, sg, num_sg, dir);
}

static void sbus_esp_unmap_single(struct esp *esp, dma_addr_t addr,
				  size_t sz, int dir)
{
	sbus_unmap_single(esp->dev, addr, sz, dir);
}

static void sbus_esp_unmap_sg(struct esp *esp, struct scatterlist *sg,
			      int num_sg, int dir)
{
	sbus_unmap_sg(esp->dev, sg, num_sg, dir);
}

static int sbus_esp_irq_pending(struct esp *esp)
{
	if (dma_read32(DMA_CSR) & (DMA_HNDL_INTR | DMA_HNDL_ERROR))
		return 1;
	return 0;
}

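/* Put the DVMA controller back into a sane state.  The exact recipe
 * depends on the DVMA revision; the HME (FAS366) path additionally
 * rebuilds the cached CSR value in esp->prev_hme_dmacsr.
 */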
static void sbus_esp_reset_dma(struct esp *esp)
{
	int can_do_burst16, can_do_burst32, can_do_burst64;
	int can_do_sbus64, lim;
	u32 val;

	can_do_burst16 = (esp->bursts & DMA_BURST16) != 0;
	can_do_burst32 = (esp->bursts & DMA_BURST32) != 0;
	can_do_burst64 = 0;
	can_do_sbus64 = 0;
	if (sbus_can_dma_64bit(esp->dev))
		can_do_sbus64 = 1;
	if (sbus_can_burst64(esp->dev))
		can_do_burst64 = (esp->bursts & DMA_BURST64) != 0;

	/* Put the DVMA into a known state. */
	if (esp->dma->revision != dvmahme) {
		val = dma_read32(DMA_CSR);
		dma_write32(val | DMA_RST_SCSI, DMA_CSR);
		dma_write32(val & ~DMA_RST_SCSI, DMA_CSR);
	}
	switch (esp->dma->revision) {
	case dvmahme:
		dma_write32(DMA_RESET_FAS366, DMA_CSR);
		dma_write32(DMA_RST_SCSI, DMA_CSR);

		esp->prev_hme_dmacsr = (DMA_PARITY_OFF | DMA_2CLKS |
					DMA_SCSI_DISAB | DMA_INT_ENAB);

		esp->prev_hme_dmacsr &= ~(DMA_ENABLE | DMA_ST_WRITE |
					  DMA_BRST_SZ);

		if (can_do_burst64)
			esp->prev_hme_dmacsr |= DMA_BRST64;
		else if (can_do_burst32)
			esp->prev_hme_dmacsr |= DMA_BRST32;

		if (can_do_sbus64) {
			esp->prev_hme_dmacsr |= DMA_SCSI_SBUS64;
			sbus_set_sbus64(esp->dev, esp->bursts);
		}

		lim = 1000;
		while (dma_read32(DMA_CSR) & DMA_PEND_READ) {
			if (--lim == 0) {
				printk(KERN_ALERT PFX "esp%d: DMA_PEND_READ "
				       "will not clear!\n",
				       esp->host->unique_id);
				break;
			}
			udelay(1);
		}

		dma_write32(0, DMA_CSR);
		dma_write32(esp->prev_hme_dmacsr, DMA_CSR);

		dma_write32(0, DMA_ADDR);
		break;

	case dvmarev2:
		if (esp->rev != ESP100) {
			val = dma_read32(DMA_CSR);
			dma_write32(val | DMA_3CLKS, DMA_CSR);
		}
		break;

	case dvmarev3:
		val = dma_read32(DMA_CSR);
		val &= ~DMA_3CLKS;
		val |= DMA_2CLKS;
		if (can_do_burst32) {
			val &= ~DMA_BRST_SZ;
			val |= DMA_BRST32;
		}
		dma_write32(val, DMA_CSR);
		break;

	case dvmaesc1:
		val = dma_read32(DMA_CSR);
		val |= DMA_ADD_ENABLE;
		val &= ~DMA_BCNT_ENAB;
		if (!can_do_burst32 && can_do_burst16) {
			val |= DMA_ESC_BURST;
		} else {
			val &= ~(DMA_ESC_BURST);
		}
		dma_write32(val, DMA_CSR);
		break;

	default:
		break;
	}

	/* Enable interrupts.  */
	val = dma_read32(DMA_CSR);
	dma_write32(val | DMA_INT_ENAB, DMA_CSR);
}

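/* Wait for the DVMA FIFO to drain to memory after a transfer.  The HME
 * DVMA has no drain step, and only some revisions need the drain to be
 * started explicitly.
 */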
static void sbus_esp_dma_drain(struct esp *esp)
{
	u32 csr;
	int lim;

	if (esp->dma->revision == dvmahme)
		return;

	csr = dma_read32(DMA_CSR);
	if (!(csr & DMA_FIFO_ISDRAIN))
		return;

	if (esp->dma->revision != dvmarev3 && esp->dma->revision != dvmaesc1)
		dma_write32(csr | DMA_FIFO_STDRAIN, DMA_CSR);

	lim = 1000;
	while (dma_read32(DMA_CSR) & DMA_FIFO_ISDRAIN) {
		if (--lim == 0) {
			printk(KERN_ALERT PFX "esp%d: DMA will not drain!\n",
			       esp->host->unique_id);
			break;
		}
		udelay(1);
	}
}

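/* Throw away whatever state the DVMA is holding for the current
 * transfer.  On HME this is a SCSI-side reset plus a CSR rewrite; on
 * older revisions we wait for pending reads and pulse DMA_FIFO_INV.
 */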
static void sbus_esp_dma_invalidate(struct esp *esp)
{
	if (esp->dma->revision == dvmahme) {
		dma_write32(DMA_RST_SCSI, DMA_CSR);

		esp->prev_hme_dmacsr = ((esp->prev_hme_dmacsr |
					 (DMA_PARITY_OFF | DMA_2CLKS |
					  DMA_SCSI_DISAB | DMA_INT_ENAB)) &
					~(DMA_ST_WRITE | DMA_ENABLE));

		dma_write32(0, DMA_CSR);
		dma_write32(esp->prev_hme_dmacsr, DMA_CSR);

		/* This is necessary to avoid having the SCSI channel
		 * engine lock up on us.
		 */
		dma_write32(0, DMA_ADDR);
	} else {
		u32 val;
		int lim;

		lim = 1000;
		while ((val = dma_read32(DMA_CSR)) & DMA_PEND_READ) {
			if (--lim == 0) {
				printk(KERN_ALERT PFX "esp%d: DMA will not "
				       "invalidate!\n", esp->host->unique_id);
				break;
			}
			udelay(1);
		}

		val &= ~(DMA_ENABLE | DMA_ST_WRITE | DMA_BCNT_ENAB);
		val |= DMA_FIFO_INV;
		dma_write32(val, DMA_CSR);
		val &= ~DMA_FIFO_INV;
		dma_write32(val, DMA_CSR);
	}
}

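/* Program a DMA transfer: load the transfer count into the ESP (plus
 * the extended count registers on FASHME), point the DVMA at the
 * buffer, and issue the chip command.  On ESC1 the byte count written
 * to the DVMA is padded out to the next page boundary.
 */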
static void sbus_esp_send_dma_cmd(struct esp *esp, u32 addr, u32 esp_count,
				  u32 dma_count, int write, u8 cmd)
{
	u32 csr;

	BUG_ON(!(cmd & ESP_CMD_DMA));

	sbus_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
	sbus_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
	if (esp->rev == FASHME) {
		sbus_esp_write8(esp, (esp_count >> 16) & 0xff, FAS_RLO);
		sbus_esp_write8(esp, 0, FAS_RHI);

		scsi_esp_cmd(esp, cmd);

		csr = esp->prev_hme_dmacsr;
		csr |= DMA_SCSI_DISAB | DMA_ENABLE;
		if (write)
			csr |= DMA_ST_WRITE;
		else
			csr &= ~DMA_ST_WRITE;
		esp->prev_hme_dmacsr = csr;

		dma_write32(dma_count, DMA_COUNT);
		dma_write32(addr, DMA_ADDR);
		dma_write32(csr, DMA_CSR);
	} else {
		csr = dma_read32(DMA_CSR);
		csr |= DMA_ENABLE;
		if (write)
			csr |= DMA_ST_WRITE;
		else
			csr &= ~DMA_ST_WRITE;
		dma_write32(csr, DMA_CSR);
		if (esp->dma->revision == dvmaesc1) {
			u32 end = PAGE_ALIGN(addr + dma_count + 16U);
			dma_write32(end - addr, DMA_COUNT);
		}
		dma_write32(addr, DMA_ADDR);

		scsi_esp_cmd(esp, cmd);
	}
}

static int sbus_esp_dma_error(struct esp *esp)
{
	u32 csr = dma_read32(DMA_CSR);

	if (csr & DMA_HNDL_ERROR)
		return 1;

	return 0;
}

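/* Hooks handed to the esp_scsi core; everything chip-generic lives
 * there, and these callbacks supply the SBUS/DVMA specifics.
 */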
static const struct esp_driver_ops sbus_esp_ops = {
	.esp_write8	=	sbus_esp_write8,
	.esp_read8	=	sbus_esp_read8,
	.map_single	=	sbus_esp_map_single,
	.map_sg		=	sbus_esp_map_sg,
	.unmap_single	=	sbus_esp_unmap_single,
	.unmap_sg	=	sbus_esp_unmap_sg,
	.irq_pending	=	sbus_esp_irq_pending,
	.reset_dma	=	sbus_esp_reset_dma,
	.dma_drain	=	sbus_esp_dma_drain,
	.dma_invalidate	=	sbus_esp_dma_invalidate,
	.send_dma_cmd	=	sbus_esp_send_dma_cmd,
	.dma_error	=	sbus_esp_dma_error,
};

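/* Common probe path: allocate the Scsi_Host, wire up the DVMA, map the
 * chip registers and command block, grab the interrupt, read the OBP
 * properties, and hand the result to scsi_esp_register().
 */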
static int __devinit esp_sbus_probe_one(struct device *dev,
					struct sbus_dev *esp_dev,
					struct sbus_dev *espdma,
					struct sbus_bus *sbus,
					int hme)
{
	struct scsi_host_template *tpnt = &scsi_esp_template;
	struct Scsi_Host *host;
	struct esp *esp;
	int err;

	host = scsi_host_alloc(tpnt, sizeof(struct esp));

	err = -ENOMEM;
	if (!host)
		goto fail;

	host->max_id = (hme ? 16 : 8);
	esp = host_to_esp(host);

	esp->host = host;
	esp->dev = esp_dev;
	esp->ops = &sbus_esp_ops;

	if (hme)
		esp->flags |= ESP_FLAG_WIDE_CAPABLE;

	err = esp_sbus_find_dma(esp, espdma);
	if (err < 0)
		goto fail_unlink;

	err = esp_sbus_map_regs(esp, hme);
	if (err < 0)
		goto fail_unlink;

	err = esp_sbus_map_command_block(esp);
	if (err < 0)
		goto fail_unmap_regs;

	err = esp_sbus_register_irq(esp);
	if (err < 0)
		goto fail_unmap_command_block;

	esp_sbus_get_props(esp, espdma);

	/* The ESC1 DVMA can come up with the SCSI reset bit set, so
	 * make sure that is clear before we touch the ESP chip.
	 */
	if (esp->dma->revision == dvmaesc1) {
		u32 val = dma_read32(DMA_CSR);

		dma_write32(val & ~DMA_RST_SCSI, DMA_CSR);
	}

	dev_set_drvdata(&esp_dev->ofdev.dev, esp);

	err = scsi_esp_register(esp, dev);
	if (err)
		goto fail_free_irq;

	return 0;

fail_free_irq:
	free_irq(host->irq, esp);
fail_unmap_command_block:
	sbus_free_consistent(esp->dev, 16,
			     esp->command_block,
			     esp->command_block_dma);
fail_unmap_regs:
	sbus_iounmap(esp->regs, SBUS_ESP_REG_SIZE);
fail_unlink:
	scsi_host_put(host);
fail:
	return err;
}

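/* An "esp"/"SUNW,esp" node under an "espdma" or "dma" parent uses that
 * parent as its DVMA; a "SUNW,fas" node is an HME with the DVMA in its
 * own register set.
 */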
static int __devinit esp_sbus_probe(struct of_device *dev, const struct of_device_id *match)
{
	struct sbus_dev *sdev = to_sbus_device(&dev->dev);
	struct device_node *dp = dev->node;
	struct sbus_dev *dma_sdev = NULL;
	int hme = 0;

	if (dp->parent &&
	    (!strcmp(dp->parent->name, "espdma") ||
	     !strcmp(dp->parent->name, "dma")))
		dma_sdev = sdev->parent;
	else if (!strcmp(dp->name, "SUNW,fas")) {
		dma_sdev = sdev;
		hme = 1;
	}

	return esp_sbus_probe_one(&dev->dev, sdev, dma_sdev,
				  sdev->bus, hme);
}

static int __devexit esp_sbus_remove(struct of_device *dev)
{
	struct esp *esp = dev_get_drvdata(&dev->dev);
	unsigned int irq = esp->host->irq;
	u32 val;

	scsi_esp_unregister(esp);

	/* Disable interrupts.  */
	val = dma_read32(DMA_CSR);
	dma_write32(val & ~DMA_INT_ENAB, DMA_CSR);

	free_irq(irq, esp);
	sbus_free_consistent(esp->dev, 16,
			     esp->command_block,
			     esp->command_block_dma);
	sbus_iounmap(esp->regs, SBUS_ESP_REG_SIZE);

	scsi_host_put(esp->host);

	return 0;
}

static struct of_device_id esp_match[] = {
	{
		.name = "SUNW,esp",
	},
	{
		.name = "SUNW,fas",
	},
	{
		.name = "esp",
	},
	{},
};
MODULE_DEVICE_TABLE(of, esp_match);

static struct of_platform_driver esp_sbus_driver = {
	.name		= "esp",
	.match_table	= esp_match,
	.probe		= esp_sbus_probe,
	.remove		= __devexit_p(esp_sbus_remove),
};

static int __init sunesp_init(void)
{
	return of_register_driver(&esp_sbus_driver, &sbus_bus_type);
}

static void __exit sunesp_exit(void)
{
	of_unregister_driver(&esp_sbus_driver);
}

MODULE_DESCRIPTION("Sun ESP SCSI driver");
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

module_init(sunesp_init);
module_exit(sunesp_exit);