1/* Low-level parallel port routines for built-in port on SGI IP32
2 *
3 * Author: Arnaud Giersch <arnaud.giersch@free.fr>
4 *
5 * Based on parport_pc.c by
6 *	Phil Blundell, Tim Waugh, Jose Renau, David Campbell,
7 *	Andrea Arcangeli, et al.
8 *
9 * Thanks to Ilya A. Volynets-Evenbakh for his help.
10 *
11 * Copyright (C) 2005, 2006 Arnaud Giersch.
12 *
13 * This program is free software; you can redistribute it and/or modify it
14 * under the terms of the GNU General Public License as published by the Free
15 * Software Foundation; either version 2 of the License, or (at your option)
16 * any later version.
17 *
18 * This program is distributed in the hope that it will be useful, but WITHOUT
19 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
20 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
21 * more details.
22 *
23 * You should have received a copy of the GNU General Public License along
24 * with this program; if not, write to the Free Software Foundation, Inc., 59
25 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
26 */
27
28/* Current status:
29 *
30 *	Basic SPP and PS2 modes are supported.
31 *	Support for parallel port IRQ is present.
32 *	Hardware SPP (a.k.a. compatibility), EPP, and ECP modes are
33 *	supported.
34 *	SPP/ECP FIFO can be driven in PIO or DMA mode.  PIO mode can work with
35 *	or without interrupt support.
36 *
37 *	Hardware ECP mode is not fully implemented (ecp_read_data and
38 *	ecp_write_addr are actually missing).
39 *
40 * To do:
41 *
42 *	Fully implement ECP mode.
 *	EPP and ECP modes need to be tested.  I currently do not own any
 *	peripheral supporting these extended modes, and cannot test them.
 *	If DMA mode works well, decide if support for PIO FIFO modes should be
 *	dropped.
 *	Use the io{read,write} family of functions when they become available
 *	in the linux-mips.org tree.  Note: the MIPS-specific functions readsb()
 *	and writesb() are to be replaced by ioread8_rep() and iowrite8_rep()
 *	respectively.
51 */
52
/* The built-in parallel port on the SGI O2 workstation (a.k.a. IP32) is an
 * IEEE 1284 parallel port driven by a Texas Instruments TL16PIR552PH chip[1].
 * This chip supports SPP, bidirectional, EPP and ECP modes.  It has a 16-byte
 * FIFO buffer and supports DMA transfers.
57 *
58 * [1] http://focus.ti.com/docs/prod/folders/print/tl16pir552.html
59 *
 * In theory, we could simply use the parport_pc module.  In practice, it is
 * not that simple: the parport_pc code assumes that the parallel port
 * registers are port-mapped, whereas on the O2 they are memory-mapped.
 * Furthermore, each register is replicated across 256 consecutive addresses
 * (as is the case for the built-in serial ports on the same chip).
65 */
66
67/*--- Some configuration defines ---------------------------------------*/
68
69/* DEBUG_PARPORT_IP32
70 *	0	disable debug
71 *	1	standard level: pr_debug1 is enabled
72 *	2	parport_ip32_dump_state is enabled
73 *	>=3	verbose level: pr_debug is enabled
74 */
75#if !defined(DEBUG_PARPORT_IP32)
76#	define DEBUG_PARPORT_IP32  0	/* 0 (disabled) for production */
77#endif
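
/* A non-default debug level can be defined at build time.  For illustration
 * only (this kbuild tweak is an assumption, not part of this driver), a
 * per-object flag can be added to drivers/parport/Makefile:
 *
 *	CFLAGS_parport_ip32.o += -DDEBUG_PARPORT_IP32=2
 */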
78
79/*----------------------------------------------------------------------*/
80
81/* Setup DEBUG macros.  This is done before any includes, just in case we
82 * activate pr_debug() with DEBUG_PARPORT_IP32 >= 3.
83 */
84#if DEBUG_PARPORT_IP32 == 1
85#	warning DEBUG_PARPORT_IP32 == 1
86#elif DEBUG_PARPORT_IP32 == 2
87#	warning DEBUG_PARPORT_IP32 == 2
88#elif DEBUG_PARPORT_IP32 >= 3
89#	warning DEBUG_PARPORT_IP32 >= 3
90#	if !defined(DEBUG)
91#		define DEBUG /* enable pr_debug() in kernel.h */
92#	endif
93#endif
94
95#include <linux/completion.h>
96#include <linux/delay.h>
97#include <linux/dma-mapping.h>
98#include <linux/err.h>
99#include <linux/init.h>
100#include <linux/interrupt.h>
101#include <linux/jiffies.h>
102#include <linux/kernel.h>
103#include <linux/module.h>
104#include <linux/parport.h>
105#include <linux/sched.h>
106#include <linux/spinlock.h>
107#include <linux/stddef.h>
108#include <linux/types.h>
109#include <asm/io.h>
110#include <asm/ip32/ip32_ints.h>
111#include <asm/ip32/mace.h>
112
113/*--- Global variables -------------------------------------------------*/
114
115/* Verbose probing on by default for debugging. */
116#if DEBUG_PARPORT_IP32 >= 1
117#	define DEFAULT_VERBOSE_PROBING	1
118#else
119#	define DEFAULT_VERBOSE_PROBING	0
120#endif
121
122/* Default prefix for printk */
123#define PPIP32 "parport_ip32: "
124
125/*
126 * These are the module parameters:
127 * @features:		bit mask of features to enable/disable
128 *			(all enabled by default)
129 * @verbose_probing:	log chit-chat during initialization
130 */
131#define PARPORT_IP32_ENABLE_IRQ	(1U << 0)
132#define PARPORT_IP32_ENABLE_DMA	(1U << 1)
133#define PARPORT_IP32_ENABLE_SPP	(1U << 2)
134#define PARPORT_IP32_ENABLE_EPP	(1U << 3)
135#define PARPORT_IP32_ENABLE_ECP	(1U << 4)
136static unsigned int features =	~0U;
137static int verbose_probing =	DEFAULT_VERBOSE_PROBING;
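
/* The module_param() declarations for these variables are expected near the
 * end of the file.  As an illustration only (the parameter types and
 * permissions shown here are assumptions), they would typically look like:
 *
 *	module_param(features, uint, S_IRUGO);
 *	MODULE_PARM_DESC(features, "Bit mask of features to enable");
 *	module_param(verbose_probing, int, S_IRUGO);
 *	MODULE_PARM_DESC(verbose_probing, "Log chit-chat during initialization");
 */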
138
139/* We do not support more than one port. */
static struct parport *this_port;
141
142/* Timing constants for FIFO modes.  */
143#define FIFO_NFAULT_TIMEOUT	100	/* milliseconds */
144#define FIFO_POLLING_INTERVAL	50	/* microseconds */
145
146/*--- I/O register definitions -----------------------------------------*/
147
148/**
149 * struct parport_ip32_regs - virtual addresses of parallel port registers
150 * @data:	Data Register
151 * @dsr:	Device Status Register
152 * @dcr:	Device Control Register
153 * @eppAddr:	EPP Address Register
154 * @eppData0:	EPP Data Register 0
155 * @eppData1:	EPP Data Register 1
156 * @eppData2:	EPP Data Register 2
157 * @eppData3:	EPP Data Register 3
158 * @ecpAFifo:	ECP Address FIFO
159 * @fifo:	General FIFO register.  The same address is used for:
160 *		- cFifo, the Parallel Port DATA FIFO
161 *		- ecpDFifo, the ECP Data FIFO
162 *		- tFifo, the ECP Test FIFO
163 * @cnfgA:	Configuration Register A
164 * @cnfgB:	Configuration Register B
165 * @ecr:	Extended Control Register
166 */
167struct parport_ip32_regs {
168	void __iomem *data;
169	void __iomem *dsr;
170	void __iomem *dcr;
171	void __iomem *eppAddr;
172	void __iomem *eppData0;
173	void __iomem *eppData1;
174	void __iomem *eppData2;
175	void __iomem *eppData3;
176	void __iomem *ecpAFifo;
177	void __iomem *fifo;
178	void __iomem *cnfgA;
179	void __iomem *cnfgB;
180	void __iomem *ecr;
181};
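
/* Since each register is replicated across 256 consecutive addresses (see the
 * introductory comment), the whole block can be described by a base address
 * plus a fixed stride.  A minimal sketch of how this structure could be filled
 * in -- the base, stride and helper name are illustrative assumptions, not the
 * actual MACE setup performed later in this driver:
 *
 *	static void parport_ip32_fill_regs(struct parport_ip32_regs *regs,
 *					   void __iomem *base)
 *	{
 *		regs->data = base + 0x000;
 *		regs->dsr  = base + 0x100;
 *		regs->dcr  = base + 0x200;
 *		...
 *	}
 *
 * and so on for the EPP, FIFO and ECP registers.
 */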
182
183/* Device Status Register */
184#define DSR_nBUSY		(1U << 7)	/* PARPORT_STATUS_BUSY */
185#define DSR_nACK		(1U << 6)	/* PARPORT_STATUS_ACK */
186#define DSR_PERROR		(1U << 5)	/* PARPORT_STATUS_PAPEROUT */
187#define DSR_SELECT		(1U << 4)	/* PARPORT_STATUS_SELECT */
188#define DSR_nFAULT		(1U << 3)	/* PARPORT_STATUS_ERROR */
189#define DSR_nPRINT		(1U << 2)	/* specific to TL16PIR552 */
190/* #define DSR_reserved		(1U << 1) */
191#define DSR_TIMEOUT		(1U << 0)	/* EPP timeout */
192
193/* Device Control Register */
194/* #define DCR_reserved		(1U << 7) | (1U <<  6) */
195#define DCR_DIR			(1U << 5)	/* direction */
196#define DCR_IRQ			(1U << 4)	/* interrupt on nAck */
197#define DCR_SELECT		(1U << 3)	/* PARPORT_CONTROL_SELECT */
198#define DCR_nINIT		(1U << 2)	/* PARPORT_CONTROL_INIT */
199#define DCR_AUTOFD		(1U << 1)	/* PARPORT_CONTROL_AUTOFD */
200#define DCR_STROBE		(1U << 0)	/* PARPORT_CONTROL_STROBE */
201
202/* ECP Configuration Register A */
203#define CNFGA_IRQ		(1U << 7)
204#define CNFGA_ID_MASK		((1U << 6) | (1U << 5) | (1U << 4))
205#define CNFGA_ID_SHIFT		4
206#define CNFGA_ID_16		(00U << CNFGA_ID_SHIFT)
207#define CNFGA_ID_8		(01U << CNFGA_ID_SHIFT)
208#define CNFGA_ID_32		(02U << CNFGA_ID_SHIFT)
209/* #define CNFGA_reserved	(1U << 3) */
210#define CNFGA_nBYTEINTRANS	(1U << 2)
211#define CNFGA_PWORDLEFT		((1U << 1) | (1U << 0))
212
213/* ECP Configuration Register B */
214#define CNFGB_COMPRESS		(1U << 7)
215#define CNFGB_INTRVAL		(1U << 6)
216#define CNFGB_IRQ_MASK		((1U << 5) | (1U << 4) | (1U << 3))
217#define CNFGB_IRQ_SHIFT		3
218#define CNFGB_DMA_MASK		((1U << 2) | (1U << 1) | (1U << 0))
219#define CNFGB_DMA_SHIFT		0
220
221/* Extended Control Register */
222#define ECR_MODE_MASK		((1U << 7) | (1U << 6) | (1U << 5))
223#define ECR_MODE_SHIFT		5
224#define ECR_MODE_SPP		(00U << ECR_MODE_SHIFT)
225#define ECR_MODE_PS2		(01U << ECR_MODE_SHIFT)
226#define ECR_MODE_PPF		(02U << ECR_MODE_SHIFT)
227#define ECR_MODE_ECP		(03U << ECR_MODE_SHIFT)
228#define ECR_MODE_EPP		(04U << ECR_MODE_SHIFT)
229/* #define ECR_MODE_reserved	(05U << ECR_MODE_SHIFT) */
230#define ECR_MODE_TST		(06U << ECR_MODE_SHIFT)
231#define ECR_MODE_CFG		(07U << ECR_MODE_SHIFT)
232#define ECR_nERRINTR		(1U << 4)
233#define ECR_DMAEN		(1U << 3)
234#define ECR_SERVINTR		(1U << 2)
235#define ECR_F_FULL		(1U << 1)
236#define ECR_F_EMPTY		(1U << 0)
237
238/*--- Private data -----------------------------------------------------*/
239
240/**
241 * enum parport_ip32_irq_mode - operation mode of interrupt handler
242 * @PARPORT_IP32_IRQ_FWD:	forward interrupt to the upper parport layer
243 * @PARPORT_IP32_IRQ_HERE:	interrupt is handled locally
244 */
245enum parport_ip32_irq_mode { PARPORT_IP32_IRQ_FWD, PARPORT_IP32_IRQ_HERE };
246
247/**
248 * struct parport_ip32_private - private stuff for &struct parport
249 * @regs:		register addresses
250 * @dcr_cache:		cached contents of DCR
251 * @dcr_writable:	bit mask of writable DCR bits
252 * @pword:		number of bytes per PWord
253 * @fifo_depth:		number of PWords that FIFO will hold
254 * @readIntrThreshold:	minimum number of PWords we can read
255 *			if we get an interrupt
256 * @writeIntrThreshold:	minimum number of PWords we can write
257 *			if we get an interrupt
258 * @irq_mode:		operation mode of interrupt handler for this port
 * @irq_complete:	completion used to wait for an interrupt to occur
260 */
261struct parport_ip32_private {
262	struct parport_ip32_regs	regs;
263	unsigned int			dcr_cache;
264	unsigned int			dcr_writable;
265	unsigned int			pword;
266	unsigned int			fifo_depth;
267	unsigned int			readIntrThreshold;
268	unsigned int			writeIntrThreshold;
269	enum parport_ip32_irq_mode	irq_mode;
270	struct completion		irq_complete;
271};
272
273/*--- Debug code -------------------------------------------------------*/
274
275/*
276 * pr_debug1 - print debug messages
277 *
278 * This is like pr_debug(), but is defined for %DEBUG_PARPORT_IP32 >= 1
279 */
280#if DEBUG_PARPORT_IP32 >= 1
281#	define pr_debug1(...)	printk(KERN_DEBUG __VA_ARGS__)
282#else /* DEBUG_PARPORT_IP32 < 1 */
283#	define pr_debug1(...)	do { } while (0)
284#endif
285
286/*
287 * pr_trace, pr_trace1 - trace function calls
288 * @p:		pointer to &struct parport
289 * @fmt:	printk format string
290 * @...:	parameters for format string
291 *
 * Macros used to trace function calls.  The given format string is printed
 * after the function name.  pr_trace() uses pr_debug(), and pr_trace1() uses
294 * pr_debug1().  __pr_trace() is the low-level macro and is not to be used
295 * directly.
296 */
297#define __pr_trace(pr, p, fmt, ...)					\
298	pr("%s: %s" fmt "\n",						\
299	   ({ const struct parport *__p = (p);				\
300		   __p ? __p->name : "parport_ip32"; }),		\
301	   __func__ , ##__VA_ARGS__)
302#define pr_trace(p, fmt, ...)	__pr_trace(pr_debug, p, fmt , ##__VA_ARGS__)
303#define pr_trace1(p, fmt, ...)	__pr_trace(pr_debug1, p, fmt , ##__VA_ARGS__)
304
305/*
306 * __pr_probe, pr_probe - print message if @verbose_probing is true
307 * @p:		pointer to &struct parport
308 * @fmt:	printk format string
309 * @...:	parameters for format string
310 *
311 * For new lines, use pr_probe().  Use __pr_probe() for continued lines.
312 */
313#define __pr_probe(...)							\
314	do { if (verbose_probing) printk(__VA_ARGS__); } while (0)
315#define pr_probe(p, fmt, ...)						\
316	__pr_probe(KERN_INFO PPIP32 "0x%lx: " fmt, (p)->base , ##__VA_ARGS__)
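
/* Example usage (illustrative only): pr_probe() starts a new log line, while
 * __pr_probe() continues it without a KERN_ prefix:
 *
 *	pr_probe(p, "Found FIFO of depth %u\n", priv->fifo_depth);
 *	pr_probe(p, "Supported modes:");
 *	__pr_probe(" SPP PS2\n");
 */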
317
318/*
319 * parport_ip32_dump_state - print register status of parport
320 * @p:		pointer to &struct parport
321 * @str:	string to add in message
322 * @show_ecp_config:	shall we dump ECP configuration registers too?
323 *
 * This function is only here for debugging purposes, and should be used with
 * care.  Reading the parallel port registers may have undesired side effects.
 * In particular, if @show_ecp_config is true, the parallel port is reset.
327 * This function is only defined if %DEBUG_PARPORT_IP32 >= 2.
328 */
329#if DEBUG_PARPORT_IP32 >= 2
330static void parport_ip32_dump_state(struct parport *p, char *str,
331				    unsigned int show_ecp_config)
332{
333	struct parport_ip32_private * const priv = p->physport->private_data;
334	unsigned int i;
335
336	printk(KERN_DEBUG PPIP32 "%s: state (%s):\n", p->name, str);
337	{
338		static const char ecr_modes[8][4] = {"SPP", "PS2", "PPF",
339						     "ECP", "EPP", "???",
340						     "TST", "CFG"};
341		unsigned int ecr = readb(priv->regs.ecr);
342		printk(KERN_DEBUG PPIP32 "    ecr=0x%02x", ecr);
343		printk(" %s",
344		       ecr_modes[(ecr & ECR_MODE_MASK) >> ECR_MODE_SHIFT]);
345		if (ecr & ECR_nERRINTR)
346			printk(",nErrIntrEn");
347		if (ecr & ECR_DMAEN)
348			printk(",dmaEn");
349		if (ecr & ECR_SERVINTR)
350			printk(",serviceIntr");
351		if (ecr & ECR_F_FULL)
352			printk(",f_full");
353		if (ecr & ECR_F_EMPTY)
354			printk(",f_empty");
355		printk("\n");
356	}
357	if (show_ecp_config) {
358		unsigned int oecr, cnfgA, cnfgB;
359		oecr = readb(priv->regs.ecr);
360		writeb(ECR_MODE_PS2, priv->regs.ecr);
361		writeb(ECR_MODE_CFG, priv->regs.ecr);
362		cnfgA = readb(priv->regs.cnfgA);
363		cnfgB = readb(priv->regs.cnfgB);
364		writeb(ECR_MODE_PS2, priv->regs.ecr);
365		writeb(oecr, priv->regs.ecr);
366		printk(KERN_DEBUG PPIP32 "    cnfgA=0x%02x", cnfgA);
367		printk(" ISA-%s", (cnfgA & CNFGA_IRQ) ? "Level" : "Pulses");
368		switch (cnfgA & CNFGA_ID_MASK) {
369		case CNFGA_ID_8:
370			printk(",8 bits");
371			break;
372		case CNFGA_ID_16:
373			printk(",16 bits");
374			break;
375		case CNFGA_ID_32:
376			printk(",32 bits");
377			break;
378		default:
379			printk(",unknown ID");
380			break;
381		}
382		if (!(cnfgA & CNFGA_nBYTEINTRANS))
383			printk(",ByteInTrans");
384		if ((cnfgA & CNFGA_ID_MASK) != CNFGA_ID_8)
385			printk(",%d byte%s left", cnfgA & CNFGA_PWORDLEFT,
386			       ((cnfgA & CNFGA_PWORDLEFT) > 1) ? "s" : "");
387		printk("\n");
388		printk(KERN_DEBUG PPIP32 "    cnfgB=0x%02x", cnfgB);
389		printk(" irq=%u,dma=%u",
390		       (cnfgB & CNFGB_IRQ_MASK) >> CNFGB_IRQ_SHIFT,
391		       (cnfgB & CNFGB_DMA_MASK) >> CNFGB_DMA_SHIFT);
392		printk(",intrValue=%d", !!(cnfgB & CNFGB_INTRVAL));
393		if (cnfgB & CNFGB_COMPRESS)
394			printk(",compress");
395		printk("\n");
396	}
397	for (i = 0; i < 2; i++) {
398		unsigned int dcr = i ? priv->dcr_cache : readb(priv->regs.dcr);
399		printk(KERN_DEBUG PPIP32 "    dcr(%s)=0x%02x",
400		       i ? "soft" : "hard", dcr);
401		printk(" %s", (dcr & DCR_DIR) ? "rev" : "fwd");
402		if (dcr & DCR_IRQ)
403			printk(",ackIntEn");
404		if (!(dcr & DCR_SELECT))
405			printk(",nSelectIn");
406		if (dcr & DCR_nINIT)
407			printk(",nInit");
408		if (!(dcr & DCR_AUTOFD))
409			printk(",nAutoFD");
410		if (!(dcr & DCR_STROBE))
411			printk(",nStrobe");
412		printk("\n");
413	}
414#define sep (f++ ? ',' : ' ')
415	{
416		unsigned int f = 0;
417		unsigned int dsr = readb(priv->regs.dsr);
418		printk(KERN_DEBUG PPIP32 "    dsr=0x%02x", dsr);
419		if (!(dsr & DSR_nBUSY))
420			printk("%cBusy", sep);
421		if (dsr & DSR_nACK)
422			printk("%cnAck", sep);
423		if (dsr & DSR_PERROR)
424			printk("%cPError", sep);
425		if (dsr & DSR_SELECT)
426			printk("%cSelect", sep);
427		if (dsr & DSR_nFAULT)
428			printk("%cnFault", sep);
429		if (!(dsr & DSR_nPRINT))
430			printk("%c(Print)", sep);
431		if (dsr & DSR_TIMEOUT)
432			printk("%cTimeout", sep);
433		printk("\n");
434	}
435#undef sep
436}
437#else /* DEBUG_PARPORT_IP32 < 2 */
438#define parport_ip32_dump_state(...)	do { } while (0)
439#endif
440
441/*
442 * CHECK_EXTRA_BITS - track and log extra bits
443 * @p:		pointer to &struct parport
444 * @b:		byte to inspect
445 * @m:		bit mask of authorized bits
446 *
447 * This is used to track and log extra bits that should not be there in
448 * parport_ip32_write_control() and parport_ip32_frob_control().  It is only
449 * defined if %DEBUG_PARPORT_IP32 >= 1.
450 */
451#if DEBUG_PARPORT_IP32 >= 1
452#define CHECK_EXTRA_BITS(p, b, m)					\
453	do {								\
454		unsigned int __b = (b), __m = (m);			\
455		if (__b & ~__m)						\
456			pr_debug1(PPIP32 "%s: extra bits in %s(%s): "	\
457				  "0x%02x/0x%02x\n",			\
458				  (p)->name, __func__, #b, __b, __m);	\
459	} while (0)
460#else /* DEBUG_PARPORT_IP32 < 1 */
461#define CHECK_EXTRA_BITS(...)	do { } while (0)
462#endif
463
464/*--- IP32 parallel port DMA operations --------------------------------*/
465
466/**
467 * struct parport_ip32_dma_data - private data needed for DMA operation
468 * @dir:	DMA direction (from or to device)
469 * @buf:	buffer physical address
470 * @len:	buffer length
471 * @next:	address of next bytes to DMA transfer
472 * @left:	number of bytes remaining
473 * @ctx:	next context to write (0: context_a; 1: context_b)
474 * @irq_on:	are the DMA IRQs currently enabled?
475 * @lock:	spinlock to protect access to the structure
476 */
477struct parport_ip32_dma_data {
478	enum dma_data_direction		dir;
479	dma_addr_t			buf;
480	dma_addr_t			next;
481	size_t				len;
482	size_t				left;
483	unsigned int			ctx;
484	unsigned int			irq_on;
485	spinlock_t			lock;
486};
487static struct parport_ip32_dma_data parport_ip32_dma;
488
489/**
490 * parport_ip32_dma_setup_context - setup next DMA context
491 * @limit:	maximum data size for the context
492 *
 * The alignment constraints must be verified by the calling function, and the
 * parameter @limit must be set accordingly.
495 */
496static void parport_ip32_dma_setup_context(unsigned int limit)
497{
498	unsigned long flags;
499
500	spin_lock_irqsave(&parport_ip32_dma.lock, flags);
501	if (parport_ip32_dma.left > 0) {
502		/* Note: ctxreg is "volatile" here only because
503		 * mace->perif.ctrl.parport.context_a and context_b are
504		 * "volatile".  */
505		volatile u64 __iomem *ctxreg = (parport_ip32_dma.ctx == 0) ?
506			&mace->perif.ctrl.parport.context_a :
507			&mace->perif.ctrl.parport.context_b;
508		u64 count;
509		u64 ctxval;
510		if (parport_ip32_dma.left <= limit) {
511			count = parport_ip32_dma.left;
512			ctxval = MACEPAR_CONTEXT_LASTFLAG;
513		} else {
514			count = limit;
515			ctxval = 0;
516		}
517
518		pr_trace(NULL,
519			 "(%u): 0x%04x:0x%04x, %u -> %u%s",
520			 limit,
521			 (unsigned int)parport_ip32_dma.buf,
522			 (unsigned int)parport_ip32_dma.next,
523			 (unsigned int)count,
524			 parport_ip32_dma.ctx, ctxval ? "*" : "");
525
526		ctxval |= parport_ip32_dma.next &
527			MACEPAR_CONTEXT_BASEADDR_MASK;
528		ctxval |= ((count - 1) << MACEPAR_CONTEXT_DATALEN_SHIFT) &
529			MACEPAR_CONTEXT_DATALEN_MASK;
530		writeq(ctxval, ctxreg);
531		parport_ip32_dma.next += count;
532		parport_ip32_dma.left -= count;
533		parport_ip32_dma.ctx ^= 1U;
534	}
	/* If there is nothing more to send, disable IRQs to avoid facing
	 * an IRQ storm which can lock up the machine.  Disable them
	 * only once. */
538	if (parport_ip32_dma.left == 0 && parport_ip32_dma.irq_on) {
539		pr_debug(PPIP32 "IRQ off (ctx)\n");
540		disable_irq_nosync(MACEISA_PAR_CTXA_IRQ);
541		disable_irq_nosync(MACEISA_PAR_CTXB_IRQ);
542		parport_ip32_dma.irq_on = 0;
543	}
544	spin_unlock_irqrestore(&parport_ip32_dma.lock, flags);
545}
546
547/**
548 * parport_ip32_dma_interrupt - DMA interrupt handler
549 * @irq:	interrupt number
550 * @dev_id:	unused
551 */
552static irqreturn_t parport_ip32_dma_interrupt(int irq, void *dev_id)
553{
554	if (parport_ip32_dma.left)
555		pr_trace(NULL, "(%d): ctx=%d", irq, parport_ip32_dma.ctx);
556	parport_ip32_dma_setup_context(MACEPAR_CONTEXT_DATA_BOUND);
557	return IRQ_HANDLED;
558}
559
560#if DEBUG_PARPORT_IP32
561static irqreturn_t parport_ip32_merr_interrupt(int irq, void *dev_id)
562{
563	pr_trace1(NULL, "(%d)", irq);
564	return IRQ_HANDLED;
565}
566#endif
567
568/**
569 * parport_ip32_dma_start - begins a DMA transfer
570 * @dir:	DMA direction: DMA_TO_DEVICE or DMA_FROM_DEVICE
571 * @addr:	pointer to data buffer
572 * @count:	buffer size
573 *
574 * Calls to parport_ip32_dma_start() and parport_ip32_dma_stop() must be
575 * correctly balanced.
576 */
577static int parport_ip32_dma_start(enum dma_data_direction dir,
578				  void *addr, size_t count)
579{
580	unsigned int limit;
581	u64 ctrl;
582
583	pr_trace(NULL, "(%d, %lu)", dir, (unsigned long)count);
584
585	BUG_ON(dir != DMA_TO_DEVICE);
586
587	/* Reset DMA controller */
588	ctrl = MACEPAR_CTLSTAT_RESET;
589	writeq(ctrl, &mace->perif.ctrl.parport.cntlstat);
590
591	/* DMA IRQs should normally be enabled */
592	if (!parport_ip32_dma.irq_on) {
593		WARN_ON(1);
594		enable_irq(MACEISA_PAR_CTXA_IRQ);
595		enable_irq(MACEISA_PAR_CTXB_IRQ);
596		parport_ip32_dma.irq_on = 1;
597	}
598
599	/* Prepare DMA pointers */
600	parport_ip32_dma.dir = dir;
601	parport_ip32_dma.buf = dma_map_single(NULL, addr, count, dir);
602	parport_ip32_dma.len = count;
603	parport_ip32_dma.next = parport_ip32_dma.buf;
604	parport_ip32_dma.left = parport_ip32_dma.len;
605	parport_ip32_dma.ctx = 0;
606
607	/* Setup DMA direction and first two contexts */
608	ctrl = (dir == DMA_TO_DEVICE) ? 0 : MACEPAR_CTLSTAT_DIRECTION;
609	writeq(ctrl, &mace->perif.ctrl.parport.cntlstat);
610	/* Single transfer should not cross a 4K page boundary */
611	limit = MACEPAR_CONTEXT_DATA_BOUND -
612		(parport_ip32_dma.next & (MACEPAR_CONTEXT_DATA_BOUND - 1));
613	parport_ip32_dma_setup_context(limit);
614	parport_ip32_dma_setup_context(MACEPAR_CONTEXT_DATA_BOUND);
615
616	/* Real start of DMA transfer */
617	ctrl |= MACEPAR_CTLSTAT_ENABLE;
618	writeq(ctrl, &mace->perif.ctrl.parport.cntlstat);
619
620	return 0;
621}
622
623/**
624 * parport_ip32_dma_stop - ends a running DMA transfer
625 *
626 * Calls to parport_ip32_dma_start() and parport_ip32_dma_stop() must be
627 * correctly balanced.
628 */
629static void parport_ip32_dma_stop(void)
630{
631	u64 ctx_a;
632	u64 ctx_b;
633	u64 ctrl;
634	u64 diag;
635	size_t res[2];	/* {[0] = res_a, [1] = res_b} */
636
637	pr_trace(NULL, "()");
638
639	/* Disable IRQs */
640	spin_lock_irq(&parport_ip32_dma.lock);
641	if (parport_ip32_dma.irq_on) {
642		pr_debug(PPIP32 "IRQ off (stop)\n");
643		disable_irq_nosync(MACEISA_PAR_CTXA_IRQ);
644		disable_irq_nosync(MACEISA_PAR_CTXB_IRQ);
645		parport_ip32_dma.irq_on = 0;
646	}
647	spin_unlock_irq(&parport_ip32_dma.lock);
648	/* Force IRQ synchronization, even if the IRQs were disabled
649	 * elsewhere. */
650	synchronize_irq(MACEISA_PAR_CTXA_IRQ);
651	synchronize_irq(MACEISA_PAR_CTXB_IRQ);
652
653	/* Stop DMA transfer */
654	ctrl = readq(&mace->perif.ctrl.parport.cntlstat);
655	ctrl &= ~MACEPAR_CTLSTAT_ENABLE;
656	writeq(ctrl, &mace->perif.ctrl.parport.cntlstat);
657
658	/* Adjust residue (parport_ip32_dma.left) */
659	ctx_a = readq(&mace->perif.ctrl.parport.context_a);
660	ctx_b = readq(&mace->perif.ctrl.parport.context_b);
661	ctrl = readq(&mace->perif.ctrl.parport.cntlstat);
662	diag = readq(&mace->perif.ctrl.parport.diagnostic);
663	res[0] = (ctrl & MACEPAR_CTLSTAT_CTXA_VALID) ?
664		1 + ((ctx_a & MACEPAR_CONTEXT_DATALEN_MASK) >>
665		     MACEPAR_CONTEXT_DATALEN_SHIFT) :
666		0;
667	res[1] = (ctrl & MACEPAR_CTLSTAT_CTXB_VALID) ?
668		1 + ((ctx_b & MACEPAR_CONTEXT_DATALEN_MASK) >>
669		     MACEPAR_CONTEXT_DATALEN_SHIFT) :
670		0;
671	if (diag & MACEPAR_DIAG_DMACTIVE)
672		res[(diag & MACEPAR_DIAG_CTXINUSE) != 0] =
673			1 + ((diag & MACEPAR_DIAG_CTRMASK) >>
674			     MACEPAR_DIAG_CTRSHIFT);
675	parport_ip32_dma.left += res[0] + res[1];
676
677	/* Reset DMA controller, and re-enable IRQs */
678	ctrl = MACEPAR_CTLSTAT_RESET;
679	writeq(ctrl, &mace->perif.ctrl.parport.cntlstat);
680	pr_debug(PPIP32 "IRQ on (stop)\n");
681	enable_irq(MACEISA_PAR_CTXA_IRQ);
682	enable_irq(MACEISA_PAR_CTXB_IRQ);
683	parport_ip32_dma.irq_on = 1;
684
685	dma_unmap_single(NULL, parport_ip32_dma.buf, parport_ip32_dma.len,
686			 parport_ip32_dma.dir);
687}
688
689/**
690 * parport_ip32_dma_get_residue - get residue from last DMA transfer
691 *
692 * Returns the number of bytes remaining from last DMA transfer.
693 */
694static inline size_t parport_ip32_dma_get_residue(void)
695{
696	return parport_ip32_dma.left;
697}
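
/* The typical calling sequence, as used by parport_ip32_fifo_write_block_dma()
 * further below, pairs these functions as follows (sketch):
 *
 *	parport_ip32_dma_start(DMA_TO_DEVICE, buf, len);
 *	... set ECR_DMAEN and wait until the transfer completes or times out ...
 *	parport_ip32_dma_stop();
 *	written = len - parport_ip32_dma_get_residue();
 */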
698
699/**
700 * parport_ip32_dma_register - initialize DMA engine
701 *
702 * Returns zero for success.
703 */
704static int parport_ip32_dma_register(void)
705{
706	int err;
707
708	spin_lock_init(&parport_ip32_dma.lock);
709	parport_ip32_dma.irq_on = 1;
710
711	/* Reset DMA controller */
712	writeq(MACEPAR_CTLSTAT_RESET, &mace->perif.ctrl.parport.cntlstat);
713
714	/* Request IRQs */
715	err = request_irq(MACEISA_PAR_CTXA_IRQ, parport_ip32_dma_interrupt,
716			  0, "parport_ip32", NULL);
717	if (err)
718		goto fail_a;
719	err = request_irq(MACEISA_PAR_CTXB_IRQ, parport_ip32_dma_interrupt,
720			  0, "parport_ip32", NULL);
721	if (err)
722		goto fail_b;
723#if DEBUG_PARPORT_IP32
724	err = request_irq(MACEISA_PAR_MERR_IRQ, parport_ip32_merr_interrupt,
725			  0, "parport_ip32", NULL);
726	if (err)
727		goto fail_merr;
728#endif
729	return 0;
730
731#if DEBUG_PARPORT_IP32
732fail_merr:
733	free_irq(MACEISA_PAR_CTXB_IRQ, NULL);
734#endif
735fail_b:
736	free_irq(MACEISA_PAR_CTXA_IRQ, NULL);
737fail_a:
738	return err;
739}
740
741/**
742 * parport_ip32_dma_unregister - release and free resources for DMA engine
743 */
744static void parport_ip32_dma_unregister(void)
745{
746#if DEBUG_PARPORT_IP32
747	free_irq(MACEISA_PAR_MERR_IRQ, NULL);
748#endif
749	free_irq(MACEISA_PAR_CTXB_IRQ, NULL);
750	free_irq(MACEISA_PAR_CTXA_IRQ, NULL);
751}
752
753/*--- Interrupt handlers and associates --------------------------------*/
754
755/**
756 * parport_ip32_wakeup - wakes up code waiting for an interrupt
757 * @p:		pointer to &struct parport
758 */
759static inline void parport_ip32_wakeup(struct parport *p)
760{
761	struct parport_ip32_private * const priv = p->physport->private_data;
762	complete(&priv->irq_complete);
763}
764
765/**
766 * parport_ip32_interrupt - interrupt handler
767 * @irq:	interrupt number
768 * @dev_id:	pointer to &struct parport
769 *
 * Caught interrupts are forwarded to the upper parport layer if the port's
 * irq_mode is %PARPORT_IP32_IRQ_FWD.
772 */
773static irqreturn_t parport_ip32_interrupt(int irq, void *dev_id)
774{
775	struct parport * const p = dev_id;
776	struct parport_ip32_private * const priv = p->physport->private_data;
777	enum parport_ip32_irq_mode irq_mode = priv->irq_mode;
778	switch (irq_mode) {
779	case PARPORT_IP32_IRQ_FWD:
780		parport_generic_irq(irq, p);
781		break;
782	case PARPORT_IP32_IRQ_HERE:
783		parport_ip32_wakeup(p);
784		break;
785	}
786	return IRQ_HANDLED;
787}
788
/*--- Some utility functions to manipulate the ECR register ------------*/
790
791/**
792 * parport_ip32_read_econtrol - read contents of the ECR register
793 * @p:		pointer to &struct parport
794 */
795static inline unsigned int parport_ip32_read_econtrol(struct parport *p)
796{
797	struct parport_ip32_private * const priv = p->physport->private_data;
798	return readb(priv->regs.ecr);
799}
800
801/**
802 * parport_ip32_write_econtrol - write new contents to the ECR register
803 * @p:		pointer to &struct parport
804 * @c:		new value to write
805 */
806static inline void parport_ip32_write_econtrol(struct parport *p,
807					       unsigned int c)
808{
809	struct parport_ip32_private * const priv = p->physport->private_data;
810	writeb(c, priv->regs.ecr);
811}
812
813/**
814 * parport_ip32_frob_econtrol - change bits from the ECR register
815 * @p:		pointer to &struct parport
816 * @mask:	bit mask of bits to change
817 * @val:	new value for changed bits
818 *
819 * Read from the ECR, mask out the bits in @mask, exclusive-or with the bits
820 * in @val, and write the result to the ECR.
821 */
822static inline void parport_ip32_frob_econtrol(struct parport *p,
823					      unsigned int mask,
824					      unsigned int val)
825{
826	unsigned int c;
827	c = (parport_ip32_read_econtrol(p) & ~mask) ^ val;
828	parport_ip32_write_econtrol(p, c);
829}
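
/* For instance, the FIFO code below uses this helper to toggle the serviceIntr
 * bit without touching the rest of ECR:
 *
 *	parport_ip32_frob_econtrol(p, ECR_SERVINTR, 0);
 *		clears the bit, i.e. enables service interrupts;
 *	parport_ip32_frob_econtrol(p, ECR_SERVINTR, ECR_SERVINTR);
 *		sets the bit, i.e. disables them.
 */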
830
831/**
832 * parport_ip32_set_mode - change mode of ECP port
833 * @p:		pointer to &struct parport
834 * @mode:	new mode to write in ECR
835 *
 * ECR is reset to a sane state (interrupts and DMA disabled), and placed in
837 * mode @mode.  Go through PS2 mode if needed.
838 */
839static void parport_ip32_set_mode(struct parport *p, unsigned int mode)
840{
841	unsigned int omode;
842
843	mode &= ECR_MODE_MASK;
844	omode = parport_ip32_read_econtrol(p) & ECR_MODE_MASK;
845
846	if (!(mode == ECR_MODE_SPP || mode == ECR_MODE_PS2
847	      || omode == ECR_MODE_SPP || omode == ECR_MODE_PS2)) {
848		/* We have to go through PS2 mode */
849		unsigned int ecr = ECR_MODE_PS2 | ECR_nERRINTR | ECR_SERVINTR;
850		parport_ip32_write_econtrol(p, ecr);
851	}
852	parport_ip32_write_econtrol(p, mode | ECR_nERRINTR | ECR_SERVINTR);
853}
854
855/*--- Basic functions needed for parport -------------------------------*/
856
857/**
858 * parport_ip32_read_data - return current contents of the DATA register
859 * @p:		pointer to &struct parport
860 */
861static inline unsigned char parport_ip32_read_data(struct parport *p)
862{
863	struct parport_ip32_private * const priv = p->physport->private_data;
864	return readb(priv->regs.data);
865}
866
867/**
868 * parport_ip32_write_data - set new contents for the DATA register
869 * @p:		pointer to &struct parport
870 * @d:		new value to write
871 */
872static inline void parport_ip32_write_data(struct parport *p, unsigned char d)
873{
874	struct parport_ip32_private * const priv = p->physport->private_data;
875	writeb(d, priv->regs.data);
876}
877
878/**
879 * parport_ip32_read_status - return current contents of the DSR register
880 * @p:		pointer to &struct parport
881 */
882static inline unsigned char parport_ip32_read_status(struct parport *p)
883{
884	struct parport_ip32_private * const priv = p->physport->private_data;
885	return readb(priv->regs.dsr);
886}
887
888/**
889 * __parport_ip32_read_control - return cached contents of the DCR register
890 * @p:		pointer to &struct parport
891 */
892static inline unsigned int __parport_ip32_read_control(struct parport *p)
893{
894	struct parport_ip32_private * const priv = p->physport->private_data;
895	return priv->dcr_cache; /* use soft copy */
896}
897
898/**
899 * __parport_ip32_write_control - set new contents for the DCR register
900 * @p:		pointer to &struct parport
901 * @c:		new value to write
902 */
903static inline void __parport_ip32_write_control(struct parport *p,
904						unsigned int c)
905{
906	struct parport_ip32_private * const priv = p->physport->private_data;
907	CHECK_EXTRA_BITS(p, c, priv->dcr_writable);
908	c &= priv->dcr_writable; /* only writable bits */
909	writeb(c, priv->regs.dcr);
910	priv->dcr_cache = c;		/* update soft copy */
911}
912
913/**
914 * __parport_ip32_frob_control - change bits from the DCR register
915 * @p:		pointer to &struct parport
916 * @mask:	bit mask of bits to change
917 * @val:	new value for changed bits
918 *
 * This is equivalent to reading from the DCR, masking out the bits in @mask,
 * exclusive-or'ing them with the bits in @val, and writing the result to the
 * DCR.  In practice, the cached contents of the DCR are used.
922 */
923static inline void __parport_ip32_frob_control(struct parport *p,
924					       unsigned int mask,
925					       unsigned int val)
926{
927	unsigned int c;
928	c = (__parport_ip32_read_control(p) & ~mask) ^ val;
929	__parport_ip32_write_control(p, c);
930}
931
932/**
933 * parport_ip32_read_control - return cached contents of the DCR register
934 * @p:		pointer to &struct parport
935 *
936 * The return value is masked so as to only return the value of %DCR_STROBE,
937 * %DCR_AUTOFD, %DCR_nINIT, and %DCR_SELECT.
938 */
939static inline unsigned char parport_ip32_read_control(struct parport *p)
940{
941	const unsigned int rm =
942		DCR_STROBE | DCR_AUTOFD | DCR_nINIT | DCR_SELECT;
943	return __parport_ip32_read_control(p) & rm;
944}
945
946/**
947 * parport_ip32_write_control - set new contents for the DCR register
948 * @p:		pointer to &struct parport
949 * @c:		new value to write
950 *
951 * The value is masked so as to only change the value of %DCR_STROBE,
952 * %DCR_AUTOFD, %DCR_nINIT, and %DCR_SELECT.
953 */
954static inline void parport_ip32_write_control(struct parport *p,
955					      unsigned char c)
956{
957	const unsigned int wm =
958		DCR_STROBE | DCR_AUTOFD | DCR_nINIT | DCR_SELECT;
959	CHECK_EXTRA_BITS(p, c, wm);
960	__parport_ip32_frob_control(p, wm, c & wm);
961}
962
963/**
964 * parport_ip32_frob_control - change bits from the DCR register
965 * @p:		pointer to &struct parport
966 * @mask:	bit mask of bits to change
967 * @val:	new value for changed bits
968 *
 * This differs from __parport_ip32_frob_control() in that it only allows
 * changing the value of %DCR_STROBE, %DCR_AUTOFD, %DCR_nINIT, and %DCR_SELECT.
971 */
972static inline unsigned char parport_ip32_frob_control(struct parport *p,
973						      unsigned char mask,
974						      unsigned char val)
975{
976	const unsigned int wm =
977		DCR_STROBE | DCR_AUTOFD | DCR_nINIT | DCR_SELECT;
978	CHECK_EXTRA_BITS(p, mask, wm);
979	CHECK_EXTRA_BITS(p, val, wm);
980	__parport_ip32_frob_control(p, mask & wm, val & wm);
981	return parport_ip32_read_control(p);
982}
983
984/**
985 * parport_ip32_disable_irq - disable interrupts on the rising edge of nACK
986 * @p:		pointer to &struct parport
987 */
988static inline void parport_ip32_disable_irq(struct parport *p)
989{
990	__parport_ip32_frob_control(p, DCR_IRQ, 0);
991}
992
993/**
994 * parport_ip32_enable_irq - enable interrupts on the rising edge of nACK
995 * @p:		pointer to &struct parport
996 */
997static inline void parport_ip32_enable_irq(struct parport *p)
998{
999	__parport_ip32_frob_control(p, DCR_IRQ, DCR_IRQ);
1000}
1001
1002/**
1003 * parport_ip32_data_forward - enable host-to-peripheral communications
1004 * @p:		pointer to &struct parport
1005 *
1006 * Enable the data line drivers, for 8-bit host-to-peripheral communications.
1007 */
1008static inline void parport_ip32_data_forward(struct parport *p)
1009{
1010	__parport_ip32_frob_control(p, DCR_DIR, 0);
1011}
1012
1013/**
1014 * parport_ip32_data_reverse - enable peripheral-to-host communications
1015 * @p:		pointer to &struct parport
1016 *
1017 * Place the data bus in a high impedance state, if @p->modes has the
1018 * PARPORT_MODE_TRISTATE bit set.
1019 */
1020static inline void parport_ip32_data_reverse(struct parport *p)
1021{
1022	__parport_ip32_frob_control(p, DCR_DIR, DCR_DIR);
1023}
1024
1025/**
1026 * parport_ip32_init_state - for core parport code
1027 * @dev:	pointer to &struct pardevice
1028 * @s:		pointer to &struct parport_state to initialize
1029 */
1030static void parport_ip32_init_state(struct pardevice *dev,
1031				    struct parport_state *s)
1032{
1033	s->u.ip32.dcr = DCR_SELECT | DCR_nINIT;
1034	s->u.ip32.ecr = ECR_MODE_PS2 | ECR_nERRINTR | ECR_SERVINTR;
1035}
1036
1037/**
1038 * parport_ip32_save_state - for core parport code
1039 * @p:		pointer to &struct parport
1040 * @s:		pointer to &struct parport_state to save state to
1041 */
1042static void parport_ip32_save_state(struct parport *p,
1043				    struct parport_state *s)
1044{
1045	s->u.ip32.dcr = __parport_ip32_read_control(p);
1046	s->u.ip32.ecr = parport_ip32_read_econtrol(p);
1047}
1048
1049/**
1050 * parport_ip32_restore_state - for core parport code
1051 * @p:		pointer to &struct parport
1052 * @s:		pointer to &struct parport_state to restore state from
1053 */
1054static void parport_ip32_restore_state(struct parport *p,
1055				       struct parport_state *s)
1056{
1057	parport_ip32_set_mode(p, s->u.ip32.ecr & ECR_MODE_MASK);
1058	parport_ip32_write_econtrol(p, s->u.ip32.ecr);
1059	__parport_ip32_write_control(p, s->u.ip32.dcr);
1060}
1061
1062/*--- EPP mode functions -----------------------------------------------*/
1063
1064/**
1065 * parport_ip32_clear_epp_timeout - clear Timeout bit in EPP mode
1066 * @p:		pointer to &struct parport
1067 *
1068 * Returns 1 if the Timeout bit is clear, and 0 otherwise.
1069 */
1070static unsigned int parport_ip32_clear_epp_timeout(struct parport *p)
1071{
1072	struct parport_ip32_private * const priv = p->physport->private_data;
1073	unsigned int cleared;
1074
1075	if (!(parport_ip32_read_status(p) & DSR_TIMEOUT))
1076		cleared = 1;
1077	else {
1078		unsigned int r;
1079		/* To clear timeout some chips require double read */
1080		parport_ip32_read_status(p);
1081		r = parport_ip32_read_status(p);
1082		/* Some reset by writing 1 */
1083		writeb(r | DSR_TIMEOUT, priv->regs.dsr);
1084		/* Others by writing 0 */
1085		writeb(r & ~DSR_TIMEOUT, priv->regs.dsr);
1086
1087		r = parport_ip32_read_status(p);
1088		cleared = !(r & DSR_TIMEOUT);
1089	}
1090
1091	pr_trace(p, "(): %s", cleared ? "cleared" : "failed");
1092	return cleared;
1093}
1094
1095/**
1096 * parport_ip32_epp_read - generic EPP read function
1097 * @eppreg:	I/O register to read from
1098 * @p:		pointer to &struct parport
1099 * @buf:	buffer to store read data
1100 * @len:	length of buffer @buf
1101 * @flags:	may be PARPORT_EPP_FAST
1102 */
1103static size_t parport_ip32_epp_read(void __iomem *eppreg,
1104				    struct parport *p, void *buf,
1105				    size_t len, int flags)
1106{
1107	struct parport_ip32_private * const priv = p->physport->private_data;
1108	size_t got;
1109	parport_ip32_set_mode(p, ECR_MODE_EPP);
1110	parport_ip32_data_reverse(p);
1111	parport_ip32_write_control(p, DCR_nINIT);
1112	if ((flags & PARPORT_EPP_FAST) && (len > 1)) {
1113		readsb(eppreg, buf, len);
1114		if (readb(priv->regs.dsr) & DSR_TIMEOUT) {
1115			parport_ip32_clear_epp_timeout(p);
1116			return -EIO;
1117		}
1118		got = len;
1119	} else {
1120		u8 *bufp = buf;
1121		for (got = 0; got < len; got++) {
1122			*bufp++ = readb(eppreg);
1123			if (readb(priv->regs.dsr) & DSR_TIMEOUT) {
1124				parport_ip32_clear_epp_timeout(p);
1125				break;
1126			}
1127		}
1128	}
1129	parport_ip32_data_forward(p);
1130	parport_ip32_set_mode(p, ECR_MODE_PS2);
1131	return got;
1132}
1133
1134/**
1135 * parport_ip32_epp_write - generic EPP write function
1136 * @eppreg:	I/O register to write to
1137 * @p:		pointer to &struct parport
1138 * @buf:	buffer of data to write
1139 * @len:	length of buffer @buf
1140 * @flags:	may be PARPORT_EPP_FAST
1141 */
1142static size_t parport_ip32_epp_write(void __iomem *eppreg,
1143				     struct parport *p, const void *buf,
1144				     size_t len, int flags)
1145{
1146	struct parport_ip32_private * const priv = p->physport->private_data;
1147	size_t written;
1148	parport_ip32_set_mode(p, ECR_MODE_EPP);
1149	parport_ip32_data_forward(p);
1150	parport_ip32_write_control(p, DCR_nINIT);
1151	if ((flags & PARPORT_EPP_FAST) && (len > 1)) {
1152		writesb(eppreg, buf, len);
1153		if (readb(priv->regs.dsr) & DSR_TIMEOUT) {
1154			parport_ip32_clear_epp_timeout(p);
1155			return -EIO;
1156		}
1157		written = len;
1158	} else {
1159		const u8 *bufp = buf;
1160		for (written = 0; written < len; written++) {
1161			writeb(*bufp++, eppreg);
1162			if (readb(priv->regs.dsr) & DSR_TIMEOUT) {
1163				parport_ip32_clear_epp_timeout(p);
1164				break;
1165			}
1166		}
1167	}
1168	parport_ip32_set_mode(p, ECR_MODE_PS2);
1169	return written;
1170}
1171
1172/**
1173 * parport_ip32_epp_read_data - read a block of data in EPP mode
1174 * @p:		pointer to &struct parport
1175 * @buf:	buffer to store read data
1176 * @len:	length of buffer @buf
1177 * @flags:	may be PARPORT_EPP_FAST
1178 */
1179static size_t parport_ip32_epp_read_data(struct parport *p, void *buf,
1180					 size_t len, int flags)
1181{
1182	struct parport_ip32_private * const priv = p->physport->private_data;
1183	return parport_ip32_epp_read(priv->regs.eppData0, p, buf, len, flags);
1184}
1185
1186/**
1187 * parport_ip32_epp_write_data - write a block of data in EPP mode
1188 * @p:		pointer to &struct parport
1189 * @buf:	buffer of data to write
1190 * @len:	length of buffer @buf
1191 * @flags:	may be PARPORT_EPP_FAST
1192 */
1193static size_t parport_ip32_epp_write_data(struct parport *p, const void *buf,
1194					  size_t len, int flags)
1195{
1196	struct parport_ip32_private * const priv = p->physport->private_data;
1197	return parport_ip32_epp_write(priv->regs.eppData0, p, buf, len, flags);
1198}
1199
1200/**
1201 * parport_ip32_epp_read_addr - read a block of addresses in EPP mode
1202 * @p:		pointer to &struct parport
1203 * @buf:	buffer to store read data
1204 * @len:	length of buffer @buf
1205 * @flags:	may be PARPORT_EPP_FAST
1206 */
1207static size_t parport_ip32_epp_read_addr(struct parport *p, void *buf,
1208					 size_t len, int flags)
1209{
1210	struct parport_ip32_private * const priv = p->physport->private_data;
1211	return parport_ip32_epp_read(priv->regs.eppAddr, p, buf, len, flags);
1212}
1213
1214/**
1215 * parport_ip32_epp_write_addr - write a block of addresses in EPP mode
1216 * @p:		pointer to &struct parport
1217 * @buf:	buffer of data to write
1218 * @len:	length of buffer @buf
1219 * @flags:	may be PARPORT_EPP_FAST
1220 */
1221static size_t parport_ip32_epp_write_addr(struct parport *p, const void *buf,
1222					  size_t len, int flags)
1223{
1224	struct parport_ip32_private * const priv = p->physport->private_data;
1225	return parport_ip32_epp_write(priv->regs.eppAddr, p, buf, len, flags);
1226}
1227
1228/*--- ECP mode functions (FIFO) ----------------------------------------*/
1229
1230/**
1231 * parport_ip32_fifo_wait_break - check if the waiting function should return
1232 * @p:		pointer to &struct parport
1233 * @expire:	timeout expiring date, in jiffies
1234 *
1235 * parport_ip32_fifo_wait_break() checks if the waiting function should return
1236 * immediately or not.  The break conditions are:
1237 *	- expired timeout;
1238 *	- a pending signal;
1239 *	- nFault asserted low.
1240 * This function also calls cond_resched().
1241 */
1242static unsigned int parport_ip32_fifo_wait_break(struct parport *p,
1243						 unsigned long expire)
1244{
1245	cond_resched();
1246	if (time_after(jiffies, expire)) {
1247		pr_debug1(PPIP32 "%s: FIFO write timed out\n", p->name);
1248		return 1;
1249	}
1250	if (signal_pending(current)) {
1251		pr_debug1(PPIP32 "%s: Signal pending\n", p->name);
1252		return 1;
1253	}
1254	if (!(parport_ip32_read_status(p) & DSR_nFAULT)) {
1255		pr_debug1(PPIP32 "%s: nFault asserted low\n", p->name);
1256		return 1;
1257	}
1258	return 0;
1259}
1260
1261/**
1262 * parport_ip32_fwp_wait_polling - wait for FIFO to empty (polling)
1263 * @p:		pointer to &struct parport
1264 *
 * Returns the number of bytes that can safely be written to the FIFO.  A
1266 * return value of zero means that the calling function should terminate as
1267 * fast as possible.
1268 */
1269static unsigned int parport_ip32_fwp_wait_polling(struct parport *p)
1270{
1271	struct parport_ip32_private * const priv = p->physport->private_data;
1272	struct parport * const physport = p->physport;
1273	unsigned long expire;
1274	unsigned int count;
1275	unsigned int ecr;
1276
1277	expire = jiffies + physport->cad->timeout;
1278	count = 0;
1279	while (1) {
1280		if (parport_ip32_fifo_wait_break(p, expire))
1281			break;
1282
		/* Check FIFO state.  We do nothing when the FIFO is neither
		 * full nor empty.  It appears that the FIFO full bit is not
		 * always reliable, the FIFO state is sometimes wrongly
		 * reported, and the chip gets confused if we give it another
		 * byte. */
1287		ecr = parport_ip32_read_econtrol(p);
1288		if (ecr & ECR_F_EMPTY) {
1289			/* FIFO is empty, fill it up */
1290			count = priv->fifo_depth;
1291			break;
1292		}
1293
1294		/* Wait a moment... */
1295		udelay(FIFO_POLLING_INTERVAL);
1296	} /* while (1) */
1297
1298	return count;
1299}
1300
1301/**
1302 * parport_ip32_fwp_wait_interrupt - wait for FIFO to empty (interrupt-driven)
1303 * @p:		pointer to &struct parport
1304 *
 * Returns the number of bytes that can safely be written to the FIFO.  A
1306 * return value of zero means that the calling function should terminate as
1307 * fast as possible.
1308 */
1309static unsigned int parport_ip32_fwp_wait_interrupt(struct parport *p)
1310{
	static unsigned int lost_interrupt;
1312	struct parport_ip32_private * const priv = p->physport->private_data;
1313	struct parport * const physport = p->physport;
1314	unsigned long nfault_timeout;
1315	unsigned long expire;
1316	unsigned int count;
1317	unsigned int ecr;
1318
1319	nfault_timeout = min((unsigned long)physport->cad->timeout,
1320			     msecs_to_jiffies(FIFO_NFAULT_TIMEOUT));
1321	expire = jiffies + physport->cad->timeout;
1322	count = 0;
1323	while (1) {
1324		if (parport_ip32_fifo_wait_break(p, expire))
1325			break;
1326
		/* Initialize completion used to take interrupts into account */
1328		INIT_COMPLETION(priv->irq_complete);
1329
1330		/* Enable serviceIntr */
1331		parport_ip32_frob_econtrol(p, ECR_SERVINTR, 0);
1332
1333		/* Enabling serviceIntr while the FIFO is empty does not
1334		 * always generate an interrupt, so check for emptiness
1335		 * now. */
1336		ecr = parport_ip32_read_econtrol(p);
1337		if (!(ecr & ECR_F_EMPTY)) {
1338			/* FIFO is not empty: wait for an interrupt or a
1339			 * timeout to occur */
1340			wait_for_completion_interruptible_timeout(
1341				&priv->irq_complete, nfault_timeout);
1342			ecr = parport_ip32_read_econtrol(p);
1343			if ((ecr & ECR_F_EMPTY) && !(ecr & ECR_SERVINTR)
1344			    && !lost_interrupt) {
1345				printk(KERN_WARNING PPIP32
1346				       "%s: lost interrupt in %s\n",
1347				       p->name, __func__);
1348				lost_interrupt = 1;
1349			}
1350		}
1351
1352		/* Disable serviceIntr */
1353		parport_ip32_frob_econtrol(p, ECR_SERVINTR, ECR_SERVINTR);
1354
1355		/* Check FIFO state */
1356		if (ecr & ECR_F_EMPTY) {
1357			/* FIFO is empty, fill it up */
1358			count = priv->fifo_depth;
1359			break;
1360		} else if (ecr & ECR_SERVINTR) {
			/* FIFO is not empty, but we know that we can safely
			 * push writeIntrThreshold bytes into it */
1363			count = priv->writeIntrThreshold;
1364			break;
1365		}
1366		/* FIFO is not empty, and we did not get any interrupt.
1367		 * Either it's time to check for nFault, or a signal is
1368		 * pending.  This is verified in
1369		 * parport_ip32_fifo_wait_break(), so we continue the loop. */
1370	} /* while (1) */
1371
1372	return count;
1373}
1374
1375/**
1376 * parport_ip32_fifo_write_block_pio - write a block of data (PIO mode)
1377 * @p:		pointer to &struct parport
1378 * @buf:	buffer of data to write
1379 * @len:	length of buffer @buf
1380 *
1381 * Uses PIO to write the contents of the buffer @buf into the parallel port
1382 * FIFO.  Returns the number of bytes that were actually written.  It can work
1383 * with or without the help of interrupts.  The parallel port must be
1384 * correctly initialized before calling parport_ip32_fifo_write_block_pio().
1385 */
1386static size_t parport_ip32_fifo_write_block_pio(struct parport *p,
1387						const void *buf, size_t len)
1388{
1389	struct parport_ip32_private * const priv = p->physport->private_data;
1390	const u8 *bufp = buf;
1391	size_t left = len;
1392
1393	priv->irq_mode = PARPORT_IP32_IRQ_HERE;
1394
1395	while (left > 0) {
1396		unsigned int count;
1397
1398		count = (p->irq == PARPORT_IRQ_NONE) ?
1399			parport_ip32_fwp_wait_polling(p) :
1400			parport_ip32_fwp_wait_interrupt(p);
1401		if (count == 0)
1402			break;	/* Transmission should be stopped */
1403		if (count > left)
1404			count = left;
1405		if (count == 1) {
1406			writeb(*bufp, priv->regs.fifo);
1407			bufp++, left--;
1408		} else {
1409			writesb(priv->regs.fifo, bufp, count);
1410			bufp += count, left -= count;
1411		}
1412	}
1413
1414	priv->irq_mode = PARPORT_IP32_IRQ_FWD;
1415
1416	return len - left;
1417}
1418
1419/**
1420 * parport_ip32_fifo_write_block_dma - write a block of data (DMA mode)
1421 * @p:		pointer to &struct parport
1422 * @buf:	buffer of data to write
1423 * @len:	length of buffer @buf
1424 *
1425 * Uses DMA to write the contents of the buffer @buf into the parallel port
1426 * FIFO.  Returns the number of bytes that were actually written.  The
1427 * parallel port must be correctly initialized before calling
1428 * parport_ip32_fifo_write_block_dma().
1429 */
1430static size_t parport_ip32_fifo_write_block_dma(struct parport *p,
1431						const void *buf, size_t len)
1432{
1433	struct parport_ip32_private * const priv = p->physport->private_data;
1434	struct parport * const physport = p->physport;
1435	unsigned long nfault_timeout;
1436	unsigned long expire;
1437	size_t written;
1438	unsigned int ecr;
1439
1440	priv->irq_mode = PARPORT_IP32_IRQ_HERE;
1441
1442	parport_ip32_dma_start(DMA_TO_DEVICE, (void *)buf, len);
1443	INIT_COMPLETION(priv->irq_complete);
1444	parport_ip32_frob_econtrol(p, ECR_DMAEN | ECR_SERVINTR, ECR_DMAEN);
1445
1446	nfault_timeout = min((unsigned long)physport->cad->timeout,
1447			     msecs_to_jiffies(FIFO_NFAULT_TIMEOUT));
1448	expire = jiffies + physport->cad->timeout;
1449	while (1) {
1450		if (parport_ip32_fifo_wait_break(p, expire))
1451			break;
1452		wait_for_completion_interruptible_timeout(&priv->irq_complete,
1453							  nfault_timeout);
1454		ecr = parport_ip32_read_econtrol(p);
1455		if (ecr & ECR_SERVINTR)
1456			break;	/* DMA transfer just finished */
1457	}
1458	parport_ip32_dma_stop();
1459	written = len - parport_ip32_dma_get_residue();
1460
1461	priv->irq_mode = PARPORT_IP32_IRQ_FWD;
1462
1463	return written;
1464}
1465
1466/**
1467 * parport_ip32_fifo_write_block - write a block of data
1468 * @p:		pointer to &struct parport
1469 * @buf:	buffer of data to write
1470 * @len:	length of buffer @buf
1471 *
1472 * Uses PIO or DMA to write the contents of the buffer @buf into the parallel
 * port FIFO.  Returns the number of bytes that were actually written.
1474 */
1475static size_t parport_ip32_fifo_write_block(struct parport *p,
1476					    const void *buf, size_t len)
1477{
1478	size_t written = 0;
1479	if (len)
1480		written = (p->modes & PARPORT_MODE_DMA) ?
1481			parport_ip32_fifo_write_block_dma(p, buf, len) :
1482			parport_ip32_fifo_write_block_pio(p, buf, len);
1483	return written;
1484}
1485
1486/**
1487 * parport_ip32_drain_fifo - wait for FIFO to empty
1488 * @p:		pointer to &struct parport
1489 * @timeout:	timeout, in jiffies
1490 *
 * This function waits for the FIFO to empty.  It returns 1 when the FIFO is
 * empty, or 0 if the timeout @timeout is reached beforehand or a signal is
 * pending.
1493 */
1494static unsigned int parport_ip32_drain_fifo(struct parport *p,
1495					    unsigned long timeout)
1496{
1497	unsigned long expire = jiffies + timeout;
1498	unsigned int polling_interval;
1499	unsigned int counter;
1500
1501	/* Busy wait for approx. 200us */
1502	for (counter = 0; counter < 40; counter++) {
1503		if (parport_ip32_read_econtrol(p) & ECR_F_EMPTY)
1504			break;
1505		if (time_after(jiffies, expire))
1506			break;
1507		if (signal_pending(current))
1508			break;
1509		udelay(5);
1510	}
	/* Poll slowly.  The polling interval starts at 1 millisecond, and is
	 * increased exponentially up to 128 milliseconds.  */
1513	polling_interval = 1; /* msecs */
1514	while (!(parport_ip32_read_econtrol(p) & ECR_F_EMPTY)) {
1515		if (time_after_eq(jiffies, expire))
1516			break;
1517		msleep_interruptible(polling_interval);
1518		if (signal_pending(current))
1519			break;
1520		if (polling_interval < 128)
1521			polling_interval *= 2;
1522	}
1523
1524	return !!(parport_ip32_read_econtrol(p) & ECR_F_EMPTY);
1525}
1526
1527/**
1528 * parport_ip32_get_fifo_residue - reset FIFO
1529 * @p:		pointer to &struct parport
1530 * @mode:	current operation mode (ECR_MODE_PPF or ECR_MODE_ECP)
1531 *
1532 * This function resets FIFO, and returns the number of bytes remaining in it.
1533 */
1534static unsigned int parport_ip32_get_fifo_residue(struct parport *p,
1535						  unsigned int mode)
1536{
1537	struct parport_ip32_private * const priv = p->physport->private_data;
1538	unsigned int residue;
1539	unsigned int cnfga;
1540
1541	if (parport_ip32_read_econtrol(p) & ECR_F_EMPTY)
1542		residue = 0;
1543	else {
1544		pr_debug1(PPIP32 "%s: FIFO is stuck\n", p->name);
1545
1546		/* Stop all transfers.
1547		 *
		 * Microsoft's document says to drive DCR_STROBE to 0, but it
		 * doesn't work (at least in Compatibility mode, not tested in
		 * ECP mode).  Switching directly to Test mode (as in
		 * parport_pc) is not an option either: it confuses the port,
		 * and ECP service interrupts no longer work after that.  A
		 * hard reset is then needed to revert to a sane state.
1554		 *
1555		 * Let's hope that the FIFO is really stuck and that the
1556		 * peripheral doesn't wake up now.
1557		 */
1558		parport_ip32_frob_control(p, DCR_STROBE, 0);
1559
1560		/* Fill up FIFO */
1561		for (residue = priv->fifo_depth; residue > 0; residue--) {
1562			if (parport_ip32_read_econtrol(p) & ECR_F_FULL)
1563				break;
1564			writeb(0x00, priv->regs.fifo);
1565		}
1566	}
1567	if (residue)
1568		pr_debug1(PPIP32 "%s: %d PWord%s left in FIFO\n",
1569			  p->name, residue,
1570			  (residue == 1) ? " was" : "s were");
1571
1572	/* Now reset the FIFO */
1573	parport_ip32_set_mode(p, ECR_MODE_PS2);
1574
1575	/* Host recovery for ECP mode */
1576	if (mode == ECR_MODE_ECP) {
1577		parport_ip32_data_reverse(p);
1578		parport_ip32_frob_control(p, DCR_nINIT, 0);
1579		if (parport_wait_peripheral(p, DSR_PERROR, 0))
			pr_debug1(PPIP32 "%s: PError timeout 1 in %s\n",
1581				  p->name, __func__);
1582		parport_ip32_frob_control(p, DCR_STROBE, DCR_STROBE);
1583		parport_ip32_frob_control(p, DCR_nINIT, DCR_nINIT);
1584		if (parport_wait_peripheral(p, DSR_PERROR, DSR_PERROR))
			pr_debug1(PPIP32 "%s: PError timeout 2 in %s\n",
1586				  p->name, __func__);
1587	}
1588
1589	/* Adjust residue if needed */
1590	parport_ip32_set_mode(p, ECR_MODE_CFG);
1591	cnfga = readb(priv->regs.cnfgA);
1592	if (!(cnfga & CNFGA_nBYTEINTRANS)) {
1593		pr_debug1(PPIP32 "%s: cnfgA contains 0x%02x\n",
1594			  p->name, cnfga);
1595		pr_debug1(PPIP32 "%s: Accounting for extra byte\n",
1596			  p->name);
1597		residue++;
1598	}
1599
1600	/* Don't care about partial PWords since we do not support
1601	 * PWord != 1 byte. */
1602
1603	/* Back to forward PS2 mode. */
1604	parport_ip32_set_mode(p, ECR_MODE_PS2);
1605	parport_ip32_data_forward(p);
1606
1607	return residue;
1608}
1609
1610/**
1611 * parport_ip32_compat_write_data - write a block of data in SPP mode
1612 * @p:		pointer to &struct parport
1613 * @buf:	buffer of data to write
1614 * @len:	length of buffer @buf
1615 * @flags:	ignored
1616 */
1617static size_t parport_ip32_compat_write_data(struct parport *p,
1618					     const void *buf, size_t len,
1619					     int flags)
1620{
1621	static unsigned int ready_before = 1;
1622	struct parport_ip32_private * const priv = p->physport->private_data;
1623	struct parport * const physport = p->physport;
1624	size_t written = 0;
1625
1626	/* Special case: a timeout of zero means we cannot call schedule().
1627	 * Also if O_NONBLOCK is set then use the default implementation. */
1628	if (physport->cad->timeout <= PARPORT_INACTIVITY_O_NONBLOCK)
1629		return parport_ieee1284_write_compat(p, buf, len, flags);
1630
1631	/* Reset FIFO, go in forward mode, and disable ackIntEn */
1632	parport_ip32_set_mode(p, ECR_MODE_PS2);
1633	parport_ip32_write_control(p, DCR_SELECT | DCR_nINIT);
1634	parport_ip32_data_forward(p);
1635	parport_ip32_disable_irq(p);
1636	parport_ip32_set_mode(p, ECR_MODE_PPF);
1637	physport->ieee1284.phase = IEEE1284_PH_FWD_DATA;
1638
1639	/* Wait for peripheral to become ready */
1640	if (parport_wait_peripheral(p, DSR_nBUSY | DSR_nFAULT,
1641				       DSR_nBUSY | DSR_nFAULT)) {
		/* Avoid flooding the logs */
1643		if (ready_before)
1644			printk(KERN_INFO PPIP32 "%s: not ready in %s\n",
1645			       p->name, __func__);
1646		ready_before = 0;
1647		goto stop;
1648	}
1649	ready_before = 1;
1650
1651	written = parport_ip32_fifo_write_block(p, buf, len);
1652
	/* Wait for the FIFO to empty.  Timeout is proportional to fifo_depth.  */
1654	parport_ip32_drain_fifo(p, physport->cad->timeout * priv->fifo_depth);
1655
1656	/* Check for a potential residue */
1657	written -= parport_ip32_get_fifo_residue(p, ECR_MODE_PPF);
1658
1659	/* Then, wait for BUSY to get low. */
1660	if (parport_wait_peripheral(p, DSR_nBUSY, DSR_nBUSY))
1661		printk(KERN_DEBUG PPIP32 "%s: BUSY timeout in %s\n",
1662		       p->name, __func__);
1663
1664stop:
1665	/* Reset FIFO */
1666	parport_ip32_set_mode(p, ECR_MODE_PS2);
1667	physport->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
1668
1669	return written;
1670}
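
/* Minimal user-space sketch of how this block write is typically reached
 * (illustrative only; assumes the ppdev module provides /dev/parport0, and
 * needs <fcntl.h>, <unistd.h>, <sys/ioctl.h> and <linux/ppdev.h>; error
 * handling omitted).  A write() on a claimed ppdev port goes through
 * parport_write(), which lands in the routine above when the port is in
 * compatibility mode and hardware SPP support is enabled:
 *
 *	int fd = open("/dev/parport0", O_RDWR);
 *	ioctl(fd, PPCLAIM);
 *	write(fd, "hello", 5);
 *	ioctl(fd, PPRELEASE);
 *	close(fd);
 */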
1671
1672
1673/**
1674 * parport_ip32_ecp_write_data - write a block of data in ECP mode
1675 * @p:		pointer to &struct parport
1676 * @buf:	buffer of data to write
1677 * @len:	length of buffer @buf
1678 * @flags:	ignored
1679 */
1680static size_t parport_ip32_ecp_write_data(struct parport *p,
1681					  const void *buf, size_t len,
1682					  int flags)
1683{
1684	static unsigned int ready_before = 1;
1685	struct parport_ip32_private * const priv = p->physport->private_data;
1686	struct parport * const physport = p->physport;
1687	size_t written = 0;
1688
1689	/* Special case: a timeout of zero means we cannot call schedule().
1690	 * Also, if O_NONBLOCK is set, use the default implementation. */
1691	if (physport->cad->timeout <= PARPORT_INACTIVITY_O_NONBLOCK)
1692		return parport_ieee1284_ecp_write_data(p, buf, len, flags);
1693
1694	/* Negotiate to forward mode if necessary. */
1695	if (physport->ieee1284.phase != IEEE1284_PH_FWD_IDLE) {
1696		/* Event 47: Set nInit high. */
1697		parport_ip32_frob_control(p, DCR_nINIT | DCR_AUTOFD,
1698					     DCR_nINIT | DCR_AUTOFD);
1699
1700		/* Event 49: PError goes high. */
1701		if (parport_wait_peripheral(p, DSR_PERROR, DSR_PERROR)) {
1702			printk(KERN_DEBUG PPIP32 "%s: PError timeout in %s\n",
1703			       p->name, __func__);
1704			physport->ieee1284.phase = IEEE1284_PH_ECP_DIR_UNKNOWN;
1705			return 0;
1706		}
1707	}
1708
1709	/* Reset FIFO, go in forward mode, and disable ackIntEn */
1710	parport_ip32_set_mode(p, ECR_MODE_PS2);
1711	parport_ip32_write_control(p, DCR_SELECT | DCR_nINIT);
1712	parport_ip32_data_forward(p);
1713	parport_ip32_disable_irq(p);
1714	parport_ip32_set_mode(p, ECR_MODE_ECP);
1715	physport->ieee1284.phase = IEEE1284_PH_FWD_DATA;
1716
1717	/* Wait for peripheral to become ready */
1718	if (parport_wait_peripheral(p, DSR_nBUSY | DSR_nFAULT,
1719				       DSR_nBUSY | DSR_nFAULT)) {
1720		/* Avoid flooding the logs */
1721		if (ready_before)
1722			printk(KERN_INFO PPIP32 "%s: not ready in %s\n",
1723			       p->name, __func__);
1724		ready_before = 0;
1725		goto stop;
1726	}
1727	ready_before = 1;
1728
1729	written = parport_ip32_fifo_write_block(p, buf, len);
1730
1731	/* Wait for the FIFO to empty.  Timeout is proportional to fifo_depth.  */
1732	parport_ip32_drain_fifo(p, physport->cad->timeout * priv->fifo_depth);
1733
1734	/* Check for a potential residue */
1735	written -= parport_ip32_get_fifo_residue(p, ECR_MODE_ECP);
1736
1737	/* Then, wait for BUSY to get low. */
1738	if (parport_wait_peripheral(p, DSR_nBUSY, DSR_nBUSY))
1739		printk(KERN_DEBUG PPIP32 "%s: BUSY timeout in %s\n",
1740		       p->name, __func__);
1741
1742stop:
1743	/* Reset FIFO */
1744	parport_ip32_set_mode(p, ECR_MODE_PS2);
1745	physport->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
1746
1747	return written;
1748}
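
/* Sketch of how an in-kernel client would reach the ECP path above
 * (illustrative only; the port must already be claimed, e.g. with
 * parport_claim_or_block(), and return values must be checked in real
 * code).  parport_negotiate() performs the IEEE 1284 negotiation, and
 * parport_write() then dispatches to the ecp_write_data operation
 * installed by parport_ip32_probe_port():
 *
 *	if (parport_negotiate(port, IEEE1284_MODE_ECP) == 0) {
 *		parport_write(port, buf, len);
 *		parport_negotiate(port, IEEE1284_MODE_COMPAT);
 *	}
 */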
1749
1750
1751/*--- Default parport operations ---------------------------------------*/
1752
1753static __initdata struct parport_operations parport_ip32_ops = {
1754	.write_data		= parport_ip32_write_data,
1755	.read_data		= parport_ip32_read_data,
1756
1757	.write_control		= parport_ip32_write_control,
1758	.read_control		= parport_ip32_read_control,
1759	.frob_control		= parport_ip32_frob_control,
1760
1761	.read_status		= parport_ip32_read_status,
1762
1763	.enable_irq		= parport_ip32_enable_irq,
1764	.disable_irq		= parport_ip32_disable_irq,
1765
1766	.data_forward		= parport_ip32_data_forward,
1767	.data_reverse		= parport_ip32_data_reverse,
1768
1769	.init_state		= parport_ip32_init_state,
1770	.save_state		= parport_ip32_save_state,
1771	.restore_state		= parport_ip32_restore_state,
1772
1773	.epp_write_data		= parport_ieee1284_epp_write_data,
1774	.epp_read_data		= parport_ieee1284_epp_read_data,
1775	.epp_write_addr		= parport_ieee1284_epp_write_addr,
1776	.epp_read_addr		= parport_ieee1284_epp_read_addr,
1777
1778	.ecp_write_data		= parport_ieee1284_ecp_write_data,
1779	.ecp_read_data		= parport_ieee1284_ecp_read_data,
1780	.ecp_write_addr		= parport_ieee1284_ecp_write_addr,
1781
1782	.compat_write_data	= parport_ieee1284_write_compat,
1783	.nibble_read_data	= parport_ieee1284_read_nibble,
1784	.byte_read_data		= parport_ieee1284_read_byte,
1785
1786	.owner			= THIS_MODULE,
1787};
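
/* These defaults all point at the generic software IEEE 1284 routines.
 * Each port gets its own copy of this table, and parport_ip32_probe_port()
 * below patches individual entries with the FIFO-based implementations
 * once the corresponding hardware has been probed successfully, e.g.:
 *
 *	p->ops->compat_write_data = parport_ip32_compat_write_data;
 */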
1788
1789/*--- Device detection -------------------------------------------------*/
1790
1791/**
1792 * parport_ip32_ecp_supported - check for an ECP port
1793 * @p:		pointer to the &parport structure
1794 *
1795 * Returns 1 if an ECP port is found, and 0 otherwise.  In practice, this
1796 * function checks whether an Extended Control Register seems to be present.
1797 * On successful return, the port is placed in SPP mode.
1798 */
1799static __init unsigned int parport_ip32_ecp_supported(struct parport *p)
1800{
1801	struct parport_ip32_private * const priv = p->physport->private_data;
1802	unsigned int ecr;
1803
1804	ecr = ECR_MODE_PS2 | ECR_nERRINTR | ECR_SERVINTR;
1805	writeb(ecr, priv->regs.ecr);
1806	if (readb(priv->regs.ecr) != (ecr | ECR_F_EMPTY))
1807		goto fail;
1808
1809	pr_probe(p, "Found working ECR register\n");
1810	parport_ip32_set_mode(p, ECR_MODE_SPP);
1811	parport_ip32_write_control(p, DCR_SELECT | DCR_nINIT);
1812	return 1;
1813
1814fail:
1815	pr_probe(p, "ECR register not found\n");
1816	return 0;
1817}
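
/* Illustration of the check above, assuming the usual ECP-style ECR bit
 * layout (mode in bits 7:5, nErrIntrEn = 0x10, serviceIntr = 0x04, FIFO
 * empty flag = 0x01).  The value written is
 *
 *	ECR_MODE_PS2 | ECR_nERRINTR | ECR_SERVINTR
 *		== 0x20 | 0x10 | 0x04 == 0x34
 *
 * and, since the FIFO is necessarily empty at this point, a working ECR
 * must read back as 0x34 | ECR_F_EMPTY == 0x35.  Any other value means no
 * usable Extended Control Register was found at this address.
 */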
1818
1819/**
1820 * parport_ip32_fifo_supported - check for FIFO parameters
1821 * @p:		pointer to the &parport structure
1822 *
1823 * Check the FIFO parameters of an Extended Capabilities Port.  Returns 1 on
1824 * success, and 0 otherwise.  The measured FIFO parameters are recorded in the
1825 * port's private structure.  On return, the port is placed in SPP mode.
1826 */
1827static __init unsigned int parport_ip32_fifo_supported(struct parport *p)
1828{
1829	struct parport_ip32_private * const priv = p->physport->private_data;
1830	unsigned int configa, configb;
1831	unsigned int pword;
1832	unsigned int i;
1833
1834	/* Configuration mode */
1835	parport_ip32_set_mode(p, ECR_MODE_CFG);
1836	configa = readb(priv->regs.cnfgA);
1837	configb = readb(priv->regs.cnfgB);
1838
1839	/* Find out PWord size */
1840	switch (configa & CNFGA_ID_MASK) {
1841	case CNFGA_ID_8:
1842		pword = 1;
1843		break;
1844	case CNFGA_ID_16:
1845		pword = 2;
1846		break;
1847	case CNFGA_ID_32:
1848		pword = 4;
1849		break;
1850	default:
1851		pr_probe(p, "Unknown implementation ID: 0x%0x\n",
1852			 (configa & CNFGA_ID_MASK) >> CNFGA_ID_SHIFT);
1853		goto fail;
1854		break;
1855	}
1856	if (pword != 1) {
1857		pr_probe(p, "Unsupported PWord size: %u\n", pword);
1858		goto fail;
1859	}
1860	priv->pword = pword;
1861	pr_probe(p, "PWord is %u bits\n", 8 * priv->pword);
1862
1863	/* Check for compression support */
1864	writeb(configb | CNFGB_COMPRESS, priv->regs.cnfgB);
1865	if (readb(priv->regs.cnfgB) & CNFGB_COMPRESS)
1866		pr_probe(p, "Hardware compression detected (unsupported)\n");
1867	writeb(configb & ~CNFGB_COMPRESS, priv->regs.cnfgB);
1868
1869	/* Reset FIFO and go in test mode (no interrupt, no DMA) */
1870	parport_ip32_set_mode(p, ECR_MODE_TST);
1871
1872	/* FIFO must be empty now */
1873	if (!(readb(priv->regs.ecr) & ECR_F_EMPTY)) {
1874		pr_probe(p, "FIFO not reset\n");
1875		goto fail;
1876	}
1877
1878	/* Find out FIFO depth. */
1879	priv->fifo_depth = 0;
1880	for (i = 0; i < 1024; i++) {
1881		if (readb(priv->regs.ecr) & ECR_F_FULL) {
1882			/* FIFO full */
1883			priv->fifo_depth = i;
1884			break;
1885		}
1886		writeb((u8)i, priv->regs.fifo);
1887	}
1888	if (i >= 1024) {
1889		pr_probe(p, "Can't fill FIFO\n");
1890		goto fail;
1891	}
1892	if (!priv->fifo_depth) {
1893		pr_probe(p, "Can't get FIFO depth\n");
1894		goto fail;
1895	}
1896	pr_probe(p, "FIFO is %u PWords deep\n", priv->fifo_depth);
1897
1898	/* Enable interrupts */
1899	parport_ip32_frob_econtrol(p, ECR_SERVINTR, 0);
1900
1901	/* Find out writeIntrThreshold: number of PWords we know we can write
1902	 * if we get an interrupt. */
1903	priv->writeIntrThreshold = 0;
1904	for (i = 0; i < priv->fifo_depth; i++) {
1905		if (readb(priv->regs.fifo) != (u8)i) {
1906			pr_probe(p, "Invalid data in FIFO\n");
1907			goto fail;
1908		}
1909		if (!priv->writeIntrThreshold
1910		    && readb(priv->regs.ecr) & ECR_SERVINTR)
1911			/* writeIntrThreshold reached */
1912			priv->writeIntrThreshold = i + 1;
1913		if (i + 1 < priv->fifo_depth
1914		    && readb(priv->regs.ecr) & ECR_F_EMPTY) {
1915			/* FIFO empty before the last byte? */
1916			pr_probe(p, "Data lost in FIFO\n");
1917			goto fail;
1918		}
1919	}
1920	if (!priv->writeIntrThreshold) {
1921		pr_probe(p, "Can't get writeIntrThreshold\n");
1922		goto fail;
1923	}
1924	pr_probe(p, "writeIntrThreshold is %u\n", priv->writeIntrThreshold);
1925
1926	/* FIFO must be empty now */
1927	if (!(readb(priv->regs.ecr) & ECR_F_EMPTY)) {
1928		pr_probe(p, "Can't empty FIFO\n");
1929		goto fail;
1930	}
1931
1932	/* Reset FIFO */
1933	parport_ip32_set_mode(p, ECR_MODE_PS2);
1934	/* Set reverse direction (must be in PS2 mode) */
1935	parport_ip32_data_reverse(p);
1936	/* Test FIFO, no interrupt, no DMA */
1937	parport_ip32_set_mode(p, ECR_MODE_TST);
1938	/* Enable interrupts */
1939	parport_ip32_frob_econtrol(p, ECR_SERVINTR, 0);
1940
1941	/* Find out readIntrThreshold: number of PWords we can read if we get
1942	 * an interrupt. */
1943	priv->readIntrThreshold = 0;
1944	for (i = 0; i < priv->fifo_depth; i++) {
1945		writeb(0xaa, priv->regs.fifo);
1946		if (readb(priv->regs.ecr) & ECR_SERVINTR) {
1947			/* readIntrThreshold reached */
1948			priv->readIntrThreshold = i + 1;
1949			break;
1950		}
1951	}
1952	if (!priv->readIntrThreshold) {
1953		pr_probe(p, "Can't get readIntrThreshold\n");
1954		goto fail;
1955	}
1956	pr_probe(p, "readIntrThreshold is %u\n", priv->readIntrThreshold);
1957
1958	/* Reset ECR */
1959	parport_ip32_set_mode(p, ECR_MODE_PS2);
1960	parport_ip32_data_forward(p);
1961	parport_ip32_set_mode(p, ECR_MODE_SPP);
1962	return 1;
1963
1964fail:
1965	priv->fifo_depth = 0;
1966	parport_ip32_set_mode(p, ECR_MODE_SPP);
1967	return 0;
1968}
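
/* Worked example with hypothetical figures: if the fill loop above sees
 * ECR_F_FULL after 16 writes, fifo_depth = 16.  If, while the FIFO is
 * drained again, serviceIntr is first seen set once 8 PWords have been
 * read back, then writeIntrThreshold = 8: after an interrupt, at least
 * 8 PWords can be written without polling the FIFO state.
 * readIntrThreshold is measured the same way in the reverse direction,
 * counting how many PWords must enter the FIFO before serviceIntr rises.
 */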
1969
1970/*--- Initialization code ----------------------------------------------*/
1971
1972/**
1973 * parport_ip32_make_isa_registers - compute (ISA) register addresses
1974 * @regs:	pointer to &struct parport_ip32_regs to fill
1975 * @base:	base address of standard and EPP registers
1976 * @base_hi:	base address of ECP registers
1977 * @regshift:	how much to shift register offset by
1978 *
1979 * Compute register addresses according to the ISA standard.  The addresses
1980 * of the standard and EPP registers are computed from address @base.  The
1981 * addresses of the ECP registers are computed from address @base_hi.
1982 */
1983static void __init
1984parport_ip32_make_isa_registers(struct parport_ip32_regs *regs,
1985				void __iomem *base, void __iomem *base_hi,
1986				unsigned int regshift)
1987{
1988#define r_base(offset)    ((u8 __iomem *)base    + ((offset) << regshift))
1989#define r_base_hi(offset) ((u8 __iomem *)base_hi + ((offset) << regshift))
1990	*regs = (struct parport_ip32_regs){
1991		.data		= r_base(0),
1992		.dsr		= r_base(1),
1993		.dcr		= r_base(2),
1994		.eppAddr	= r_base(3),
1995		.eppData0	= r_base(4),
1996		.eppData1	= r_base(5),
1997		.eppData2	= r_base(6),
1998		.eppData3	= r_base(7),
1999		.ecpAFifo	= r_base(0),
2000		.fifo		= r_base_hi(0),
2001		.cnfgA		= r_base_hi(0),
2002		.cnfgB		= r_base_hi(1),
2003		.ecr		= r_base_hi(2)
2004	};
2005#undef r_base_hi
2006#undef r_base
2007}
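
/* For illustration, with the regshift of 8 used by
 * parport_ip32_probe_port() below, r_base(offset) expands to
 * base + (offset << 8), so successive registers are 256 bytes apart:
 *
 *	data (offset 0)  ->  base    + 0x000
 *	dsr  (offset 1)  ->  base    + 0x100
 *	dcr  (offset 2)  ->  base    + 0x200
 *	ecr  (offset 2)  ->  base_hi + 0x200
 *
 * A regshift of 0 would reproduce the classic byte-consecutive ISA layout.
 */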
2008
2009/**
2010 * parport_ip32_probe_port - probe and register IP32 built-in parallel port
2011 *
2012 * Returns the newly allocated &parport structure.  On error, an error code is
2013 * encoded in the return value with the ERR_PTR function.
2014 */
2015static __init struct parport *parport_ip32_probe_port(void)
2016{
2017	struct parport_ip32_regs regs;
2018	struct parport_ip32_private *priv = NULL;
2019	struct parport_operations *ops = NULL;
2020	struct parport *p = NULL;
2021	int err;
2022
2023	parport_ip32_make_isa_registers(&regs, &mace->isa.parallel,
2024					&mace->isa.ecp1284, 8 /* regshift */);
2025
2026	ops = kmalloc(sizeof(struct parport_operations), GFP_KERNEL);
2027	priv = kmalloc(sizeof(struct parport_ip32_private), GFP_KERNEL);
2028	p = parport_register_port(0, PARPORT_IRQ_NONE, PARPORT_DMA_NONE, ops);
2029	if (ops == NULL || priv == NULL || p == NULL) {
2030		err = -ENOMEM;
2031		goto fail;
2032	}
2033	p->base = MACE_BASE + offsetof(struct sgi_mace, isa.parallel);
2034	p->base_hi = MACE_BASE + offsetof(struct sgi_mace, isa.ecp1284);
2035	p->private_data = priv;
2036
2037	*ops = parport_ip32_ops;
2038	*priv = (struct parport_ip32_private){
2039		.regs			= regs,
2040		.dcr_writable		= DCR_DIR | DCR_SELECT | DCR_nINIT |
2041					  DCR_AUTOFD | DCR_STROBE,
2042		.irq_mode		= PARPORT_IP32_IRQ_FWD,
2043	};
2044	init_completion(&priv->irq_complete);
2045
2046	/* Probe port. */
2047	if (!parport_ip32_ecp_supported(p)) {
2048		err = -ENODEV;
2049		goto fail;
2050	}
2051	parport_ip32_dump_state(p, "begin init", 0);
2052
2053	/* We found what looks like a working ECR register.  Simply assume
2054	 * that all modes are correctly supported.  Enable basic modes. */
2055	p->modes = PARPORT_MODE_PCSPP | PARPORT_MODE_SAFEININT;
2056	p->modes |= PARPORT_MODE_TRISTATE;
2057
2058	if (!parport_ip32_fifo_supported(p)) {
2059		printk(KERN_WARNING PPIP32
2060		       "%s: error: FIFO disabled\n", p->name);
2061		/* Disable the hardware modes that depend on a working FIFO. */
2062		features &= ~PARPORT_IP32_ENABLE_SPP;
2063		features &= ~PARPORT_IP32_ENABLE_ECP;
2064		/* DMA is not needed if FIFO is not supported.  */
2065		features &= ~PARPORT_IP32_ENABLE_DMA;
2066	}
2067
2068	/* Request IRQ */
2069	if (features & PARPORT_IP32_ENABLE_IRQ) {
2070		int irq = MACEISA_PARALLEL_IRQ;
2071		if (request_irq(irq, parport_ip32_interrupt, 0, p->name, p)) {
2072			printk(KERN_WARNING PPIP32
2073			       "%s: error: IRQ disabled\n", p->name);
2074			/* DMA cannot work without interrupts. */
2075			features &= ~PARPORT_IP32_ENABLE_DMA;
2076		} else {
2077			pr_probe(p, "Interrupt support enabled\n");
2078			p->irq = irq;
2079			priv->dcr_writable |= DCR_IRQ;
2080		}
2081	}
2082
2083	/* Allocate DMA resources */
2084	if (features & PARPORT_IP32_ENABLE_DMA) {
2085		if (parport_ip32_dma_register())
2086			printk(KERN_WARNING PPIP32
2087			       "%s: error: DMA disabled\n", p->name);
2088		else {
2089			pr_probe(p, "DMA support enabled\n");
2090			p->dma = 0; /* arbitrary value != PARPORT_DMA_NONE */
2091			p->modes |= PARPORT_MODE_DMA;
2092		}
2093	}
2094
2095	if (features & PARPORT_IP32_ENABLE_SPP) {
2096		/* Enable compatibility FIFO mode */
2097		p->ops->compat_write_data = parport_ip32_compat_write_data;
2098		p->modes |= PARPORT_MODE_COMPAT;
2099		pr_probe(p, "Hardware support for SPP mode enabled\n");
2100	}
2101	if (features & PARPORT_IP32_ENABLE_EPP) {
2102		/* Set up access functions to use EPP hardware. */
2103		p->ops->epp_read_data = parport_ip32_epp_read_data;
2104		p->ops->epp_write_data = parport_ip32_epp_write_data;
2105		p->ops->epp_read_addr = parport_ip32_epp_read_addr;
2106		p->ops->epp_write_addr = parport_ip32_epp_write_addr;
2107		p->modes |= PARPORT_MODE_EPP;
2108		pr_probe(p, "Hardware support for EPP mode enabled\n");
2109	}
2110	if (features & PARPORT_IP32_ENABLE_ECP) {
2111		/* Enable ECP FIFO mode */
2112		p->ops->ecp_write_data = parport_ip32_ecp_write_data;
2113/*		p->ops->ecp_read_data  = parport_ip32_ecp_read_data; */
2114/*		p->ops->ecp_write_addr = parport_ip32_ecp_write_addr; */
2115		p->modes |= PARPORT_MODE_ECP;
2116		pr_probe(p, "Hardware support for ECP mode enabled\n");
2117	}
2118
2119	/* Initialize the port with sensible values */
2120	parport_ip32_set_mode(p, ECR_MODE_PS2);
2121	parport_ip32_write_control(p, DCR_SELECT | DCR_nINIT);
2122	parport_ip32_data_forward(p);
2123	parport_ip32_disable_irq(p);
2124	parport_ip32_write_data(p, 0x00);
2125	parport_ip32_dump_state(p, "end init", 0);
2126
2127	/* Print out what we found */
2128	printk(KERN_INFO "%s: SGI IP32 at 0x%lx (0x%lx)",
2129	       p->name, p->base, p->base_hi);
2130	if (p->irq != PARPORT_IRQ_NONE)
2131		printk(", irq %d", p->irq);
2132	printk(" [");
2133#define printmode(x)	if (p->modes & PARPORT_MODE_##x)		\
2134				printk("%s%s", f++ ? "," : "", #x)
2135	{
2136		unsigned int f = 0;
2137		printmode(PCSPP);
2138		printmode(TRISTATE);
2139		printmode(COMPAT);
2140		printmode(EPP);
2141		printmode(ECP);
2142		printmode(DMA);
2143	}
2144#undef printmode
2145	printk("]\n");
2146
2147	parport_announce_port(p);
2148	return p;
2149
2150fail:
2151	if (p)
2152		parport_put_port(p);
2153	kfree(priv);
2154	kfree(ops);
2155	return ERR_PTR(err);
2156}
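
/* On success, the probe above logs a single summary line of the form
 * (addresses and IRQ number are placeholders here):
 *
 *	parport0: SGI IP32 at 0x... (0x...), irq N [PCSPP,TRISTATE,COMPAT,...]
 *
 * where the bracketed list only contains the modes that were actually
 * enabled, so it shrinks when FIFO, IRQ or DMA setup failed or was
 * disabled through the "features" module parameter.
 */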
2157
2158/**
2159 * parport_ip32_unregister_port - unregister a parallel port
2160 * @p:		pointer to the &struct parport
2161 *
2162 * Unregisters a parallel port and frees previously allocated resources
2163 * (memory, IRQ, ...).
2164 */
2165static __exit void parport_ip32_unregister_port(struct parport *p)
2166{
2167	struct parport_ip32_private * const priv = p->physport->private_data;
2168	struct parport_operations *ops = p->ops;
2169
2170	parport_remove_port(p);
2171	if (p->modes & PARPORT_MODE_DMA)
2172		parport_ip32_dma_unregister();
2173	if (p->irq != PARPORT_IRQ_NONE)
2174		free_irq(p->irq, p);
2175	parport_put_port(p);
2176	kfree(priv);
2177	kfree(ops);
2178}
2179
2180/**
2181 * parport_ip32_init - module initialization function
2182 */
2183static int __init parport_ip32_init(void)
2184{
2185	pr_info(PPIP32 "SGI IP32 built-in parallel port driver v0.6\n");
2186	pr_debug1(PPIP32 "Compiled on %s, %s\n", __DATE__, __TIME__);
2187	this_port = parport_ip32_probe_port();
2188	return IS_ERR(this_port) ? PTR_ERR(this_port) : 0;
2189}
2190
2191/**
2192 * parport_ip32_exit - module termination function
2193 */
2194static void __exit parport_ip32_exit(void)
2195{
2196	parport_ip32_unregister_port(this_port);
2197}
2198
2199/*--- Module stuff -----------------------------------------------------*/
2200
2201MODULE_AUTHOR("Arnaud Giersch <arnaud.giersch@free.fr>");
2202MODULE_DESCRIPTION("SGI IP32 built-in parallel port driver");
2203MODULE_LICENSE("GPL");
2204MODULE_VERSION("0.6");		/* update in parport_ip32_init() too */
2205
2206module_init(parport_ip32_init);
2207module_exit(parport_ip32_exit);
2208
2209module_param(verbose_probing, bool, S_IRUGO);
2210MODULE_PARM_DESC(verbose_probing, "Log chit-chat during initialization");
2211
2212module_param(features, uint, S_IRUGO);
2213MODULE_PARM_DESC(features,
2214		 "Bit mask of features to enable"
2215		 ", bit 0: IRQ support"
2216		 ", bit 1: DMA support"
2217		 ", bit 2: hardware SPP mode"
2218		 ", bit 3: hardware EPP mode"
2219		 ", bit 4: hardware ECP mode");
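
/* Example invocation (illustrative values): loading the driver with
 *
 *	modprobe parport_ip32 verbose_probing=1 features=0x1f
 *
 * enables probe-time logging and all five features (IRQ, DMA, and the
 * hardware SPP, EPP and ECP modes), whereas features=0x01 keeps only
 * interrupt support and leaves the extended modes to the generic
 * software IEEE 1284 routines.
 */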
2220
2221/*--- Inform (X)Emacs about preferred coding style ---------------------*/
2222/*
2223 * Local Variables:
2224 * mode: c
2225 * c-file-style: "linux"
2226 * indent-tabs-mode: t
2227 * tab-width: 8
2228 * fill-column: 78
2229 * ispell-local-dictionary: "american"
2230 * End:
2231 */
2232