1/*-
2 * Copyright (c) 2001-2003
3 *	Fraunhofer Institute for Open Communication Systems (FhG Fokus).
4 * 	All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 *    notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 *    notice, this list of conditions and the following disclaimer in the
13 *    documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 *
27 * Author: Hartmut Brandt <harti@freebsd.org>
28 *
29 * $FreeBSD$
30 *
31 * Fore HE driver for NATM
32 */
33
/*
 * Debug statistics of the HE driver. All members are simple event
 * counters (see the istats member of struct hatm_softc); they are
 * not needed for correct operation. The exact event behind each
 * counter is defined at the place where it is incremented.
 */
struct istats {
	uint32_t	tdprq_full;
	uint32_t	hbuf_error;
	uint32_t	crc_error;
	uint32_t	len_error;
	uint32_t	flow_closed;
	uint32_t	flow_drop;
	uint32_t	tpd_no_mem;
	uint32_t	rx_seg;
	uint32_t	empty_hbuf;
	uint32_t	short_aal5;
	uint32_t	badlen_aal5;
	/* bug_* presumably count "should not happen" conditions --
	 * verify against the interrupt/receive code */
	uint32_t	bug_bad_isw;
	uint32_t	bug_no_irq_upd;
	/* itype_* presumably count interrupt queue entries by type --
	 * verify against the interrupt handler */
	uint32_t	itype_tbrq;
	uint32_t	itype_tpd;
	uint32_t	itype_rbps;
	uint32_t	itype_rbpl;
	uint32_t	itype_rbrq;
	uint32_t	itype_rbrqt;
	uint32_t	itype_unknown;
	uint32_t	itype_phys;
	uint32_t	itype_err;
	uint32_t	defrag;
	uint32_t	mcc;
	uint32_t	oec;
	uint32_t	dcc;
	uint32_t	cec;
	uint32_t	no_rcv_mbuf;
};
67
/*
 * Card memory layout parameters. This is an initializer for an array
 * of two parameter sets: index 0 for the 155 Mbit/s card, index 1 for
 * the 622 Mbit/s card (cf. the he622 flag in struct hatm_softc). The
 * fields correspond to the cells_per_row ... cells_per_lbuf members
 * of the softc.
 */
#define HE_CONFIG_MEM_LAYOUT {						\
	{			/* 155 */				\
	  20,			/* cells_per_row */			\
	  1024,			/* bytes_per_row */			\
	  512,			/* r0_numrows */			\
	  1018,			/* tx_numrows */			\
	  512,			/* r1_numrows */			\
	  6,			/* r0_startrow */			\
	  2			/* cells_per_lbuf */			\
	}, {			/* 622 */				\
	  40,			/* cells_per_row */			\
	  2048,			/* bytes_per_row */			\
	  256,			/* r0_numrows */			\
	  512,			/* tx_numrows */			\
	  256,			/* r1_numrows */			\
	  0,			/* r0_startrow */			\
	  4			/* cells_per_lbuf */			\
	}								\
}
88
89/*********************************************************************/
90struct hatm_softc;
91
/*
 * A chunk of DMA-able memory: size/alignment requirements together
 * with the tag, map and both addresses of the allocated memory.
 */
struct dmamem {
	u_int		size;		/* in bytes */
	u_int		align;		/* alignment */
	bus_dma_tag_t	tag;		/* DMA tag */
	void		*base;		/* the memory */
	bus_addr_t	paddr;		/* physical address */
	bus_dmamap_t	map;		/* the DMA map */
};
103
/*
 * RBP (Receive Buffer Pool) queue entry and queue.
 */
struct herbp {
	u_int		size;		/* RBP number of entries (power of two) */
	u_int		thresh;		/* interrupt threshold */
	uint32_t	bsize;		/* buffer size in bytes */
	u_int		offset;		/* free space at start for small bufs */
	uint32_t	mask;		/* mask for index */
	struct dmamem	mem;		/* the queue area */
	struct he_rbpen	*rbp;		/* queue entries (points into mem) */
	uint32_t	head, tail;	/* head and tail index */
};
117
/*
 * RBRQ (Receive Buffer Return Queue) entry and queue.
 */
struct herbrq {
	u_int		size;		/* number of entries */
	u_int		thresh;		/* interrupt threshold */
	u_int		tout;		/* timeout value */
	u_int		pcnt;		/* packet count threshold */
	struct dmamem	mem;		/* memory */
	struct he_rbrqen *rbrq;		/* queue entries (points into mem) */
	uint32_t	head;		/* driver end */
};
130
/*
 * TPDRQ (Transmit Packet Descriptor Ready Queue) entry and queue
 */
struct hetpdrq {
	u_int		size;		/* number of entries */
	struct dmamem	mem;		/* memory */
	struct he_tpdrqen *tpdrq;	/* queue entries (points into mem) */
	u_int		head;		/* head (copy of adapter) */
	u_int		tail;		/* written back to adapter */
};
141
/*
 * TBRQ (Transmit Buffer Return Queue) entry and queue
 */
struct hetbrq {
	u_int		size;		/* number of entries */
	u_int		thresh;		/* interrupt threshold */
	struct dmamem	mem;		/* memory */
	struct he_tbrqen *tbrq;		/* queue entries (points into mem) */
	u_int		head;		/* adapter end */
};
152
153/*==================================================================*/
154
/*
 * TPDs are 32 byte and must be aligned on 64 byte boundaries. That means,
 * that half of the space is free. We use this space to plug in a link for
 * the list of free TPDs. Note, that the m_act member of the mbufs contains
 * a pointer to the dmamap.
 *
 * The maximum number of TPDs is the size of the common transmit packet
 * descriptor ready queue plus the sizes of the transmit buffer return queues
 * (currently only queue 0). We allocate and map these TPDs when initializing
 * the card. We also allocate one DMA map for each TPD. Only the map in the
 * last TPD of a packet is used when a packet is transmitted.
 * This is signalled by having the mbuf member of this TPD non-zero and
 * pointing to the mbuf.
 */
#define HE_TPD_SIZE		64	/* bytes reserved per TPD (TPD itself is 32) */
struct tpd {
	struct he_tpd		tpd;	/* hardware TPD, must be at beginning */
	SLIST_ENTRY(tpd)	link;	/* free list link */
	struct mbuf		*mbuf;	/* the buf chain (set in last TPD of a packet) */
	bus_dmamap_t		map;	/* DMA map */
	uint32_t		cid;	/* connection id */
	uint16_t		no;	/* number of this tpd */
};
SLIST_HEAD(tpd_list, tpd);		/* head type for the free TPD list */
179
/*
 * Set/clear/test bit I in the tpd_used bitmap of softc SC; bit I marks
 * TPD number I as in use.
 */
#define TPD_SET_USED(SC, I) do {				\
	(SC)->tpd_used[(I) / 8] |= (1 << ((I) % 8));		\
    } while (0)

#define TPD_CLR_USED(SC, I) do {				\
	(SC)->tpd_used[(I) / 8] &= ~(1 << ((I) % 8));		\
    } while (0)

#define TPD_TST_USED(SC, I) ((SC)->tpd_used[(I) / 8] & (1 << ((I) % 8)))
189
/*
 * Address of TPD number I in the TPD memory area of softc SC. TPDs
 * are laid out back to back, HE_TPD_SIZE bytes apart.
 *
 * Bug fix: the macro used the caller's variable `sc' instead of the
 * macro argument SC, so it only worked when the caller's softc
 * pointer happened to be named `sc'. Use the (parenthesized)
 * argument instead.
 */
#define TPD_ADDR(SC, I) ((struct tpd *)((char *)(SC)->tpds.base +	\
    (I) * HE_TPD_SIZE))
192
193/*==================================================================*/
194
195/*
196 * External MBUFs. The card needs a lot of mbufs in the pools for high
197 * performance. The problem with using mbufs directly is that we would need
198 * a dmamap for each of the mbufs. This can exhaust iommu space on the sparc
199 * and it eats also a lot of processing time. So we use external mbufs
200 * for the small buffers and clusters for the large buffers.
201 * For receive group 0 we use 5 ATM cells, for group 1 one (52 byte) ATM
202 * cell. The mbuf storage is allocated pagewise and one dmamap is used per
203 * page.
204 *
205 * The handle we give to the card for the small buffers is a word combined
206 * of the page number and the number of the chunk in the page. This restricts
207 * the number of chunks per page to 256 (8 bit) and the number of pages to
208 * 65536 (16 bits).
209 *
 * A chunk may be in one of three states: free, on the card and floating around
 * in the system. If it is free, it is on one of the two free lists and
 * starts with a struct mbufx_free. Each page has a bitmap that tracks where
 * its chunks are.
214 *
 * For large buffers we use mbuf clusters. Here we have two problems: we need
 * to track the buffers on the card (in case we want to stop it) and
 * we need to map the 64bit mbuf address to a 26bit handle for 64-bit machines.
 * The card uses the buffers in the order we give them to it. Therefore
 * we can use a private array holding pointers to the mbufs as a circular
 * queue for both tasks. This is done with the lbufs member of the softc. The
 * handle for these buffers is the lbufs index ored with a flag.
 */
223
/* data space in each external mbuf */
#define MBUF0_SIZE	(5 * 48)	/* 240 bytes = 5 ATM cell payloads */
#define MBUF1_SIZE	(52)		/* 1 raw cell */

/* size of the buffer. Must fit data, offset and header */
#define MBUF0_CHUNK	256		/* 16 free bytes */
#define MBUF1_CHUNK	96		/* 44 free bytes */

/* start of actual data in buffer */
#define MBUF0_OFFSET	0
#define MBUF1_OFFSET	16

#define MBUFL_OFFSET	16		/* two pointers for HARP */

/* mbuf storage is allocated pagewise, but never more than 8k per page */
#if PAGE_SIZE > 8192
#define	MBUF_ALLOC_SIZE	(8192)
#else
#define	MBUF_ALLOC_SIZE	(PAGE_SIZE)
#endif
243
/* each allocated page has one of these structures at its very end. */
struct mbuf_page_hdr {
	uint16_t	nchunks;	/* chunks on this page */
	bus_dmamap_t	map;		/* the DMA MAP */
	uint32_t	phys;		/* physical base address */
	uint32_t	hdroff;		/* chunk header offset */
	uint32_t	chunksize;	/* chunk size */
	u_int		pool;		/* pool number */
};
/* one page of external mbuf storage: chunk storage plus trailing header */
struct mbuf_page {
	char	storage[MBUF_ALLOC_SIZE - sizeof(struct mbuf_page_hdr)];
	struct mbuf_page_hdr	hdr;
};
257
258/* numbers per page */
259#define MBUF0_PER_PAGE	((MBUF_ALLOC_SIZE - sizeof(struct mbuf_page_hdr)) / \
260    MBUF0_CHUNK)
261#define MBUF1_PER_PAGE	((MBUF_ALLOC_SIZE - sizeof(struct mbuf_page_hdr)) / \
262    MBUF1_CHUNK)
263
/*
 * Convert to/from handles. All handles are shifted by
 * HE_REGS_RBRQ_ADDR; large-buffer handles additionally carry
 * MBUF_LARGE_FLAG.
 *
 * NOTE(review): the parse macros use a 10-bit chunk number and
 * 14-bit page number, while the comment above and the uint8_t
 * chunkno member of struct mbuf_chunk_hdr suggest 8/16 bits --
 * confirm which layout is intended.
 */
/* small buffers: page and chunk number -> handle */
#define MBUF_MAKE_HANDLE(PAGENO, CHUNKNO) \
	((((PAGENO) << 10) | (CHUNKNO)) << HE_REGS_RBRQ_ADDR)
/* large buffers: lbufs index -> handle */
#define	MBUF_MAKE_LHANDLE(INDEX) \
	(MBUF_LARGE_FLAG | ((INDEX) << HE_REGS_RBRQ_ADDR))

/* small buffers: handle -> page and chunk number */
#define MBUF_PARSE_HANDLE(HANDLE, PAGENO, CHUNKNO) do {			\
	(CHUNKNO) = ((HANDLE) >> HE_REGS_RBRQ_ADDR) & 0x3ff;		\
	(PAGENO) = (((HANDLE) >> 10) >> HE_REGS_RBRQ_ADDR) & 0x3fff;	\
    } while (0)
/* large buffers: handle -> lbufs index */
#define	MBUF_PARSE_LHANDLE(HANDLE, INDEX) do {				\
	(INDEX) = ((HANDLE) >> HE_REGS_RBRQ_ADDR) & 0xffffff;		\
    } while (0)

#define MBUF_LARGE_FLAG	0x80000000
283
/* chunks have the following structure at the end (8 byte) */
struct mbuf_chunk_hdr {
	uint16_t	pageno;		/* page this chunk lives on */
	uint8_t		chunkno;	/* chunk number within the page */
	uint8_t		flags;		/* MBUF_CARD | MBUF_USED */
	u_int		ref_cnt;	/* reference count */
};
#define	MBUF_CARD	0x01	/* buffer is on card */
#define	MBUF_USED	0x02	/* buffer is somewhere in the system */
293
/* usable data bytes in a chunk of pool X (chunk minus trailing header) */
#define MBUFX_STORAGE_SIZE(X) (MBUF##X##_CHUNK	\
    - sizeof(struct mbuf_chunk_hdr))

/* small buffer chunk, pool 0 */
struct mbuf0_chunk {
	char			storage[MBUFX_STORAGE_SIZE(0)];
	struct mbuf_chunk_hdr	hdr;
};

/* small buffer chunk, pool 1 */
struct mbuf1_chunk {
	char			storage[MBUFX_STORAGE_SIZE(1)];
	struct mbuf_chunk_hdr	hdr;
};

/* overlay for a free chunk: singly-linked free list */
struct mbufx_free {
	struct mbufx_free	*link;
};
310
311/*==================================================================*/
312
/*
 * Interrupt queue
 */
struct heirq {
	u_int		size;	/* number of entries */
	u_int		thresh;	/* re-interrupt threshold */
	u_int		line;	/* interrupt line to use */
	struct dmamem	mem;	/* interrupt queues */
	uint32_t *	irq;	/* interrupt queue (points into mem) */
	uint32_t 	head;	/* head index */
	uint32_t *	tailp;	/* pointer to tail */
	struct hatm_softc *sc;	/* back pointer */
	u_int		group;	/* interrupt group */
};
327
/*
 * This structure describes all information for a VCC open on the card.
 * The array of these structures is indexed by the compressed connection ID
 * (CID). This structure must begin with the atmio_vcc.
 */
struct hevcc {
	struct atmio_vcc param;		/* traffic parameters */
	void *		rxhand;		/* NATM protocol block */
	u_int		vflags;		/* private flags (HE_VCC_*) */
	uint32_t	ipackets;	/* input packet count */
	uint32_t	opackets;	/* output packet count */
	uint32_t	ibytes;		/* input byte count */
	uint32_t	obytes;		/* output byte count */

	u_int		rc;		/* rate control group for CBR */
	struct mbuf *	chain;		/* partial received PDU */
	struct mbuf *	last;		/* last mbuf in chain */
	u_int		ntpds;		/* number of active TPDs */
};
/* vflags values */
#define HE_VCC_OPEN		0x000f0000	/* any of the four open/closing bits */
#define HE_VCC_RX_OPEN		0x00010000
#define HE_VCC_RX_CLOSING	0x00020000
#define HE_VCC_TX_OPEN		0x00040000
#define HE_VCC_TX_CLOSING	0x00080000
#define HE_VCC_FLOW_CTRL	0x00100000
353
/*
 * CBR rate groups
 */
struct herg {
	u_int	refcnt;		/* how many connections reference this group */
	u_int	rate;		/* the value */
};
361
/*
 * Softc -- per-device state. Everything below the "rest has to be
 * reset by stop" marker is reinitialized when the interface is
 * stopped and restarted.
 */
struct hatm_softc {
	struct ifnet		*ifp;		/* network interface */
	struct mtx		mtx;		/* lock */
	struct ifmedia		media;		/* media */
	device_t		dev;		/* device */
	int			memid;		/* resource id for memory */
	struct resource *	memres;		/* memory resource */
	bus_space_handle_t	memh;		/* handle */
	bus_space_tag_t		memt;		/* ... and tag */
	bus_dma_tag_t		parent_tag;	/* global restriction */
	struct cv		vcc_cv;		/* condition variable */
	int			irqid;		/* resource id */
	struct resource *	irqres;		/* resource */
	void *			ih;		/* interrupt handle */
	struct utopia		utopia;		/* utopia state */

	/* rest has to be reset by stop */
	int			he622;		/* this is a HE622 */
	int			pci64;		/* 64bit bus */
	char			prod_id[HE_EEPROM_PROD_ID_LEN + 1];
	char			rev[HE_EEPROM_REV_LEN + 1];
	struct heirq		irq_0;		/* interrupt queues 0 */

	/* generic network controller state (cf. HE_CONFIG_MEM_LAYOUT) */
	u_int			cells_per_row;
	u_int			bytes_per_row;
	u_int			r0_numrows;
	u_int			tx_numrows;
	u_int			r1_numrows;
	u_int			r0_startrow;
	u_int			tx_startrow;
	u_int			r1_startrow;
	u_int			cells_per_lbuf;
	u_int			r0_numbuffs;
	u_int			r1_numbuffs;
	u_int			tx_numbuffs;

	/* HSP */
	struct he_hsp		*hsp;
	struct dmamem		hsp_mem;

	/*** TX ***/
	struct hetbrq		tbrq;		/* TBRQ 0 */
	struct hetpdrq		tpdrq;		/* TPDRQ */
	struct tpd_list		tpd_free;	/* Free TPDs */
	u_int			tpd_nfree;	/* number of free TPDs */
	u_int			tpd_total;	/* total TPDs */
	uint8_t			*tpd_used;	/* bitmap of used TPDs */
	struct dmamem		tpds;		/* TPD memory */
	bus_dma_tag_t		tx_tag;		/* DMA tag for all tx mbufs */

	/*** RX ***/
	/* receive/transmit groups */
	struct herbp		rbp_s0;		/* RBPS0 */
	struct herbp		rbp_l0;		/* RBPL0 */
	struct herbp		rbp_s1;		/* RBPS1 */
	struct herbrq		rbrq_0;		/* RBRQ0 */
	struct herbrq		rbrq_1;		/* RBRQ1 */

	/* list of external mbuf storage */
	bus_dma_tag_t		mbuf_tag;
	struct mbuf_page	**mbuf_pages;
	u_int			mbuf_npages;
	u_int			mbuf_max_pages;
	struct mbufx_free	*mbuf_list[2];

	/* mbuf cluster tracking and mapping for group 0 */
	struct mbuf		**lbufs;	/* mbufs */
	bus_dmamap_t		*rmaps;		/* DMA maps */
	u_int			lbufs_size;
	u_int			lbufs_next;

	/* VCCs */
	struct hevcc		*vccs[HE_MAX_VCCS];
	u_int			cbr_bw;		/* BW allocated to CBR */
	u_int			max_tpd;	/* per VCC */
	u_int			open_vccs;
	uma_zone_t		vcc_zone;

	/* rate groups */
	struct herg		rate_ctrl[HE_REGN_CS_STPER];

	/* memory offsets */
	u_int			tsrb, tsrc, tsrd;
	u_int			rsrb;

	struct cv		cv_rcclose;	/* condition variable */
	uint32_t		rate_grid[16][16]; /* our copy */

	/* sysctl support */
	struct sysctl_ctx_list	sysctl_ctx;
	struct sysctl_oid	*sysctl_tree;

	/* internal statistics */
	struct istats		istats;

	u_int			mpsafe;

#ifdef HATM_DEBUG
	/* debugging */
	u_int			debug;

	/* transmit mbuf count */
	int			txmbuf;
#endif
};
471
/*
 * 4/2/1-byte register access through the memory resource, plus
 * read/write/both barriers over the whole register window.
 * NOTE: SC is expanded unparenthesized here -- callers must pass a
 * plain softc pointer variable.
 */
#define READ4(SC,OFF)	bus_space_read_4(SC->memt, SC->memh, (OFF))
#define READ2(SC,OFF)	bus_space_read_2(SC->memt, SC->memh, (OFF))
#define READ1(SC,OFF)	bus_space_read_1(SC->memt, SC->memh, (OFF))

#define WRITE4(SC,OFF,VAL) bus_space_write_4(SC->memt, SC->memh, (OFF), (VAL))
#define WRITE2(SC,OFF,VAL) bus_space_write_2(SC->memt, SC->memh, (OFF), (VAL))
#define WRITE1(SC,OFF,VAL) bus_space_write_1(SC->memt, SC->memh, (OFF), (VAL))

#define BARRIER_R(SC) bus_space_barrier(SC->memt, SC->memh, 0, HE_REGO_END, \
	BUS_SPACE_BARRIER_READ)
#define BARRIER_W(SC) bus_space_barrier(SC->memt, SC->memh, 0, HE_REGO_END, \
	BUS_SPACE_BARRIER_WRITE)
#define BARRIER_RW(SC) bus_space_barrier(SC->memt, SC->memh, 0, HE_REGO_END, \
	BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE)

/* access to the SUNI registers, mapped at HE_REGO_SUNI in 4-byte steps */
#define READ_SUNI(SC,OFF) READ4(SC, HE_REGO_SUNI + 4 * (OFF))
#define WRITE_SUNI(SC,OFF,VAL) WRITE4(SC, HE_REGO_SUNI + 4 * (OFF), (VAL))
489
/*
 * Read/write one word of local buffer memory via the LB_MEM handshake
 * registers: load the address (and data for writes), start the access
 * and busy-wait until the handshake bit clears. READ_LB4 is a GCC
 * statement expression whose value is the data register contents.
 */
#define READ_LB4(SC,OFF)						\
    ({									\
	WRITE4(SC, HE_REGO_LB_MEM_ADDR, (OFF));				\
	WRITE4(SC, HE_REGO_LB_MEM_ACCESS,				\
	    (HE_REGM_LB_MEM_HNDSHK | HE_REGM_LB_MEM_READ));		\
	while((READ4(SC, HE_REGO_LB_MEM_ACCESS) & HE_REGM_LB_MEM_HNDSHK))\
		;							\
	READ4(SC, HE_REGO_LB_MEM_DATA);					\
    })
#define WRITE_LB4(SC,OFF,VAL)						\
    do {								\
	WRITE4(SC, HE_REGO_LB_MEM_ADDR, (OFF));				\
	WRITE4(SC, HE_REGO_LB_MEM_DATA, (VAL));				\
	WRITE4(SC, HE_REGO_LB_MEM_ACCESS,				\
	    (HE_REGM_LB_MEM_HNDSHK | HE_REGM_LB_MEM_WRITE));		\
	while((READ4(SC, HE_REGO_LB_MEM_ACCESS) & HE_REGM_LB_MEM_HNDSHK))\
		;							\
    } while(0)
508
/*
 * Access to TCM, RCM and mailbox memory through the CON_CTL/CON_DAT
 * register pair: start the operation and busy-wait until the status
 * bit clears. SPACE selects the target memory; READ_MEM4 is a GCC
 * statement expression yielding the data register contents.
 */
#define WRITE_MEM4(SC,OFF,VAL,SPACE)					\
    do {								\
	WRITE4(SC, HE_REGO_CON_DAT, (VAL));				\
	WRITE4(SC, HE_REGO_CON_CTL,					\
	    (SPACE | HE_REGM_CON_WE | HE_REGM_CON_STATUS | (OFF)));	\
	while((READ4(SC, HE_REGO_CON_CTL) & HE_REGM_CON_STATUS) != 0)	\
		;							\
    } while(0)

#define READ_MEM4(SC,OFF,SPACE)					\
    ({									\
	WRITE4(SC, HE_REGO_CON_CTL,					\
	    (SPACE | HE_REGM_CON_STATUS | (OFF)));			\
	while((READ4(SC, HE_REGO_CON_CTL) & HE_REGM_CON_STATUS) != 0)	\
		;							\
	READ4(SC, HE_REGO_CON_DAT);					\
    })

/* convenience wrappers for the three memory spaces */
#define WRITE_TCM4(SC,OFF,VAL) WRITE_MEM4(SC,(OFF),(VAL),HE_REGM_CON_TCM)
#define WRITE_RCM4(SC,OFF,VAL) WRITE_MEM4(SC,(OFF),(VAL),HE_REGM_CON_RCM)
#define WRITE_MBOX4(SC,OFF,VAL) WRITE_MEM4(SC,(OFF),(VAL),HE_REGM_CON_MBOX)

#define READ_TCM4(SC,OFF) READ_MEM4(SC,(OFF),HE_REGM_CON_TCM)
#define READ_RCM4(SC,OFF) READ_MEM4(SC,(OFF),HE_REGM_CON_RCM)
#define READ_MBOX4(SC,OFF) READ_MEM4(SC,(OFF),HE_REGM_CON_MBOX)

/*
 * Writes with byte enables: BYTES is a 4-bit mask of bytes to write;
 * the inverted mask goes into the CON_DIS (disable) field.
 */
#define WRITE_TCM(SC,OFF,BYTES,VAL) 					\
	WRITE_MEM4(SC,(OFF) | ((~(BYTES) & 0xf) << HE_REGS_CON_DIS),	\
	    (VAL), HE_REGM_CON_TCM)
#define WRITE_RCM(SC,OFF,BYTES,VAL) 					\
	WRITE_MEM4(SC,(OFF) | ((~(BYTES) & 0xf) << HE_REGS_CON_DIS),	\
	    (VAL), HE_REGM_CON_RCM)
541
/*
 * Read/write transmit status register NR of connection CID. The TSRs
 * live in different TCM areas depending on NR: TSRA (0-7), TSRB
 * (8-11, base in sc->tsrb), TSRC (12-13, base in sc->tsrc) and TSRD
 * (>= 14, base in sc->tsrd). BEN is the byte-enable mask for writes.
 */
#define READ_TSR(SC,CID,NR)						\
    ({									\
	uint32_t _v;							\
	if((NR) <= 7) {							\
		_v = READ_TCM4(SC, HE_REGO_TSRA(0,CID,NR));		\
	} else if((NR) <= 11) {						\
		_v = READ_TCM4(SC, HE_REGO_TSRB((SC)->tsrb,CID,(NR-8)));\
	} else if((NR) <= 13) {						\
		_v = READ_TCM4(SC, HE_REGO_TSRC((SC)->tsrc,CID,(NR-12)));\
	} else {							\
		_v = READ_TCM4(SC, HE_REGO_TSRD((SC)->tsrd,CID));	\
	}								\
	_v;								\
    })

#define WRITE_TSR(SC,CID,NR,BEN,VAL)					\
    do {								\
	if((NR) <= 7) {							\
		WRITE_TCM(SC, HE_REGO_TSRA(0,CID,NR),BEN,VAL);		\
	} else if((NR) <= 11) {						\
		WRITE_TCM(SC, HE_REGO_TSRB((SC)->tsrb,CID,(NR-8)),BEN,VAL);\
	} else if((NR) <= 13) {						\
		WRITE_TCM(SC, HE_REGO_TSRC((SC)->tsrc,CID,(NR-12)),BEN,VAL);\
	} else {							\
		WRITE_TCM(SC, HE_REGO_TSRD((SC)->tsrd,CID),BEN,VAL);	\
	}								\
    } while(0)

/*
 * Same for receive status registers: RSRA (0-7) and RSRB (>= 8, base
 * in sc->rsrb) in RCM space.
 */
#define READ_RSR(SC,CID,NR)						\
    ({									\
	uint32_t _v;							\
	if((NR) <= 7) {							\
		_v = READ_RCM4(SC, HE_REGO_RSRA(0,CID,NR));		\
	} else {							\
		_v = READ_RCM4(SC, HE_REGO_RSRB((SC)->rsrb,CID,(NR-8)));\
	}								\
	_v;								\
    })

#define WRITE_RSR(SC,CID,NR,BEN,VAL)					\
    do {								\
	if((NR) <= 7) {							\
		WRITE_RCM(SC, HE_REGO_RSRA(0,CID,NR),BEN,VAL);		\
	} else {							\
		WRITE_RCM(SC, HE_REGO_RSRB((SC)->rsrb,CID,(NR-8)),BEN,VAL);\
	}								\
    } while(0)
589
#ifdef HATM_DEBUG
/*
 * Debug printing: if the DBG_FL bit is set in the softc debug mask,
 * print the interface and function name followed by PRINT, which must
 * be a parenthesized printf argument list, e.g.
 * DBG(sc, INTR, ("group %u", group)).
 */
#define DBG(SC, FL, PRINT) do {						\
	if((SC)->debug & DBG_##FL) { 					\
		if_printf((SC)->ifp, "%s: ", __func__);			\
		printf PRINT;						\
		printf("\n");						\
	}								\
    } while (0)

/* bits for the debug mask in the softc */
enum {
	DBG_DUMMY	= 0x0001,	/* default value for -DHATM_DEBUG */
	DBG_RX		= 0x0002,
	DBG_TX		= 0x0004,
	DBG_VCC		= 0x0008,
	DBG_IOCTL	= 0x0010,
	DBG_ATTACH	= 0x0020,
	DBG_INTR	= 0x0040,
	DBG_DMA		= 0x0080,
	DBG_DMAH	= 0x0100,
	DBG_DUMP	= 0x0200,

	DBG_ALL		= 0x03ff	/* all of the above */
};

#else
#define DBG(SC, FL, PRINT)
#endif
617
/*
 * Prototypes of functions shared between the driver source files.
 * (Implementations are not in this header; descriptions below are
 * inferred from the names -- verify against the .c files.)
 */

/* convert between cells-per-second and ATM-Forum rate encoding */
u_int hatm_cps2atmf(uint32_t);
u_int hatm_atmf2cps(uint32_t);

/* interrupt handler, ioctl entry, init/stop/start of the interface */
void hatm_intr(void *);
int hatm_ioctl(struct ifnet *, u_long, caddr_t);
void hatm_initialize(struct hatm_softc *);
void hatm_stop(struct hatm_softc *sc);
void hatm_start(struct ifnet *);

/* receive path and transmit completion */
void hatm_rx(struct hatm_softc *sc, u_int cid, u_int flags, struct mbuf *m,
    u_int len);
void hatm_tx_complete(struct hatm_softc *sc, struct tpd *tpd, uint32_t);

/* per-VCC open/close handling */
int hatm_tx_vcc_can_open(struct hatm_softc *sc, u_int cid, struct hevcc *);
void hatm_tx_vcc_open(struct hatm_softc *sc, u_int cid);
void hatm_rx_vcc_open(struct hatm_softc *sc, u_int cid);
void hatm_tx_vcc_close(struct hatm_softc *sc, u_int cid);
void hatm_rx_vcc_close(struct hatm_softc *sc, u_int cid);
void hatm_tx_vcc_closed(struct hatm_softc *sc, u_int cid);
void hatm_vcc_closed(struct hatm_softc *sc, u_int cid);
void hatm_load_vc(struct hatm_softc *sc, u_int cid, int reopen);

/* return a chunk to one of the external-mbuf free lists */
void hatm_ext_free(struct mbufx_free **, struct mbufx_free *);
641