/* if_hatmvar.h — FreeBSD revision 116491 */
1/*
2 * Copyright (c) 2001-2003
3 *	Fraunhofer Institute for Open Communication Systems (FhG Fokus).
4 * 	All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 *    notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 *    notice, this list of conditions and the following disclaimer in the
13 *    documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 *
27 * Author: Hartmut Brandt <harti@freebsd.org>
28 *
29 * $FreeBSD: head/sys/dev/hatm/if_hatmvar.h 116491 2003-06-17 16:12:50Z harti $
30 *
31 * Fore HE driver for NATM
32 */
33
34/*
35 * Debug statistics of the HE driver
36 */
37struct istats {
38	uint32_t	tdprq_full;
39	uint32_t	hbuf_error;
40	uint32_t	crc_error;
41	uint32_t	len_error;
42	uint32_t	flow_closed;
43	uint32_t	flow_drop;
44	uint32_t	tpd_no_mem;
45	uint32_t	rx_seg;
46	uint32_t	empty_hbuf;
47	uint32_t	short_aal5;
48	uint32_t	badlen_aal5;
49	uint32_t	bug_bad_isw;
50	uint32_t	bug_no_irq_upd;
51	uint32_t	itype_tbrq;
52	uint32_t	itype_tpd;
53	uint32_t	itype_rbps;
54	uint32_t	itype_rbpl;
55	uint32_t	itype_rbrq;
56	uint32_t	itype_rbrqt;
57	uint32_t	itype_unknown;
58	uint32_t	itype_phys;
59	uint32_t	itype_err;
60	uint32_t	defrag;
61};
62
63/* Card memory layout parameters */
64#define HE_CONFIG_MEM_LAYOUT {						\
65	{			/* 155 */				\
66	  20,			/* cells_per_row */			\
67	  1024,			/* bytes_per_row */			\
68	  512,			/* r0_numrows */			\
69	  1018,			/* tx_numrows */			\
70	  512,			/* r1_numrows */			\
71	  6,			/* r0_startrow */			\
72	  2			/* cells_per_lbuf */			\
73	}, {			/* 622 */				\
74	  40,			/* cells_per_row */			\
75	  2048,			/* bytes_per_row */			\
76	  256,			/* r0_numrows */			\
77	  512,			/* tx_numrows */			\
78	  256,			/* r1_numrows */			\
79	  0,			/* r0_startrow */			\
80	  4			/* cells_per_lbuf */			\
81	}								\
82}
83
84/*********************************************************************/
85struct hatm_softc;
86
87/*
88 * A chunk of DMA-able memory
89 */
90struct dmamem {
91	u_int		size;		/* in bytes */
92	u_int		align;		/* alignement */
93	bus_dma_tag_t	tag;		/* DMA tag */
94	void		*base;		/* the memory */
95	bus_addr_t	paddr;		/* physical address */
96	bus_dmamap_t	map;		/* the MAP */
97};
98
99/*
100 * RBP (Receive Buffer Pool) queue entry and queue.
101 */
102struct herbp {
103	u_int		size;		/* RBP number of entries (power of two) */
104	u_int		thresh;		/* interrupt treshold */
105	uint32_t	bsize;		/* buffer size in bytes */
106	u_int		offset;		/* free space at start for small bufs */
107	uint32_t	mask;		/* mask for index */
108	struct dmamem	mem;		/* the queue area */
109	struct he_rbpen	*rbp;
110	uint32_t	head, tail;	/* head and tail */
111};
112
113/*
114 * RBRQ (Receive Buffer Return Queue) entry and queue.
115 */
116struct herbrq {
117	u_int		size;		/* number of entries */
118	u_int		thresh;		/* interrupt threshold */
119	u_int		tout;		/* timeout value */
120	u_int		pcnt;		/* packet count threshold */
121	struct dmamem	mem;		/* memory */
122	struct he_rbrqen *rbrq;
123	uint32_t	head;		/* driver end */
124};
125
126/*
127 * TPDRQ (Transmit Packet Descriptor Ready Queue) entry and queue
128 */
129struct hetpdrq {
130	u_int		size;		/* number of entries */
131	struct dmamem	mem;		/* memory */
132	struct he_tpdrqen *tpdrq;
133	u_int		head;		/* head (copy of adapter) */
134	u_int		tail;		/* written back to adapter */
135};
136
137/*
138 * TBRQ (Transmit Buffer Return Queue) entry and queue
139 */
140struct hetbrq {
141	u_int		size;		/* number of entries */
142	u_int		thresh;		/* interrupt threshold */
143	struct dmamem	mem;		/* memory */
144	struct he_tbrqen *tbrq;
145	u_int		head;		/* adapter end */
146};
147
148/*==================================================================*/
149
/*
 * TPDs are 32 bytes and must be aligned on 64 byte boundaries. That means
 * that half of the space is free. We use this space to plug in a link for
 * the list of free TPDs. Note that the m_act member of the mbufs contains
 * a pointer to the dmamap.
 *
 * The maximum number of TPDs is the size of the common transmit packet
 * descriptor ready queue plus the sizes of the transmit buffer return queues
 * (currently only queue 0). We allocate and map these TPDs when initializing
 * the card. We also allocate one DMA map for each TPD. Only the map in the
 * last TPD of a packet is used when a packet is transmitted.
 * This is signalled by having the mbuf member of this TPD non-zero and
 * pointing to the mbuf.
 */
164#define HE_TPD_SIZE		64
165struct tpd {
166	struct he_tpd		tpd;	/* at beginning */
167	SLIST_ENTRY(tpd)	link;	/* free cid list link */
168	struct mbuf		*mbuf;	/* the buf chain */
169	bus_dmamap_t		map;	/* map */
170	uint32_t		cid;	/* CID */
171	uint16_t		no;	/* number of this tpd */
172};
173SLIST_HEAD(tpd_list, tpd);
174
175#define TPD_SET_USED(SC, I) do {				\
176	(SC)->tpd_used[(I) / 8] |= (1 << ((I) % 8));		\
177    } while (0)
178
179#define TPD_CLR_USED(SC, I) do {				\
180	(SC)->tpd_used[(I) / 8] &= ~(1 << ((I) % 8));		\
181    } while (0)
182
183#define TPD_TST_USED(SC, I) ((SC)->tpd_used[(I) / 8] & (1 << ((I) % 8)))
184
185#define TPD_ADDR(SC, I) ((struct tpd *)((char *)sc->tpds.base +	\
186    (I) * HE_TPD_SIZE))
187
188/*==================================================================*/
189
190/*
191 * External MBUFs. The card needs a lot of mbufs in the pools for high
192 * performance. The problem with using mbufs directly is that we would need
193 * a dmamap for each of the mbufs. This can exhaust iommu space on the sparc
194 * and it eats also a lot of processing time. So we use external mbufs
195 * for the small buffers and clusters for the large buffers.
196 * For receive group 0 we use 5 ATM cells, for group 1 one (52 byte) ATM
197 * cell. The mbuf storage is allocated pagewise and one dmamap is used per
198 * page.
199 *
 * The handle we give to the card for the small buffers is a word combining
 * the page number and the number of the chunk within the page. The encoding
 * (see MBUF_MAKE_HANDLE) uses 10 bits for each field, which restricts the
 * number of chunks per page to 1024 and the number of pages to 1024.
204 *
 * A chunk may be in one of three states: free, on the card, or floating
 * around in the system. If it is free, it is on one of the two free lists
 * and starts with a struct mbufx_free. Each page has a bitmap that tracks
 * where its chunks are.
209 *
 * For large buffers we use mbuf clusters. Here we have two problems: we need
 * to track the buffers on the card (in case we want to stop it) and
 * we need to map the 64-bit mbuf address to a 26-bit handle on 64-bit
 * machines. The card uses the buffers in the order we give them to it.
 * Therefore we can use a private array holding pointers to the mbufs as a
 * circular queue for both tasks. This is done with the lbufs member of the
 * softc. The handle for these buffers is the lbufs index OR'ed with a flag.
217 */
218#define MBUF0_SIZE	(5 * 48)	/* 240 */
219#define MBUF1_SIZE	(52)
220
221#define MBUF0_CHUNK	256		/* 16 free bytes */
222#define MBUF1_CHUNK	96		/* 44 free bytes */
223#ifdef XXX
224#define MBUF0_OFFSET	(MBUF0_CHUNK - sizeof(struct mbuf_chunk_hdr) \
225    - MBUF0_SIZE)
226#else
227#define MBUF0_OFFSET	0
228#endif
229#define MBUF1_OFFSET	(MBUF1_CHUNK - sizeof(struct mbuf_chunk_hdr) \
230    - MBUF1_SIZE)
231#define MBUFL_OFFSET	16		/* two pointers for HARP */
232
233#define MBUF_ALLOC_SIZE	(PAGE_SIZE)
234
235/* each allocated page has one of these structures at its very end. */
236struct mbuf_page_hdr {
237	uint8_t		card[32];	/* bitmap for on-card */
238	uint8_t		used[32];	/* bitmap for used but not on-card */
239	uint16_t	nchunks;	/* chunks on this page */
240	bus_dmamap_t	map;		/* the DMA MAP */
241	uint32_t	phys;		/* physical base address */
242	uint32_t	hdroff;		/* chunk header offset */
243	uint32_t	chunksize;	/* chunk size */
244};
245struct mbuf_page {
246	char	storage[MBUF_ALLOC_SIZE - sizeof(struct mbuf_page_hdr)];
247	struct mbuf_page_hdr	hdr;
248};
249
250/* numbers per page */
251#define MBUF0_PER_PAGE	((MBUF_ALLOC_SIZE - sizeof(struct mbuf_page_hdr)) / \
252    MBUF0_CHUNK)
253#define MBUF1_PER_PAGE	((MBUF_ALLOC_SIZE - sizeof(struct mbuf_page_hdr)) / \
254    MBUF1_CHUNK)
255
256#define MBUF_CLR_BIT(ARRAY, BIT) ((ARRAY)[(BIT) / 8] &= ~(1 << ((BIT) % 8)))
257#define MBUF_SET_BIT(ARRAY, BIT) ((ARRAY)[(BIT) / 8] |= (1 << ((BIT) % 8)))
258#define MBUF_TST_BIT(ARRAY, BIT) ((ARRAY)[(BIT) / 8] & (1 << ((BIT) % 8)))
259
260#define MBUF_MAKE_HANDLE(PAGENO, CHUNKNO) \
261	(((PAGENO) << 10) | (CHUNKNO))
262
263#define MBUF_PARSE_HANDLE(HANDLE, PAGENO, CHUNKNO) do {	\
264	(CHUNKNO) = (HANDLE) & 0x3ff;			\
265	(PAGENO) = ((HANDLE) >> 10) & 0x3ff;		\
266    } while (0)
267
268#define MBUF_LARGE_FLAG	(1 << 20)
269
270/* chunks have the following structure at the end */
271struct mbuf_chunk_hdr {
272	struct mbuf		*mbuf;
273	uint16_t		pageno;
274	uint16_t		chunkno;
275};
276
277#define MBUFX_STORAGE_SIZE(X) (MBUF##X##_CHUNK	\
278    - sizeof(struct mbuf_chunk_hdr))
279
280struct mbuf0_chunk {
281	char			storage[MBUFX_STORAGE_SIZE(0)];
282	struct mbuf_chunk_hdr	hdr;
283};
284
285struct mbuf1_chunk {
286	char			storage[MBUFX_STORAGE_SIZE(1)];
287	struct mbuf_chunk_hdr	hdr;
288};
289
290struct mbufx_free {
291	SLIST_ENTRY(mbufx_free)	link;
292};
293SLIST_HEAD(mbufx_free_list, mbufx_free);
294
295/*==================================================================*/
296
297/*
298 * Interrupt queue
299 */
300struct heirq {
301	u_int		size;	/* number of entries */
302	u_int		thresh;	/* re-interrupt threshold */
303	u_int		line;	/* interrupt line to use */
304	struct dmamem	mem;	/* interrupt queues */
305	uint32_t *	irq;	/* interrupt queue */
306	uint32_t 	head;	/* head index */
307	uint32_t *	tailp;	/* pointer to tail */
308	struct hatm_softc *sc;	/* back pointer */
309	u_int		group;	/* interrupt group */
310};
311
312/*
313 * This structure describes all information for a VCC open on the card.
314 * The array of these structures is indexed by the compressed connection ID
315 * (CID).
316 */
317struct hevcc {
318	u_int		vflags;		/* private flags */
319	void *		rxhand;		/* NATM protocol block */
320	u_int		rc;		/* rate control group for CBR */
321	struct mbuf *	chain;		/* partial received PDU */
322	struct mbuf *	last;		/* last mbuf in chain */
323
324	/* from the OPEN_VCC ioctl */
325	struct atmio_vcc param;		/* traffic parameters */
326
327	uint32_t	ibytes;
328	uint32_t	ipackets;
329	uint32_t	obytes;
330	uint32_t	opackets;
331	u_int		ntpds;		/* number of active TPDs */
332};
333#define HE_VCC_OPEN		0x000f0000
334#define HE_VCC_RX_OPEN		0x00010000
335#define HE_VCC_RX_CLOSING	0x00020000
336#define HE_VCC_TX_OPEN		0x00040000
337#define HE_VCC_TX_CLOSING	0x00080000
338#define HE_VCC_FLOW_CTRL	0x00100000
339#define HE_VCC_ASYNC		0x00200000
340
341/*
342 * CBR rate groups
343 */
344struct herg {
345	u_int	refcnt;		/* how many connections reference this group */
346	u_int	rate;		/* the value */
347};
348
349/*
350 * Softc
351 */
352struct hatm_softc {
353	struct ifatm		ifatm;		/* common ATM stuff */
354	struct mtx		mtx;		/* lock */
355	struct ifmedia		media;		/* media */
356	device_t		dev;		/* device */
357	int			memid;		/* resoure id for memory */
358	struct resource *	memres;		/* memory resource */
359	bus_space_handle_t	memh;		/* handle */
360	bus_space_tag_t		memt;		/* ... and tag */
361	bus_dma_tag_t		parent_tag;	/* global restriction */
362	struct cv		vcc_cv;		/* condition variable */
363	int			irqid;		/* resource id */
364	struct resource *	irqres;		/* resource */
365	void *			ih;		/* interrupt handle */
366	struct utopia		utopia;		/* utopia state */
367
368	/* rest has to be reset by stop */
369	int			he622;		/* this is a HE622 */
370	int			pci64;		/* 64bit bus */
371	char			prod_id[HE_EEPROM_PROD_ID_LEN + 1];
372	char			rev[HE_EEPROM_REV_LEN + 1];
373	struct heirq		irq_0;		/* interrupt queues 0 */
374
375	/* generic network controller state */
376	u_int			cells_per_row;
377	u_int			bytes_per_row;
378	u_int			r0_numrows;
379	u_int			tx_numrows;
380	u_int			r1_numrows;
381	u_int			r0_startrow;
382	u_int			tx_startrow;
383	u_int			r1_startrow;
384	u_int			cells_per_lbuf;
385	u_int			r0_numbuffs;
386	u_int			r1_numbuffs;
387	u_int			tx_numbuffs;
388
389	/* HSP */
390	struct he_hsp		*hsp;
391	struct dmamem		hsp_mem;
392
393	/*** TX ***/
394	struct hetbrq		tbrq;		/* TBRQ 0 */
395	struct hetpdrq		tpdrq;		/* TPDRQ */
396	struct tpd_list		tpd_free;	/* Free TPDs */
397	u_int			tpd_nfree;	/* number of free TPDs */
398	u_int			tpd_total;	/* total TPDs */
399	uint8_t			*tpd_used;	/* bitmap of used TPDs */
400	struct dmamem		tpds;		/* TPD memory */
401	bus_dma_tag_t		tx_tag;		/* DMA tag for all tx mbufs */
402
403	/*** RX ***/
404	/* receive/transmit groups */
405	struct herbp		rbp_s0;		/* RBPS0 */
406	struct herbp		rbp_l0;		/* RBPL0 */
407	struct herbp		rbp_s1;		/* RBPS1 */
408	struct herbrq		rbrq_0;		/* RBRQ0 */
409	struct herbrq		rbrq_1;		/* RBRQ1 */
410
411	/* list of external mbuf storage */
412	bus_dma_tag_t		mbuf_tag;
413	struct mbuf_page	**mbuf_pages;
414	u_int			mbuf_npages;
415	struct mtx		mbuf0_mtx;
416	struct mbufx_free_list	mbuf0_list;
417	struct mtx		mbuf1_mtx;
418	struct mbufx_free_list	mbuf1_list;
419
420	/* mbuf cluster tracking and mapping for group 0 */
421	struct mbuf		**lbufs;	/* mbufs */
422	bus_dmamap_t		*rmaps;		/* DMA maps */
423	u_int			lbufs_size;
424	u_int			lbufs_next;
425
426	/* VCCs */
427	struct hevcc		*vccs[HE_MAX_VCCS];
428	u_int			cbr_bw;		/* BW allocated to CBR */
429	u_int			max_tpd;	/* per VCC */
430	u_int			open_vccs;
431	uma_zone_t		vcc_zone;
432
433	/* rate groups */
434	struct herg		rate_ctrl[HE_REGN_CS_STPER];
435
436	/* memory offsets */
437	u_int			tsrb, tsrc, tsrd;
438	u_int			rsrb;
439
440	struct cv		cv_rcclose;	/* condition variable */
441	uint32_t		rate_grid[16][16]; /* our copy */
442
443	/* sysctl support */
444	struct sysctl_ctx_list	sysctl_ctx;
445	struct sysctl_oid	*sysctl_tree;
446
447	/* internal statistics */
448	struct istats		istats;
449
450#ifdef HATM_DEBUG
451	/* debugging */
452	u_int			debug;
453#endif
454};
455
456#define READ4(SC,OFF)	bus_space_read_4(SC->memt, SC->memh, (OFF))
457#define READ2(SC,OFF)	bus_space_read_2(SC->memt, SC->memh, (OFF))
458#define READ1(SC,OFF)	bus_space_read_1(SC->memt, SC->memh, (OFF))
459
460#define WRITE4(SC,OFF,VAL) bus_space_write_4(SC->memt, SC->memh, (OFF), (VAL))
461#define WRITE2(SC,OFF,VAL) bus_space_write_2(SC->memt, SC->memh, (OFF), (VAL))
462#define WRITE1(SC,OFF,VAL) bus_space_write_1(SC->memt, SC->memh, (OFF), (VAL))
463
464#define BARRIER_R(SC) bus_space_barrier(SC->memt, SC->memh, 0, HE_REGO_END, \
465	BUS_SPACE_BARRIER_READ)
466#define BARRIER_W(SC) bus_space_barrier(SC->memt, SC->memh, 0, HE_REGO_END, \
467	BUS_SPACE_BARRIER_WRITE)
468#define BARRIER_RW(SC) bus_space_barrier(SC->memt, SC->memh, 0, HE_REGO_END, \
469	BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE)
470
471#define READ_SUNI(SC,OFF) READ4(SC, HE_REGO_SUNI + 4 * (OFF))
472#define WRITE_SUNI(SC,OFF,VAL) WRITE4(SC, HE_REGO_SUNI + 4 * (OFF), (VAL))
473
474#define READ_LB4(SC,OFF)						\
475    ({									\
476	WRITE4(SC, HE_REGO_LB_MEM_ADDR, (OFF));				\
477	WRITE4(SC, HE_REGO_LB_MEM_ACCESS,				\
478	    (HE_REGM_LB_MEM_HNDSHK | HE_REGM_LB_MEM_READ));		\
479	while((READ4(SC, HE_REGO_LB_MEM_ACCESS) & HE_REGM_LB_MEM_HNDSHK))\
480		;							\
481	READ4(SC, HE_REGO_LB_MEM_DATA);					\
482    })
483#define WRITE_LB4(SC,OFF,VAL)						\
484    do {								\
485	WRITE4(SC, HE_REGO_LB_MEM_ADDR, (OFF));				\
486	WRITE4(SC, HE_REGO_LB_MEM_DATA, (VAL));				\
487	WRITE4(SC, HE_REGO_LB_MEM_ACCESS,				\
488	    (HE_REGM_LB_MEM_HNDSHK | HE_REGM_LB_MEM_WRITE));		\
489	while((READ4(SC, HE_REGO_LB_MEM_ACCESS) & HE_REGM_LB_MEM_HNDSHK))\
490		;							\
491    } while(0)
492
493#define WRITE_MEM4(SC,OFF,VAL,SPACE)					\
494    do {								\
495	WRITE4(SC, HE_REGO_CON_DAT, (VAL));				\
496	WRITE4(SC, HE_REGO_CON_CTL,					\
497	    (SPACE | HE_REGM_CON_WE | HE_REGM_CON_STATUS | (OFF)));	\
498	while((READ4(SC, HE_REGO_CON_CTL) & HE_REGM_CON_STATUS) != 0)	\
499		;							\
500    } while(0)
501
502#define READ_MEM4(SC,OFF,SPACE)					\
503    ({									\
504	WRITE4(SC, HE_REGO_CON_CTL,					\
505	    (SPACE | HE_REGM_CON_STATUS | (OFF)));			\
506	while((READ4(SC, HE_REGO_CON_CTL) & HE_REGM_CON_STATUS) != 0)	\
507		;							\
508	READ4(SC, HE_REGO_CON_DAT);					\
509    })
510
511#define WRITE_TCM4(SC,OFF,VAL) WRITE_MEM4(SC,(OFF),(VAL),HE_REGM_CON_TCM)
512#define WRITE_RCM4(SC,OFF,VAL) WRITE_MEM4(SC,(OFF),(VAL),HE_REGM_CON_RCM)
513#define WRITE_MBOX4(SC,OFF,VAL) WRITE_MEM4(SC,(OFF),(VAL),HE_REGM_CON_MBOX)
514
515#define READ_TCM4(SC,OFF) READ_MEM4(SC,(OFF),HE_REGM_CON_TCM)
516#define READ_RCM4(SC,OFF) READ_MEM4(SC,(OFF),HE_REGM_CON_RCM)
517#define READ_MBOX4(SC,OFF) READ_MEM4(SC,(OFF),HE_REGM_CON_MBOX)
518
519#define WRITE_TCM(SC,OFF,BYTES,VAL) 					\
520	WRITE_MEM4(SC,(OFF) | ((~(BYTES) & 0xf) << HE_REGS_CON_DIS),	\
521	    (VAL), HE_REGM_CON_TCM)
522#define WRITE_RCM(SC,OFF,BYTES,VAL) 					\
523	WRITE_MEM4(SC,(OFF) | ((~(BYTES) & 0xf) << HE_REGS_CON_DIS),	\
524	    (VAL), HE_REGM_CON_RCM)
525
526#define READ_TSR(SC,CID,NR)						\
527    ({									\
528	uint32_t _v;							\
529	if((NR) <= 7) {							\
530		_v = READ_TCM4(SC, HE_REGO_TSRA(0,CID,NR));		\
531	} else if((NR) <= 11) {						\
532		_v = READ_TCM4(SC, HE_REGO_TSRB((SC)->tsrb,CID,(NR-8)));\
533	} else if((NR) <= 13) {						\
534		_v = READ_TCM4(SC, HE_REGO_TSRC((SC)->tsrc,CID,(NR-12)));\
535	} else {							\
536		_v = READ_TCM4(SC, HE_REGO_TSRD((SC)->tsrd,CID));	\
537	}								\
538	_v;								\
539    })
540
541#define WRITE_TSR(SC,CID,NR,BEN,VAL)					\
542    do {								\
543	if((NR) <= 7) {							\
544		WRITE_TCM(SC, HE_REGO_TSRA(0,CID,NR),BEN,VAL);		\
545	} else if((NR) <= 11) {						\
546		WRITE_TCM(SC, HE_REGO_TSRB((SC)->tsrb,CID,(NR-8)),BEN,VAL);\
547	} else if((NR) <= 13) {						\
548		WRITE_TCM(SC, HE_REGO_TSRC((SC)->tsrc,CID,(NR-12)),BEN,VAL);\
549	} else {							\
550		WRITE_TCM(SC, HE_REGO_TSRD((SC)->tsrd,CID),BEN,VAL);	\
551	}								\
552    } while(0)
553
554#define READ_RSR(SC,CID,NR)						\
555    ({									\
556	uint32_t _v;							\
557	if((NR) <= 7) {							\
558		_v = READ_RCM4(SC, HE_REGO_RSRA(0,CID,NR));		\
559	} else {							\
560		_v = READ_RCM4(SC, HE_REGO_RSRB((SC)->rsrb,CID,(NR-8)));\
561	}								\
562	_v;								\
563    })
564
565#define WRITE_RSR(SC,CID,NR,BEN,VAL)					\
566    do {								\
567	if((NR) <= 7) {							\
568		WRITE_RCM(SC, HE_REGO_RSRA(0,CID,NR),BEN,VAL);		\
569	} else {							\
570		WRITE_RCM(SC, HE_REGO_RSRB((SC)->rsrb,CID,(NR-8)),BEN,VAL);\
571	}								\
572    } while(0)
573
574#ifdef HATM_DEBUG
575#define DBG(SC, FL, PRINT) do {						\
576	if((SC)->debug & DBG_##FL) { 					\
577		if_printf(&(SC)->ifatm.ifnet, "%s: ", __func__);	\
578		printf PRINT;						\
579		printf("\n");						\
580	}								\
581    } while (0)
582
583enum {
584	DBG_RX		= 0x0001,
585	DBG_TX		= 0x0002,
586	DBG_VCC		= 0x0004,
587	DBG_IOCTL	= 0x0008,
588	DBG_ATTACH	= 0x0010,
589	DBG_INTR	= 0x0020,
590	DBG_DMA		= 0x0040,
591	DBG_DMAH	= 0x0080,
592
593	DBG_ALL		= 0x00ff
594};
595
596#else
597#define DBG(SC, FL, PRINT)
598#endif
599
600u_int hatm_cps2atmf(uint32_t);
601u_int hatm_atmf2cps(uint32_t);
602
603void hatm_intr(void *);
604int hatm_ioctl(struct ifnet *, u_long, caddr_t);
605void hatm_initialize(struct hatm_softc *);
606void hatm_stop(struct hatm_softc *sc);
607void hatm_start(struct ifnet *);
608
609void hatm_rx(struct hatm_softc *sc, u_int cid, u_int flags, struct mbuf *m,
610    u_int len);
611void hatm_tx_complete(struct hatm_softc *sc, struct tpd *tpd, uint32_t);
612
613int hatm_tx_vcc_can_open(struct hatm_softc *sc, u_int cid, struct hevcc *);
614void hatm_tx_vcc_open(struct hatm_softc *sc, u_int cid);
615void hatm_rx_vcc_open(struct hatm_softc *sc, u_int cid);
616void hatm_tx_vcc_close(struct hatm_softc *sc, u_int cid);
617void hatm_rx_vcc_close(struct hatm_softc *sc, u_int cid);
618void hatm_tx_vcc_closed(struct hatm_softc *sc, u_int cid);
619void hatm_vcc_closed(struct hatm_softc *sc, u_int cid);
620