1/*
2  Madge Horizon ATM Adapter driver.
3  Copyright (C) 1995-1999  Madge Networks Ltd.
4
5  This program is free software; you can redistribute it and/or modify
6  it under the terms of the GNU General Public License as published by
7  the Free Software Foundation; either version 2 of the License, or
8  (at your option) any later version.
9
10  This program is distributed in the hope that it will be useful,
11  but WITHOUT ANY WARRANTY; without even the implied warranty of
12  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  GNU General Public License for more details.
14
15  You should have received a copy of the GNU General Public License
16  along with this program; if not, write to the Free Software
17  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
18
19  The GNU GPL is contained in /usr/doc/copyright/GPL on a Debian
20  system and in the file COPYING in the Linux kernel source.
21*/
22
23/*
24  IMPORTANT NOTE: Madge Networks no longer makes the adapters
25  supported by this driver and makes no commitment to maintain it.
26*/
27
28#include <linux/module.h>
29#include <linux/kernel.h>
30#include <linux/mm.h>
31#include <linux/pci.h>
32#include <linux/errno.h>
33#include <linux/atm.h>
34#include <linux/atmdev.h>
35#include <linux/sonet.h>
36#include <linux/skbuff.h>
37#include <linux/time.h>
38#include <linux/delay.h>
39#include <linux/uio.h>
40#include <linux/init.h>
41#include <linux/ioport.h>
42#include <linux/wait.h>
43
44#include <asm/system.h>
45#include <asm/io.h>
46#include <asm/atomic.h>
47#include <asm/uaccess.h>
48#include <asm/string.h>
49#include <asm/byteorder.h>
50
51#include "horizon.h"
52
53#define maintainer_string "Giuliano Procida at Madge Networks <gprocida@madge.com>"
54#define description_string "Madge ATM Horizon [Ultra] driver"
55#define version_string "1.2.1"
56
57static inline void __init show_version (void) {
58  printk ("%s version %s\n", description_string, version_string);
59}
60
61/*
62
63  CREDITS
64
65  Driver and documentation by:
66
67  Chris Aston        Madge Networks
68  Giuliano Procida   Madge Networks
69  Simon Benham       Madge Networks
70  Simon Johnson      Madge Networks
71  Various Others     Madge Networks
72
73  Some inspiration taken from other drivers by:
74
75  Alexandru Cucos    UTBv
76  Kari Mettinen      University of Helsinki
77  Werner Almesberger EPFL LRC
78
79  Theory of Operation
80
81  I Hardware, detection, initialisation and shutdown.
82
83  1. Supported Hardware
84
85  This driver should handle all variants of the PCI Madge ATM adapters
86  with the Horizon chipset. These are all PCI cards supporting PIO, BM
87  DMA and a form of MMIO (registers only, not internal RAM).
88
89  The driver is only known to work with SONET and UTP Horizon Ultra
90  cards at 155Mb/s. However, code is in place to deal with both the
91  original Horizon and 25Mb/s operation.
92
93  There are two revisions of the Horizon ASIC: the original and the
94  Ultra. Details of hardware bugs are in section III.
95
96  The ASIC version can be distinguished by chip markings but is NOT
97  indicated by the PCI revision (all adapters seem to have PCI rev 1).
98
99  I believe that:
100
101  Horizon       => Collage  25 PCI Adapter (UTP and STP)
102  Horizon Ultra => Collage 155 PCI Client (UTP or SONET)
103  Ambassador x  => Collage 155 PCI Server (completely different)
104
105  Horizon (25Mb/s) is fitted with UTP and STP connectors. It seems to
106  have a Madge B154 plus glue logic serializer. I have also found a
107  really ancient version of this with slightly different glue. It
108  comes with the revision 0 (140-025-01) ASIC.
109
110  Horizon Ultra (155Mb/s) is fitted with either a Pulse Medialink
111  output (UTP) or an HP HFBR 5205 output (SONET). It has either
112  Madge's SAMBA framer or a SUNI-lite device (early versions). It
113  comes with the revision 1 (140-027-01) ASIC.
114
115  2. Detection
116
117  All Horizon-based cards present with the same PCI Vendor and Device
118  IDs. The standard Linux 2.2 PCI API is used to locate any cards and
119  to enable bus-mastering (with appropriate latency).
120
121  ATM_LAYER_STATUS in the control register distinguishes between the
122  two possible physical layers (25 and 155). It is not clear whether
123  the 155 cards can also operate at 25Mbps. We rely on the fact that a
124  card operates at 155 if and only if it has the newer Horizon Ultra
125  ASIC.
126
127  For 155 cards the two possible framers are probed for and then set
128  up for loop-timing.
129
130  3. Initialisation
131
132  The card is reset and then put into a known state. The physical
133  layer is configured for normal operation at the appropriate speed;
134  in the case of the 155 cards, the framer is initialised with
135  line-based timing; the internal RAM is zeroed and the allocation of
136  buffers for RX and TX is made; the Burnt In Address is read and
137  copied to the ATM ESI; various policy settings for RX (VPI bits,
138  unknown VCs, oam cells) are made. Ideally all policy items should be
139  configurable at module load (if not actually on-demand), however,
140  only the vpi vs vci bit allocation can be specified at insmod.
141
142  4. Shutdown
143
144  This is in response to module_cleaup. No VCs are in use and the card
145  should be idle; it is reset.
146
147  II Driver software (as it should be)
148
149  0. Traffic Parameters
150
151  The traffic classes (not an enumeration) are currently: ATM_NONE (no
152  traffic), ATM_UBR, ATM_CBR, ATM_VBR and ATM_ABR, ATM_ANYCLASS
153  (compatible with everything). Together with (perhaps only some of)
154  the following items they make up the traffic specification.
155
156  struct atm_trafprm {
157    unsigned char traffic_class; traffic class (ATM_UBR, ...)
158    int           max_pcr;       maximum PCR in cells per second
159    int           pcr;           desired PCR in cells per second
160    int           min_pcr;       minimum PCR in cells per second
161    int           max_cdv;       maximum CDV in microseconds
162    int           max_sdu;       maximum SDU in bytes
163  };
164
165  Note that these denote bandwidth available not bandwidth used; the
166  possibilities according to ATMF are:
167
168  Real Time (cdv and max CDT given)
169
170  CBR(pcr)             pcr bandwidth always available
171  rtVBR(pcr,scr,mbs)   scr bandwidth always available, upto pcr at mbs too
172
173  Non Real Time
174
175  nrtVBR(pcr,scr,mbs)  scr bandwidth always available, upto pcr at mbs too
176  UBR()
177  ABR(mcr,pcr)         mcr bandwidth always available, upto pcr (depending) too
178
179  mbs is max burst size (bucket)
180  pcr and scr have associated cdvt values
181  mcr is like scr but has no cdtv
182  cdtv may differ at each hop
183
184  Some of the above items are qos items (as opposed to traffic
185  parameters). We have nothing to do with qos. All except ABR can have
186  their traffic parameters converted to GCRA parameters. The GCRA may
187  be implemented as a (real-number) leaky bucket. The GCRA can be used
188  in complicated ways by switches and in simpler ways by end-stations.
189  It can be used both to filter incoming cells and shape out-going
190  cells.
191
192  ATM Linux actually supports:
193
194  ATM_NONE() (no traffic in this direction)
195  ATM_UBR(max_frame_size)
196  ATM_CBR(max/min_pcr, max_cdv, max_frame_size)
197
198  0 or ATM_MAX_PCR are used to indicate maximum available PCR
199
200  A traffic specification consists of the AAL type and separate
201  traffic specifications for either direction. In ATM Linux it is:
202
203  struct atm_qos {
204  struct atm_trafprm txtp;
205  struct atm_trafprm rxtp;
206  unsigned char aal;
207  };
208
209  AAL types are:
210
211  ATM_NO_AAL    AAL not specified
212  ATM_AAL0      "raw" ATM cells
213  ATM_AAL1      AAL1 (CBR)
214  ATM_AAL2      AAL2 (VBR)
215  ATM_AAL34     AAL3/4 (data)
216  ATM_AAL5      AAL5 (data)
217  ATM_SAAL      signaling AAL
218
219  The Horizon has support for AAL frame types: 0, 3/4 and 5. However,
220  it does not implement AAL 3/4 SAR and it has a different notion of
221  "raw cell" to ATM Linux's (48 bytes vs. 52 bytes) so neither are
222  supported by this driver.
223
224  The Horizon has limited support for ABR (including UBR), VBR and
225  CBR. Each TX channel has a bucket (containing up to 31 cell units)
226  and two timers (PCR and SCR) associated with it that can be used to
227  govern cell emissions and host notification (in the case of ABR this
228  is presumably so that RM cells may be emitted at appropriate times).
229  The timers may either be disabled or may be set to any of 240 values
230  (determined by the clock crystal, a fixed (?) per-device divider, a
231  configurable divider and a configurable timer preload value).
232
233  At the moment only UBR and CBR are supported by the driver. VBR will
234  be supported as soon as ATM for Linux supports it. ABR support is
235  very unlikely as RM cell handling is completely up to the driver.
236
237  1. TX (TX channel setup and TX transfer)
238
239  The TX half of the driver owns the TX Horizon registers. The TX
240  component in the IRQ handler is the BM completion handler. This can
241  only be entered when tx_busy is true (enforced by hardware). The
242  other TX component can only be entered when tx_busy is false
243  (enforced by driver). So TX is single-threaded.
244
245  Apart from a minor optimisation to not re-select the last channel,
246  the TX send component works as follows:
247
248  Atomic test and set tx_busy until we succeed; we should implement
249  some sort of timeout so that tx_busy will never be stuck at true.
250
251  If no TX channel is set up for this VC we wait for an idle one (if
252  necessary) and set it up.
253
254  At this point we have a TX channel ready for use. We wait for enough
255  buffers to become available then start a TX transmit (set the TX
256  descriptor, schedule transfer, exit).
257
258  The IRQ component handles TX completion (stats, free buffer, tx_busy
259  unset, exit). We also re-schedule further transfers for the same
260  frame if needed.
261
262  TX setup in more detail:
263
264  TX open is a nop, the relevant information is held in the hrz_vcc
265  (vcc->dev_data) structure and is "cached" on the card.
266
267  TX close gets the TX lock and clears the channel from the "cache".
268
269  2. RX (Data Available and RX transfer)
270
271  The RX half of the driver owns the RX registers. There are two RX
272  components in the IRQ handler: the data available handler deals with
273  fresh data that has arrived on the card, the BM completion handler
274  is very similar to the TX completion handler. The data available
275  handler grabs the rx_lock and it is only released once the data has
276  been discarded or completely transferred to the host. The BM
277  completion handler only runs when the lock is held; the data
278  available handler is locked out over the same period.
279
280  Data available on the card triggers an interrupt. If the data is not
281  suitable for our existing RX channels or we cannot allocate a buffer
282  it is flushed. Otherwise an RX receive is scheduled. Multiple RX
283  transfers may be scheduled for the same frame.
284
285  RX setup in more detail:
286
287  RX open...
288  RX close...
289
290  III Hardware Bugs
291
292  0. Byte vs Word addressing of adapter RAM.
293
294  A design feature; see the .h file (especially the memory map).
295
296  1. Bus Master Data Transfers (original Horizon only, fixed in Ultra)
297
298  The host must not start a transmit direction transfer at a
299  non-four-byte boundary in host memory. Instead the host should
300  perform a byte, or a two byte, or one byte followed by two byte
301  transfer in order to start the rest of the transfer on a four byte
302  boundary. RX is OK.
303
304  Simultaneous transmit and receive direction bus master transfers are
305  not allowed.
306
307  The simplest solution to these two is to always do PIO (never DMA)
308  in the TX direction on the original Horizon. More complicated
309  solutions are likely to hurt my brain.
310
311  2. Loss of buffer on close VC
312
313  When a VC is being closed, the buffer associated with it is not
314  returned to the pool. The host must store the reference to this
315  buffer and when opening a new VC then give it to that new VC.
316
317  The host intervention currently consists of stacking such a buffer
318  pointer at VC close and checking the stack at VC open.
319
320  3. Failure to close a VC
321
322  If a VC is currently receiving a frame then closing the VC may fail
323  and the frame continues to be received.
324
325  The solution is to make sure any received frames are flushed when
326  ready. This is currently done just before the solution to 2.
327
328  4. PCI bus (original Horizon only, fixed in Ultra)
329
330  Reading from the data port prior to initialisation will hang the PCI
331  bus. Just don't do that then! We don't.
332
333  IV To Do List
334
335  . Timer code may be broken.
336
337  . Allow users to specify buffer allocation split for TX and RX.
338
339  . Deal once and for all with buggy VC close.
340
341  . Handle interrupted and/or non-blocking operations.
342
343  . Change some macros to functions and move from .h to .c.
344
345  . Try to limit the number of TX frames each VC may have queued, in
346    order to reduce the chances of TX buffer exhaustion.
347
348  . Implement VBR (bucket and timers not understood) and ABR (need to
349    do RM cells manually); also no Linux support for either.
350
351  . Implement QoS changes on open VCs (involves extracting parts of VC open
352    and close into separate functions and using them to make changes).
353
354*/
355
356/********** globals **********/
357
358static void do_housekeeping (unsigned long arg);
359
360static unsigned short debug = 0;
361static unsigned short vpi_bits = 0;
362static int max_tx_size = 9000;
363static int max_rx_size = 9000;
364static unsigned char pci_lat = 0;
365
366/********** access functions **********/
367
368/* Read / Write Horizon registers */
369static inline void wr_regl (const hrz_dev * dev, unsigned char reg, u32 data) {
370  outl (cpu_to_le32 (data), dev->iobase + reg);
371}
372
373static inline u32 rd_regl (const hrz_dev * dev, unsigned char reg) {
374  return le32_to_cpu (inl (dev->iobase + reg));
375}
376
377static inline void wr_regw (const hrz_dev * dev, unsigned char reg, u16 data) {
378  outw (cpu_to_le16 (data), dev->iobase + reg);
379}
380
381static inline u16 rd_regw (const hrz_dev * dev, unsigned char reg) {
382  return le16_to_cpu (inw (dev->iobase + reg));
383}
384
385static inline void wrs_regb (const hrz_dev * dev, unsigned char reg, void * addr, u32 len) {
386  outsb (dev->iobase + reg, addr, len);
387}
388
389static inline void rds_regb (const hrz_dev * dev, unsigned char reg, void * addr, u32 len) {
390  insb (dev->iobase + reg, addr, len);
391}
392
393/* Read / Write to a given address in Horizon buffer memory.
394   Interrupts must be disabled between the address register and data
395   port accesses as these must form an atomic operation. */
396static inline void wr_mem (const hrz_dev * dev, HDW * addr, u32 data) {
397  // wr_regl (dev, MEM_WR_ADDR_REG_OFF, (u32) addr);
398  wr_regl (dev, MEM_WR_ADDR_REG_OFF, (addr - (HDW *) 0) * sizeof(HDW));
399  wr_regl (dev, MEMORY_PORT_OFF, data);
400}
401
402static inline u32 rd_mem (const hrz_dev * dev, HDW * addr) {
403  // wr_regl (dev, MEM_RD_ADDR_REG_OFF, (u32) addr);
404  wr_regl (dev, MEM_RD_ADDR_REG_OFF, (addr - (HDW *) 0) * sizeof(HDW));
405  return rd_regl (dev, MEMORY_PORT_OFF);
406}
407
408static inline void wr_framer (const hrz_dev * dev, u32 addr, u32 data) {
409  wr_regl (dev, MEM_WR_ADDR_REG_OFF, (u32) addr | 0x80000000);
410  wr_regl (dev, MEMORY_PORT_OFF, data);
411}
412
413static inline u32 rd_framer (const hrz_dev * dev, u32 addr) {
414  wr_regl (dev, MEM_RD_ADDR_REG_OFF, (u32) addr | 0x80000000);
415  return rd_regl (dev, MEMORY_PORT_OFF);
416}
417
418/********** specialised access functions **********/
419
420/* RX */
421
422static inline void FLUSH_RX_CHANNEL (hrz_dev * dev, u16 channel) {
423  wr_regw (dev, RX_CHANNEL_PORT_OFF, FLUSH_CHANNEL | channel);
424  return;
425}
426
427static inline void WAIT_FLUSH_RX_COMPLETE (hrz_dev * dev) {
428  while (rd_regw (dev, RX_CHANNEL_PORT_OFF) & FLUSH_CHANNEL)
429    ;
430  return;
431}
432
433static inline void SELECT_RX_CHANNEL (hrz_dev * dev, u16 channel) {
434  wr_regw (dev, RX_CHANNEL_PORT_OFF, channel);
435  return;
436}
437
438static inline void WAIT_UPDATE_COMPLETE (hrz_dev * dev) {
439  while (rd_regw (dev, RX_CHANNEL_PORT_OFF) & RX_CHANNEL_UPDATE_IN_PROGRESS)
440    ;
441  return;
442}
443
444/* TX */
445
446static inline void SELECT_TX_CHANNEL (hrz_dev * dev, u16 tx_channel) {
447  wr_regl (dev, TX_CHANNEL_PORT_OFF, tx_channel);
448  return;
449}
450
451/* Update or query one configuration parameter of a particular channel. */
452
453static inline void update_tx_channel_config (hrz_dev * dev, short chan, u8 mode, u16 value) {
454  wr_regw (dev, TX_CHANNEL_CONFIG_COMMAND_OFF,
455	   chan * TX_CHANNEL_CONFIG_MULT | mode);
456    wr_regw (dev, TX_CHANNEL_CONFIG_DATA_OFF, value);
457    return;
458}
459
460static inline u16 query_tx_channel_config (hrz_dev * dev, short chan, u8 mode) {
461  wr_regw (dev, TX_CHANNEL_CONFIG_COMMAND_OFF,
462	   chan * TX_CHANNEL_CONFIG_MULT | mode);
463    return rd_regw (dev, TX_CHANNEL_CONFIG_DATA_OFF);
464}
465
466/********** dump functions **********/
467
468static inline void dump_skb (char * prefix, unsigned int vc, struct sk_buff * skb) {
469#ifdef DEBUG_HORIZON
470  unsigned int i;
471  unsigned char * data = skb->data;
472  PRINTDB (DBG_DATA, "%s(%u) ", prefix, vc);
473  for (i=0; i<skb->len && i < 256;i++)
474    PRINTDM (DBG_DATA, "%02x ", data[i]);
475  PRINTDE (DBG_DATA,"");
476#else
477  (void) prefix;
478  (void) vc;
479  (void) skb;
480#endif
481  return;
482}
483
484static inline void dump_regs (hrz_dev * dev) {
485#ifdef DEBUG_HORIZON
486  PRINTD (DBG_REGS, "CONTROL 0: %#x", rd_regl (dev, CONTROL_0_REG));
487  PRINTD (DBG_REGS, "RX CONFIG: %#x", rd_regw (dev, RX_CONFIG_OFF));
488  PRINTD (DBG_REGS, "TX CONFIG: %#x", rd_regw (dev, TX_CONFIG_OFF));
489  PRINTD (DBG_REGS, "TX STATUS: %#x", rd_regw (dev, TX_STATUS_OFF));
490  PRINTD (DBG_REGS, "IRQ ENBLE: %#x", rd_regl (dev, INT_ENABLE_REG_OFF));
491  PRINTD (DBG_REGS, "IRQ SORCE: %#x", rd_regl (dev, INT_SOURCE_REG_OFF));
492#else
493  (void) dev;
494#endif
495  return;
496}
497
498static inline void dump_framer (hrz_dev * dev) {
499#ifdef DEBUG_HORIZON
500  unsigned int i;
501  PRINTDB (DBG_REGS, "framer registers:");
502  for (i = 0; i < 0x10; ++i)
503    PRINTDM (DBG_REGS, " %02x", rd_framer (dev, i));
504  PRINTDE (DBG_REGS,"");
505#else
506  (void) dev;
507#endif
508  return;
509}
510
511/********** VPI/VCI <-> (RX) channel conversions **********/
512
513/* RX channels are 10 bit integers, these fns are quite paranoid */
514
515static inline int channel_to_vpivci (const u16 channel, short * vpi, int * vci) {
516  unsigned short vci_bits = 10 - vpi_bits;
517  if ((channel & RX_CHANNEL_MASK) == channel) {
518    *vci = channel & ((~0)<<vci_bits);
519    *vpi = channel >> vci_bits;
520    return channel ? 0 : -EINVAL;
521  }
522  return -EINVAL;
523}
524
525static inline int vpivci_to_channel (u16 * channel, const short vpi, const int vci) {
526  unsigned short vci_bits = 10 - vpi_bits;
527  if (0 <= vpi && vpi < 1<<vpi_bits && 0 <= vci && vci < 1<<vci_bits) {
528    *channel = vpi<<vci_bits | vci;
529    return *channel ? 0 : -EINVAL;
530  }
531  return -EINVAL;
532}
533
534/********** decode RX queue entries **********/
535
536static inline u16 rx_q_entry_to_length (u32 x) {
537  return x & RX_Q_ENTRY_LENGTH_MASK;
538}
539
540static inline u16 rx_q_entry_to_rx_channel (u32 x) {
541  return (x>>RX_Q_ENTRY_CHANNEL_SHIFT) & RX_CHANNEL_MASK;
542}
543
544/* Cell Transmit Rate Values
545 *
546 * the cell transmit rate (cells per sec) can be set to a variety of
547 * different values by specifying two parameters: a timer preload from
548 * 1 to 16 (stored as 0 to 15) and a clock divider (2 to the power of
549 * an exponent from 0 to 14; the special value 15 disables the timer).
550 *
551 * cellrate = baserate / (preload * 2^divider)
552 *
553 * The maximum cell rate that can be specified is therefore just the
554 * base rate. Halving the preload is equivalent to adding 1 to the
555 * divider and so values 1 to 8 of the preload are redundant except
556 * in the case of a maximal divider (14).
557 *
558 * Given a desired cell rate, an algorithm to determine the preload
559 * and divider is:
560 *
561 * a) x = baserate / cellrate, want p * 2^d = x (as far as possible)
562 * b) if x > 16 * 2^14 then set p = 16, d = 14 (min rate), done
563 *    if x <= 16 then set p = x, d = 0 (high rates), done
564 * c) now have 16 < x <= 2^18, or 1 < x/16 <= 2^14 and we want to
565 *    know n such that 2^(n-1) < x/16 <= 2^n, so slide a bit until
566 *    we find the range (n will be between 1 and 14), set d = n
567 * d) Also have 8 < x/2^n <= 16, so set p nearest x/2^n
568 *
569 * The algorithm used below is a minor variant of the above.
570 *
571 * The base rate is derived from the oscillator frequency (Hz) using a
572 * fixed divider:
573 *
574 * baserate = freq / 32 in the case of some Unknown Card
575 * baserate = freq / 8  in the case of the Horizon        25
576 * baserate = freq / 8  in the case of the Horizon Ultra 155
577 *
578 * The Horizon cards have oscillators and base rates as follows:
579 *
580 * Card               Oscillator  Base Rate
581 * Unknown Card       33 MHz      1.03125 MHz (33 MHz = PCI freq)
582 * Horizon        25  32 MHz      4       MHz
583 * Horizon Ultra 155  40 MHz      5       MHz
584 *
585 * The following defines give the base rates in Hz. These were
586 * previously a factor of 100 larger, no doubt someone was using
587 * cps*100.
588 */
589
590#define BR_UKN 1031250l
591#define BR_HRZ 4000000l
592#define BR_ULT 5000000l
593
594// d is an exponent
595#define CR_MIND 0
596#define CR_MAXD 14
597
598// p ranges from 1 to a power of 2
599#define CR_MAXPEXP 4
600
601static int make_rate (const hrz_dev * dev, u32 c, rounding r,
602		      u16 * bits, unsigned int * actual)
603{
604	// note: rounding the rate down means rounding 'p' up
605	const unsigned long br = test_bit(ultra, &dev->flags) ? BR_ULT : BR_HRZ;
606
607	u32 div = CR_MIND;
608	u32 pre;
609
610	// br_exp and br_man are used to avoid overflowing (c*maxp*2^d) in
611	// the tests below. We could think harder about exact possibilities
612	// of failure...
613
614	unsigned long br_man = br;
615	unsigned int br_exp = 0;
616
617	PRINTD (DBG_QOS|DBG_FLOW, "make_rate b=%lu, c=%u, %s", br, c,
618		r == round_up ? "up" : r == round_down ? "down" : "nearest");
619
620	// avoid div by zero
621	if (!c) {
622		PRINTD (DBG_QOS|DBG_ERR, "zero rate is not allowed!");
623		return -EINVAL;
624	}
625
626	while (br_exp < CR_MAXPEXP + CR_MIND && (br_man % 2 == 0)) {
627		br_man = br_man >> 1;
628		++br_exp;
629	}
630	// (br >>br_exp) <<br_exp == br and
631	// br_exp <= CR_MAXPEXP+CR_MIND
632
633	if (br_man <= (c << (CR_MAXPEXP+CR_MIND-br_exp))) {
634		// Equivalent to: B <= (c << (MAXPEXP+MIND))
635		// take care of rounding
636		switch (r) {
637			case round_down:
638				pre = (br+(c<<div)-1)/(c<<div);
639				// but p must be non-zero
640				if (!pre)
641					pre = 1;
642				break;
643			case round_nearest:
644				pre = (br+(c<<div)/2)/(c<<div);
645				// but p must be non-zero
646				if (!pre)
647					pre = 1;
648				break;
649			default:	/* round_up */
650				pre = br/(c<<div);
651				// but p must be non-zero
652				if (!pre)
653					return -EINVAL;
654		}
655		PRINTD (DBG_QOS, "A: p=%u, d=%u", pre, div);
656		goto got_it;
657	}
658
659	// at this point we have
660	// d == MIND and (c << (MAXPEXP+MIND)) < B
661	while (div < CR_MAXD) {
662		div++;
663		if (br_man <= (c << (CR_MAXPEXP+div-br_exp))) {
664			// Equivalent to: B <= (c << (MAXPEXP+d))
665			// c << (MAXPEXP+d-1) < B <= c << (MAXPEXP+d)
666			// 1 << (MAXPEXP-1) < B/2^d/c <= 1 << MAXPEXP
667			// MAXP/2 < B/c2^d <= MAXP
668			// take care of rounding
669			switch (r) {
670				case round_down:
671					pre = (br+(c<<div)-1)/(c<<div);
672					break;
673				case round_nearest:
674					pre = (br+(c<<div)/2)/(c<<div);
675					break;
676				default: /* round_up */
677					pre = br/(c<<div);
678			}
679			PRINTD (DBG_QOS, "B: p=%u, d=%u", pre, div);
680			goto got_it;
681		}
682	}
683	// at this point we have
684	// d == MAXD and (c << (MAXPEXP+MAXD)) < B
685	// but we cannot go any higher
686	// take care of rounding
687	if (r == round_down)
688		return -EINVAL;
689	pre = 1 << CR_MAXPEXP;
690	PRINTD (DBG_QOS, "C: p=%u, d=%u", pre, div);
691got_it:
692	// paranoia
693	if (div > CR_MAXD || (!pre) || pre > 1<<CR_MAXPEXP) {
694		PRINTD (DBG_QOS, "set_cr internal failure: d=%u p=%u",
695			div, pre);
696		return -EINVAL;
697	} else {
698		if (bits)
699			*bits = (div<<CLOCK_SELECT_SHIFT) | (pre-1);
700		if (actual) {
701			*actual = (br + (pre<<div) - 1) / (pre<<div);
702			PRINTD (DBG_QOS, "actual rate: %u", *actual);
703		}
704		return 0;
705	}
706}
707
708static int make_rate_with_tolerance (const hrz_dev * dev, u32 c, rounding r, unsigned int tol,
709				     u16 * bit_pattern, unsigned int * actual) {
710  unsigned int my_actual;
711
712  PRINTD (DBG_QOS|DBG_FLOW, "make_rate_with_tolerance c=%u, %s, tol=%u",
713	  c, (r == round_up) ? "up" : (r == round_down) ? "down" : "nearest", tol);
714
715  if (!actual)
716    // actual rate is not returned
717    actual = &my_actual;
718
719  if (make_rate (dev, c, round_nearest, bit_pattern, actual))
720    // should never happen as round_nearest always succeeds
721    return -1;
722
723  if (c - tol <= *actual && *actual <= c + tol)
724    // within tolerance
725    return 0;
726  else
727    // intolerant, try rounding instead
728    return make_rate (dev, c, r, bit_pattern, actual);
729}
730
731/********** Listen on a VC **********/
732
733static int hrz_open_rx (hrz_dev * dev, u16 channel) {
734  // is there any guarantee that we don't get two simulataneous
735  // identical calls of this function from different processes? yes
736  // rate_lock
737  unsigned long flags;
738  u32 channel_type; // u16?
739
740  u16 buf_ptr = RX_CHANNEL_IDLE;
741
742  rx_ch_desc * rx_desc = &memmap->rx_descs[channel];
743
744  PRINTD (DBG_FLOW, "hrz_open_rx %x", channel);
745
746  spin_lock_irqsave (&dev->mem_lock, flags);
747  channel_type = rd_mem (dev, &rx_desc->wr_buf_type) & BUFFER_PTR_MASK;
748  spin_unlock_irqrestore (&dev->mem_lock, flags);
749
750  // very serious error, should never occur
751  if (channel_type != RX_CHANNEL_DISABLED) {
752    PRINTD (DBG_ERR|DBG_VCC, "RX channel for VC already open");
753    return -EBUSY; // clean up?
754  }
755
756  // Give back spare buffer
757  if (dev->noof_spare_buffers) {
758    buf_ptr = dev->spare_buffers[--dev->noof_spare_buffers];
759    PRINTD (DBG_VCC, "using a spare buffer: %u", buf_ptr);
760    // should never occur
761    if (buf_ptr == RX_CHANNEL_DISABLED || buf_ptr == RX_CHANNEL_IDLE) {
762      // but easy to recover from
763      PRINTD (DBG_ERR|DBG_VCC, "bad spare buffer pointer, using IDLE");
764      buf_ptr = RX_CHANNEL_IDLE;
765    }
766  } else {
767    PRINTD (DBG_VCC, "using IDLE buffer pointer");
768  }
769
770  // Channel is currently disabled so change its status to idle
771
772  // do we really need to save the flags again?
773  spin_lock_irqsave (&dev->mem_lock, flags);
774
775  wr_mem (dev, &rx_desc->wr_buf_type,
776	  buf_ptr | CHANNEL_TYPE_AAL5 | FIRST_CELL_OF_AAL5_FRAME);
777  if (buf_ptr != RX_CHANNEL_IDLE)
778    wr_mem (dev, &rx_desc->rd_buf_type, buf_ptr);
779
780  spin_unlock_irqrestore (&dev->mem_lock, flags);
781
782  // rxer->rate = make_rate (qos->peak_cells);
783
784  PRINTD (DBG_FLOW, "hrz_open_rx ok");
785
786  return 0;
787}
788
789
790/********** free an skb (as per ATM device driver documentation) **********/
791
792static inline void hrz_kfree_skb (struct sk_buff * skb) {
793  if (ATM_SKB(skb)->vcc->pop) {
794    ATM_SKB(skb)->vcc->pop (ATM_SKB(skb)->vcc, skb);
795  } else {
796    dev_kfree_skb_any (skb);
797  }
798}
799
800/********** cancel listen on a VC **********/
801
802static void hrz_close_rx (hrz_dev * dev, u16 vc) {
803  unsigned long flags;
804
805  u32 value;
806
807  u32 r1, r2;
808
809  rx_ch_desc * rx_desc = &memmap->rx_descs[vc];
810
811  int was_idle = 0;
812
813  spin_lock_irqsave (&dev->mem_lock, flags);
814  value = rd_mem (dev, &rx_desc->wr_buf_type) & BUFFER_PTR_MASK;
815  spin_unlock_irqrestore (&dev->mem_lock, flags);
816
817  if (value == RX_CHANNEL_DISABLED) {
818    // I suppose this could happen once we deal with _NONE traffic properly
819    PRINTD (DBG_VCC, "closing VC: RX channel %u already disabled", vc);
820    return;
821  }
822  if (value == RX_CHANNEL_IDLE)
823    was_idle = 1;
824
825  spin_lock_irqsave (&dev->mem_lock, flags);
826
827  for (;;) {
828    wr_mem (dev, &rx_desc->wr_buf_type, RX_CHANNEL_DISABLED);
829
830    if ((rd_mem (dev, &rx_desc->wr_buf_type) & BUFFER_PTR_MASK) == RX_CHANNEL_DISABLED)
831      break;
832
833    was_idle = 0;
834  }
835
836  if (was_idle) {
837    spin_unlock_irqrestore (&dev->mem_lock, flags);
838    return;
839  }
840
841  WAIT_FLUSH_RX_COMPLETE(dev);
842
843  // handler to discard frames that remain queued for delivery. If the
844  // worry is that immediately reopening the channel (perhaps by a
845  // different process) may cause some data to be mis-delivered then
846  // there may still be a simpler solution (such as busy-waiting on
847  // rx_busy once the channel is disabled or before a new one is
848  // opened - does this leave any holes?). Arguably setting up and
849  // tearing down the TX and RX halves of each virtual circuit could
850  // most safely be done within ?x_busy protected regions.
851
852  // OK, current changes are that Simon's marker is disabled and we DO
853  // look for NULL rxer elsewhere. The code here seems flush frames
854  // and then remember the last dead cell belonging to the channel
855  // just disabled - the cell gets relinked at the next vc_open.
856  // However, when all VCs are closed or only a few opened there are a
857  // handful of buffers that are unusable.
858
859  // Does anyone feel like documenting spare_buffers properly?
860  // Does anyone feel like fixing this in a nicer way?
861
862  // Flush any data which is left in the channel
863  for (;;) {
864    // Change the rx channel port to something different to the RX
865    // channel we are trying to close to force Horizon to flush the rx
866    // channel read and write pointers.
867
868    u16 other = vc^(RX_CHANS/2);
869
870    SELECT_RX_CHANNEL (dev, other);
871    WAIT_UPDATE_COMPLETE (dev);
872
873    r1 = rd_mem (dev, &rx_desc->rd_buf_type);
874
875    // Select this RX channel. Flush doesn't seem to work unless we
876    // select an RX channel before hand
877
878    SELECT_RX_CHANNEL (dev, vc);
879    WAIT_UPDATE_COMPLETE (dev);
880
881    // Attempt to flush a frame on this RX channel
882
883    FLUSH_RX_CHANNEL (dev, vc);
884    WAIT_FLUSH_RX_COMPLETE (dev);
885
886    // Force Horizon to flush rx channel read and write pointers as before
887
888    SELECT_RX_CHANNEL (dev, other);
889    WAIT_UPDATE_COMPLETE (dev);
890
891    r2 = rd_mem (dev, &rx_desc->rd_buf_type);
892
893    PRINTD (DBG_VCC|DBG_RX, "r1 = %u, r2 = %u", r1, r2);
894
895    if (r1 == r2) {
896      dev->spare_buffers[dev->noof_spare_buffers++] = (u16)r1;
897      break;
898    }
899  }
900
901
902  spin_unlock_irqrestore (&dev->mem_lock, flags);
903
904  return;
905}
906
907/********** schedule RX transfers **********/
908
909// Note on tail recursion: a GCC developer said that it is not likely
910// to be fixed soon, so do not define TAILRECUSRIONWORKS unless you
911// are sure it does as you may otherwise overflow the kernel stack.
912
913// giving this fn a return value would help GCC, alledgedly
914
915static void rx_schedule (hrz_dev * dev, int irq) {
916  unsigned int rx_bytes;
917
918  int pio_instead = 0;
919#ifndef TAILRECURSIONWORKS
920  pio_instead = 1;
921  while (pio_instead) {
922#endif
923    // bytes waiting for RX transfer
924    rx_bytes = dev->rx_bytes;
925
926
927    // this code follows the TX code but (at the moment) there is only
928    // one region - the skb itself. I don't know if this will change,
929    // but it doesn't hurt to have the code here, disabled.
930
931    if (rx_bytes) {
932      // start next transfer within same region
933      if (rx_bytes <= MAX_PIO_COUNT) {
934	PRINTD (DBG_RX|DBG_BUS, "(pio)");
935	pio_instead = 1;
936      }
937      if (rx_bytes <= MAX_TRANSFER_COUNT) {
938	PRINTD (DBG_RX|DBG_BUS, "(simple or last multi)");
939	dev->rx_bytes = 0;
940      } else {
941	PRINTD (DBG_RX|DBG_BUS, "(continuing multi)");
942	dev->rx_bytes = rx_bytes - MAX_TRANSFER_COUNT;
943	rx_bytes = MAX_TRANSFER_COUNT;
944      }
945    } else {
946      // rx_bytes == 0 -- we're between regions
947      // regions remaining to transfer
948      unsigned int rx_regions = 0;
949
950      if (rx_regions) {
951      } else {
952	// rx_regions == 0
953	// that's all folks - end of frame
954	struct sk_buff * skb = dev->rx_skb;
955	// dev->rx_iovec = 0;
956
957	FLUSH_RX_CHANNEL (dev, dev->rx_channel);
958
959	dump_skb ("<<<", dev->rx_channel, skb);
960
961	PRINTD (DBG_RX|DBG_SKB, "push %p %u", skb->data, skb->len);
962
963	{
964	  struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
965	  // VC layer stats
966	  atomic_inc(&vcc->stats->rx);
967	  __net_timestamp(skb);
968	  // end of our responsability
969	  vcc->push (vcc, skb);
970	}
971      }
972    }
973
974    // note: writing RX_COUNT clears any interrupt condition
975    if (rx_bytes) {
976      if (pio_instead) {
977	if (irq)
978	  wr_regl (dev, MASTER_RX_COUNT_REG_OFF, 0);
979	rds_regb (dev, DATA_PORT_OFF, dev->rx_addr, rx_bytes);
980      } else {
981	wr_regl (dev, MASTER_RX_ADDR_REG_OFF, virt_to_bus (dev->rx_addr));
982	wr_regl (dev, MASTER_RX_COUNT_REG_OFF, rx_bytes);
983      }
984      dev->rx_addr += rx_bytes;
985    } else {
986      if (irq)
987	wr_regl (dev, MASTER_RX_COUNT_REG_OFF, 0);
988      // allow another RX thread to start
989      YELLOW_LED_ON(dev);
990      clear_bit (rx_busy, &dev->flags);
991      PRINTD (DBG_RX, "cleared rx_busy for dev %p", dev);
992    }
993
994#ifdef TAILRECURSIONWORKS
995    // and we all bless optimised tail calls
996    if (pio_instead)
997      return rx_schedule (dev, 0);
998    return;
999#else
1000    // grrrrrrr!
1001    irq = 0;
1002  }
1003  return;
1004#endif
1005}
1006
1007/********** handle RX bus master complete events **********/
1008
1009static inline void rx_bus_master_complete_handler (hrz_dev * dev) {
1010  if (test_bit (rx_busy, &dev->flags)) {
1011    rx_schedule (dev, 1);
1012  } else {
1013    PRINTD (DBG_RX|DBG_ERR, "unexpected RX bus master completion");
1014    // clear interrupt condition on adapter
1015    wr_regl (dev, MASTER_RX_COUNT_REG_OFF, 0);
1016  }
1017  return;
1018}
1019
1020/********** (queue to) become the next TX thread **********/
1021
1022static inline int tx_hold (hrz_dev * dev) {
1023  PRINTD (DBG_TX, "sleeping at tx lock %p %lu", dev, dev->flags);
1024  wait_event_interruptible(dev->tx_queue, (!test_and_set_bit(tx_busy, &dev->flags)));
1025  PRINTD (DBG_TX, "woken at tx lock %p %lu", dev, dev->flags);
1026  if (signal_pending (current))
1027    return -1;
1028  PRINTD (DBG_TX, "set tx_busy for dev %p", dev);
1029  return 0;
1030}
1031
1032/********** allow another TX thread to start **********/
1033
1034static inline void tx_release (hrz_dev * dev) {
1035  clear_bit (tx_busy, &dev->flags);
1036  PRINTD (DBG_TX, "cleared tx_busy for dev %p", dev);
1037  wake_up_interruptible (&dev->tx_queue);
1038}
1039
1040/********** schedule TX transfers **********/
1041
1042static void tx_schedule (hrz_dev * const dev, int irq) {
1043  unsigned int tx_bytes;
1044
1045  int append_desc = 0;
1046
1047  int pio_instead = 0;
1048#ifndef TAILRECURSIONWORKS
1049  pio_instead = 1;
1050  while (pio_instead) {
1051#endif
1052    // bytes in current region waiting for TX transfer
1053    tx_bytes = dev->tx_bytes;
1054
1055
1056    if (tx_bytes) {
1057      // start next transfer within same region
1058      if (!test_bit (ultra, &dev->flags) || tx_bytes <= MAX_PIO_COUNT) {
1059	PRINTD (DBG_TX|DBG_BUS, "(pio)");
1060	pio_instead = 1;
1061      }
1062      if (tx_bytes <= MAX_TRANSFER_COUNT) {
1063	PRINTD (DBG_TX|DBG_BUS, "(simple or last multi)");
1064	if (!dev->tx_iovec) {
1065	  // end of last region
1066	  append_desc = 1;
1067	}
1068	dev->tx_bytes = 0;
1069      } else {
1070	PRINTD (DBG_TX|DBG_BUS, "(continuing multi)");
1071	dev->tx_bytes = tx_bytes - MAX_TRANSFER_COUNT;
1072	tx_bytes = MAX_TRANSFER_COUNT;
1073      }
1074    } else {
1075      // tx_bytes == 0 -- we're between regions
1076      // regions remaining to transfer
1077      unsigned int tx_regions = dev->tx_regions;
1078
1079      if (tx_regions) {
1080	// start a new region
1081	dev->tx_addr = dev->tx_iovec->iov_base;
1082	tx_bytes = dev->tx_iovec->iov_len;
1083	++dev->tx_iovec;
1084	dev->tx_regions = tx_regions - 1;
1085
1086	if (!test_bit (ultra, &dev->flags) || tx_bytes <= MAX_PIO_COUNT) {
1087	  PRINTD (DBG_TX|DBG_BUS, "(pio)");
1088	  pio_instead = 1;
1089	}
1090	if (tx_bytes <= MAX_TRANSFER_COUNT) {
1091	  PRINTD (DBG_TX|DBG_BUS, "(full region)");
1092	  dev->tx_bytes = 0;
1093	} else {
1094	  PRINTD (DBG_TX|DBG_BUS, "(start multi region)");
1095	  dev->tx_bytes = tx_bytes - MAX_TRANSFER_COUNT;
1096	  tx_bytes = MAX_TRANSFER_COUNT;
1097	}
1098      } else {
1099	// tx_regions == 0
1100	// that's all folks - end of frame
1101	struct sk_buff * skb = dev->tx_skb;
1102	dev->tx_iovec = NULL;
1103
1104	// VC layer stats
1105	atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
1106
1107	// free the skb
1108	hrz_kfree_skb (skb);
1109      }
1110    }
1111
1112    // note: writing TX_COUNT clears any interrupt condition
1113    if (tx_bytes) {
1114      if (pio_instead) {
1115	if (irq)
1116	  wr_regl (dev, MASTER_TX_COUNT_REG_OFF, 0);
1117	wrs_regb (dev, DATA_PORT_OFF, dev->tx_addr, tx_bytes);
1118	if (append_desc)
1119	  wr_regl (dev, TX_DESCRIPTOR_PORT_OFF, cpu_to_be32 (dev->tx_skb->len));
1120      } else {
1121	wr_regl (dev, MASTER_TX_ADDR_REG_OFF, virt_to_bus (dev->tx_addr));
1122	if (append_desc)
1123	  wr_regl (dev, TX_DESCRIPTOR_REG_OFF, cpu_to_be32 (dev->tx_skb->len));
1124	wr_regl (dev, MASTER_TX_COUNT_REG_OFF,
1125		 append_desc
1126		 ? tx_bytes | MASTER_TX_AUTO_APPEND_DESC
1127		 : tx_bytes);
1128      }
1129      dev->tx_addr += tx_bytes;
1130    } else {
1131      if (irq)
1132	wr_regl (dev, MASTER_TX_COUNT_REG_OFF, 0);
1133      YELLOW_LED_ON(dev);
1134      tx_release (dev);
1135    }
1136
1137#ifdef TAILRECURSIONWORKS
1138    // and we all bless optimised tail calls
1139    if (pio_instead)
1140      return tx_schedule (dev, 0);
1141    return;
1142#else
1143    // grrrrrrr!
1144    irq = 0;
1145  }
1146  return;
1147#endif
1148}
1149
1150/********** handle TX bus master complete events **********/
1151
1152static inline void tx_bus_master_complete_handler (hrz_dev * dev) {
1153  if (test_bit (tx_busy, &dev->flags)) {
1154    tx_schedule (dev, 1);
1155  } else {
1156    PRINTD (DBG_TX|DBG_ERR, "unexpected TX bus master completion");
1157    // clear interrupt condition on adapter
1158    wr_regl (dev, MASTER_TX_COUNT_REG_OFF, 0);
1159  }
1160  return;
1161}
1162
1163/********** move RX Q pointer to next item in circular buffer **********/
1164
1165// called only from IRQ sub-handler
1166static inline u32 rx_queue_entry_next (hrz_dev * dev) {
1167  u32 rx_queue_entry;
1168  spin_lock (&dev->mem_lock);
1169  rx_queue_entry = rd_mem (dev, &dev->rx_q_entry->entry);
1170  if (dev->rx_q_entry == dev->rx_q_wrap)
1171    dev->rx_q_entry = dev->rx_q_reset;
1172  else
1173    dev->rx_q_entry++;
1174  wr_regw (dev, RX_QUEUE_RD_PTR_OFF, dev->rx_q_entry - dev->rx_q_reset);
1175  spin_unlock (&dev->mem_lock);
1176  return rx_queue_entry;
1177}
1178
1179/********** handle RX disabled by device **********/
1180
1181static inline void rx_disabled_handler (hrz_dev * dev) {
1182  wr_regw (dev, RX_CONFIG_OFF, rd_regw (dev, RX_CONFIG_OFF) | RX_ENABLE);
1183  // count me please
1184  PRINTK (KERN_WARNING, "RX was disabled!");
1185}
1186
1187/********** handle RX data received by device **********/
1188
1189// called from IRQ handler
1190static inline void rx_data_av_handler (hrz_dev * dev) {
1191  u32 rx_queue_entry;
1192  u32 rx_queue_entry_flags;
1193  u16 rx_len;
1194  u16 rx_channel;
1195
1196  PRINTD (DBG_FLOW, "hrz_data_av_handler");
1197
1198  // try to grab rx lock (not possible during RX bus mastering)
1199  if (test_and_set_bit (rx_busy, &dev->flags)) {
1200    PRINTD (DBG_RX, "locked out of rx lock");
1201    return;
1202  }
1203  PRINTD (DBG_RX, "set rx_busy for dev %p", dev);
1204  // lock is cleared if we fail now, o/w after bus master completion
1205
1206  YELLOW_LED_OFF(dev);
1207
1208  rx_queue_entry = rx_queue_entry_next (dev);
1209
1210  rx_len = rx_q_entry_to_length (rx_queue_entry);
1211  rx_channel = rx_q_entry_to_rx_channel (rx_queue_entry);
1212
1213  WAIT_FLUSH_RX_COMPLETE (dev);
1214
1215  SELECT_RX_CHANNEL (dev, rx_channel);
1216
1217  PRINTD (DBG_RX, "rx_queue_entry is: %#x", rx_queue_entry);
1218  rx_queue_entry_flags = rx_queue_entry & (RX_CRC_32_OK|RX_COMPLETE_FRAME|SIMONS_DODGEY_MARKER);
1219
1220  if (!rx_len) {
1221    // (at least) bus-mastering breaks if we try to handle a
1222    // zero-length frame, besides AAL5 does not support them
1223    PRINTK (KERN_ERR, "zero-length frame!");
1224    rx_queue_entry_flags &= ~RX_COMPLETE_FRAME;
1225  }
1226
1227  if (rx_queue_entry_flags & SIMONS_DODGEY_MARKER) {
1228    PRINTD (DBG_RX|DBG_ERR, "Simon's marker detected!");
1229  }
1230  if (rx_queue_entry_flags == (RX_CRC_32_OK | RX_COMPLETE_FRAME)) {
1231    struct atm_vcc * atm_vcc;
1232
1233    PRINTD (DBG_RX, "got a frame on rx_channel %x len %u", rx_channel, rx_len);
1234
1235    atm_vcc = dev->rxer[rx_channel];
1236    // if no vcc is assigned to this channel, we should drop the frame
1237    // (is this what SIMONS etc. was trying to achieve?)
1238
1239    if (atm_vcc) {
1240
1241      if (atm_vcc->qos.rxtp.traffic_class != ATM_NONE) {
1242
1243	if (rx_len <= atm_vcc->qos.rxtp.max_sdu) {
1244
1245	  struct sk_buff * skb = atm_alloc_charge (atm_vcc, rx_len, GFP_ATOMIC);
1246	  if (skb) {
1247	    // remember this so we can push it later
1248	    dev->rx_skb = skb;
1249	    // remember this so we can flush it later
1250	    dev->rx_channel = rx_channel;
1251
1252	    // prepare socket buffer
1253	    skb_put (skb, rx_len);
1254	    ATM_SKB(skb)->vcc = atm_vcc;
1255
1256	    // simple transfer
1257	    // dev->rx_regions = 0;
1258	    // dev->rx_iovec = 0;
1259	    dev->rx_bytes = rx_len;
1260	    dev->rx_addr = skb->data;
1261	    PRINTD (DBG_RX, "RX start simple transfer (addr %p, len %d)",
1262		    skb->data, rx_len);
1263
1264	    // do the business
1265	    rx_schedule (dev, 0);
1266	    return;
1267
1268	  } else {
1269	    PRINTD (DBG_SKB|DBG_WARN, "failed to get skb");
1270	  }
1271
1272	} else {
1273	  PRINTK (KERN_INFO, "frame received on TX-only VC %x", rx_channel);
1274	  // do we count this?
1275	}
1276
1277      } else {
1278	PRINTK (KERN_WARNING, "dropped over-size frame");
1279	// do we count this?
1280      }
1281
1282    } else {
1283      PRINTD (DBG_WARN|DBG_VCC|DBG_RX, "no VCC for this frame (VC closed)");
1284      // do we count this?
1285    }
1286
1287  } else {
1288    // Wait update complete ? SPONG
1289  }
1290
1291  // RX was aborted
1292  YELLOW_LED_ON(dev);
1293
1294  FLUSH_RX_CHANNEL (dev,rx_channel);
1295  clear_bit (rx_busy, &dev->flags);
1296
1297  return;
1298}
1299
1300/********** interrupt handler **********/
1301
1302static irqreturn_t interrupt_handler(int irq, void *dev_id) {
1303  hrz_dev * dev = (hrz_dev *) dev_id;
1304  u32 int_source;
1305  unsigned int irq_ok;
1306
1307  PRINTD (DBG_FLOW, "interrupt_handler: %p", dev_id);
1308
1309  // definitely for us
1310  irq_ok = 0;
1311  while ((int_source = rd_regl (dev, INT_SOURCE_REG_OFF)
1312	  & INTERESTING_INTERRUPTS)) {
1313    // In the interests of fairness, the (inline) handlers below are
1314    // called in sequence and without immediate return to the head of
1315    // the while loop. This is only of issue for slow hosts (or when
1316    // debugging messages are on). Really slow hosts may find a fast
1317    // sender keeps them permanently in the IRQ handler. :(
1318
1319    // (only an issue for slow hosts) RX completion goes before
1320    // rx_data_av as the former implies rx_busy and so the latter
1321    // would just abort. If it reschedules another transfer
1322    // (continuing the same frame) then it will not clear rx_busy.
1323
1324    // (only an issue for slow hosts) TX completion goes before RX
1325    // data available as it is a much shorter routine - there is the
1326    // chance that any further transfers it schedules will be complete
1327    // by the time of the return to the head of the while loop
1328
1329    if (int_source & RX_BUS_MASTER_COMPLETE) {
1330      ++irq_ok;
1331      PRINTD (DBG_IRQ|DBG_BUS|DBG_RX, "rx_bus_master_complete asserted");
1332      rx_bus_master_complete_handler (dev);
1333    }
1334    if (int_source & TX_BUS_MASTER_COMPLETE) {
1335      ++irq_ok;
1336      PRINTD (DBG_IRQ|DBG_BUS|DBG_TX, "tx_bus_master_complete asserted");
1337      tx_bus_master_complete_handler (dev);
1338    }
1339    if (int_source & RX_DATA_AV) {
1340      ++irq_ok;
1341      PRINTD (DBG_IRQ|DBG_RX, "rx_data_av asserted");
1342      rx_data_av_handler (dev);
1343    }
1344  }
1345  if (irq_ok) {
1346    PRINTD (DBG_IRQ, "work done: %u", irq_ok);
1347  } else {
1348    PRINTD (DBG_IRQ|DBG_WARN, "spurious interrupt source: %#x", int_source);
1349  }
1350
1351  PRINTD (DBG_IRQ|DBG_FLOW, "interrupt_handler done: %p", dev_id);
1352  if (irq_ok)
1353	return IRQ_HANDLED;
1354  return IRQ_NONE;
1355}
1356
1357/********** housekeeping **********/
1358
1359static void do_housekeeping (unsigned long arg) {
1360  // just stats at the moment
1361  hrz_dev * dev = (hrz_dev *) arg;
1362
1363  // collect device-specific (not driver/atm-linux) stats here
1364  dev->tx_cell_count += rd_regw (dev, TX_CELL_COUNT_OFF);
1365  dev->rx_cell_count += rd_regw (dev, RX_CELL_COUNT_OFF);
1366  dev->hec_error_count += rd_regw (dev, HEC_ERROR_COUNT_OFF);
1367  dev->unassigned_cell_count += rd_regw (dev, UNASSIGNED_CELL_COUNT_OFF);
1368
1369  mod_timer (&dev->housekeeping, jiffies + HZ/10);
1370
1371  return;
1372}
1373
1374/********** find an idle channel for TX and set it up **********/
1375
1376// called with tx_busy set
1377static inline short setup_idle_tx_channel (hrz_dev * dev, hrz_vcc * vcc) {
1378  unsigned short idle_channels;
1379  short tx_channel = -1;
1380  unsigned int spin_count;
1381  PRINTD (DBG_FLOW|DBG_TX, "setup_idle_tx_channel %p", dev);
1382
1383  // better would be to fail immediately, the caller can then decide whether
1384  // to wait or drop (depending on whether this is UBR etc.)
1385  spin_count = 0;
1386  while (!(idle_channels = rd_regw (dev, TX_STATUS_OFF) & IDLE_CHANNELS_MASK)) {
1387    PRINTD (DBG_TX|DBG_WARN, "waiting for idle TX channel");
1388    // delay a bit here
1389    if (++spin_count > 100) {
1390      PRINTD (DBG_TX|DBG_ERR, "spun out waiting for idle TX channel");
1391      return -EBUSY;
1392    }
1393  }
1394
1395  // got an idle channel
1396  {
1397    // tx_idle ensures we look for idle channels in RR order
1398    int chan = dev->tx_idle;
1399
1400    int keep_going = 1;
1401    while (keep_going) {
1402      if (idle_channels & (1<<chan)) {
1403	tx_channel = chan;
1404	keep_going = 0;
1405      }
1406      ++chan;
1407      if (chan == TX_CHANS)
1408	chan = 0;
1409    }
1410
1411    dev->tx_idle = chan;
1412  }
1413
1414  // set up the channel we found
1415  {
1416    // Initialise the cell header in the transmit channel descriptor
1417    // a.k.a. prepare the channel and remember that we have done so.
1418
1419    tx_ch_desc * tx_desc = &memmap->tx_descs[tx_channel];
1420    u32 rd_ptr;
1421    u32 wr_ptr;
1422    u16 channel = vcc->channel;
1423
1424    unsigned long flags;
1425    spin_lock_irqsave (&dev->mem_lock, flags);
1426
1427    // Update the transmit channel record.
1428    dev->tx_channel_record[tx_channel] = channel;
1429
1430    // xBR channel
1431    update_tx_channel_config (dev, tx_channel, RATE_TYPE_ACCESS,
1432			      vcc->tx_xbr_bits);
1433
1434    // Update the PCR counter preload value etc.
1435    update_tx_channel_config (dev, tx_channel, PCR_TIMER_ACCESS,
1436			      vcc->tx_pcr_bits);
1437
1438
1439    // Initialise the read and write buffer pointers
1440    rd_ptr = rd_mem (dev, &tx_desc->rd_buf_type) & BUFFER_PTR_MASK;
1441    wr_ptr = rd_mem (dev, &tx_desc->wr_buf_type) & BUFFER_PTR_MASK;
1442
1443    // idle TX channels should have identical pointers
1444    if (rd_ptr != wr_ptr) {
1445      PRINTD (DBG_TX|DBG_ERR, "TX buffer pointers are broken!");
1446      // spin_unlock... return -E...
1447      // I wonder if gcc would get rid of one of the pointer aliases
1448    }
1449    PRINTD (DBG_TX, "TX buffer pointers are: rd %x, wr %x.",
1450	    rd_ptr, wr_ptr);
1451
1452    switch (vcc->aal) {
1453      case aal0:
1454	PRINTD (DBG_QOS|DBG_TX, "tx_channel: aal0");
1455	rd_ptr |= CHANNEL_TYPE_RAW_CELLS;
1456	wr_ptr |= CHANNEL_TYPE_RAW_CELLS;
1457	break;
1458      case aal34:
1459	PRINTD (DBG_QOS|DBG_TX, "tx_channel: aal34");
1460	rd_ptr |= CHANNEL_TYPE_AAL3_4;
1461	wr_ptr |= CHANNEL_TYPE_AAL3_4;
1462	break;
1463      case aal5:
1464	rd_ptr |= CHANNEL_TYPE_AAL5;
1465	wr_ptr |= CHANNEL_TYPE_AAL5;
1466	// Initialise the CRC
1467	wr_mem (dev, &tx_desc->partial_crc, INITIAL_CRC);
1468	break;
1469    }
1470
1471    wr_mem (dev, &tx_desc->rd_buf_type, rd_ptr);
1472    wr_mem (dev, &tx_desc->wr_buf_type, wr_ptr);
1473
1474    // Write the Cell Header
1475    // Payload Type, CLP and GFC would go here if non-zero
1476    wr_mem (dev, &tx_desc->cell_header, channel);
1477
1478    spin_unlock_irqrestore (&dev->mem_lock, flags);
1479  }
1480
1481  return tx_channel;
1482}
1483
1484/********** send a frame **********/
1485
1486static int hrz_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
1487  unsigned int spin_count;
1488  int free_buffers;
1489  hrz_dev * dev = HRZ_DEV(atm_vcc->dev);
1490  hrz_vcc * vcc = HRZ_VCC(atm_vcc);
1491  u16 channel = vcc->channel;
1492
1493  u32 buffers_required;
1494
1495  /* signed for error return */
1496  short tx_channel;
1497
1498  PRINTD (DBG_FLOW|DBG_TX, "hrz_send vc %x data %p len %u",
1499	  channel, skb->data, skb->len);
1500
1501  dump_skb (">>>", channel, skb);
1502
1503  if (atm_vcc->qos.txtp.traffic_class == ATM_NONE) {
1504    PRINTK (KERN_ERR, "attempt to send on RX-only VC %x", channel);
1505    hrz_kfree_skb (skb);
1506    return -EIO;
1507  }
1508
1509  // don't understand this
1510  ATM_SKB(skb)->vcc = atm_vcc;
1511
1512  if (skb->len > atm_vcc->qos.txtp.max_sdu) {
1513    PRINTK (KERN_ERR, "sk_buff length greater than agreed max_sdu, dropping...");
1514    hrz_kfree_skb (skb);
1515    return -EIO;
1516  }
1517
1518  if (!channel) {
1519    PRINTD (DBG_ERR|DBG_TX, "attempt to transmit on zero (rx_)channel");
1520    hrz_kfree_skb (skb);
1521    return -EIO;
1522  }
1523
1524
1525#ifdef DEBUG_HORIZON
1526  /* wey-hey! */
1527  if (channel == 1023) {
1528    unsigned int i;
1529    unsigned short d = 0;
1530    char * s = skb->data;
1531    if (*s++ == 'D') {
1532      for (i = 0; i < 4; ++i) {
1533	d = (d<<4) | ((*s <= '9') ? (*s - '0') : (*s - 'a' + 10));
1534	++s;
1535      }
1536      PRINTK (KERN_INFO, "debug bitmap is now %hx", debug = d);
1537    }
1538  }
1539#endif
1540
1541  // wait until TX is free and grab lock
1542  if (tx_hold (dev)) {
1543    hrz_kfree_skb (skb);
1544    return -ERESTARTSYS;
1545  }
1546
1547  // Wait for enough space to be available in transmit buffer memory.
1548
1549  // should be number of cells needed + 2 (according to hardware docs)
1550  // = ((framelen+8)+47) / 48 + 2
1551  buffers_required = (skb->len+(ATM_AAL5_TRAILER-1)) / ATM_CELL_PAYLOAD + 3;
1552
1553  // replace with timer and sleep, add dev->tx_buffers_queue (max 1 entry)
1554  spin_count = 0;
1555  while ((free_buffers = rd_regw (dev, TX_FREE_BUFFER_COUNT_OFF)) < buffers_required) {
1556    PRINTD (DBG_TX, "waiting for free TX buffers, got %d of %d",
1557	    free_buffers, buffers_required);
1558    // what is the appropriate delay? implement a timeout? (depending on line speed?)
1559    // mdelay (1);
1560    // what happens if we kill (current_pid, SIGKILL) ?
1561    schedule();
1562    if (++spin_count > 1000) {
1563      PRINTD (DBG_TX|DBG_ERR, "spun out waiting for tx buffers, got %d of %d",
1564	      free_buffers, buffers_required);
1565      tx_release (dev);
1566      hrz_kfree_skb (skb);
1567      return -ERESTARTSYS;
1568    }
1569  }
1570
1571  // Select a channel to transmit the frame on.
1572  if (channel == dev->last_vc) {
1573    PRINTD (DBG_TX, "last vc hack: hit");
1574    tx_channel = dev->tx_last;
1575  } else {
1576    PRINTD (DBG_TX, "last vc hack: miss");
1577    // Are we currently transmitting this VC on one of the channels?
1578    for (tx_channel = 0; tx_channel < TX_CHANS; ++tx_channel)
1579      if (dev->tx_channel_record[tx_channel] == channel) {
1580	PRINTD (DBG_TX, "vc already on channel: hit");
1581	break;
1582      }
1583    if (tx_channel == TX_CHANS) {
1584      PRINTD (DBG_TX, "vc already on channel: miss");
1585      // Find and set up an idle channel.
1586      tx_channel = setup_idle_tx_channel (dev, vcc);
1587      if (tx_channel < 0) {
1588	PRINTD (DBG_TX|DBG_ERR, "failed to get channel");
1589	tx_release (dev);
1590	return tx_channel;
1591      }
1592    }
1593
1594    PRINTD (DBG_TX, "got channel");
1595    SELECT_TX_CHANNEL(dev, tx_channel);
1596
1597    dev->last_vc = channel;
1598    dev->tx_last = tx_channel;
1599  }
1600
1601  PRINTD (DBG_TX, "using channel %u", tx_channel);
1602
1603  YELLOW_LED_OFF(dev);
1604
1605  // TX start transfer
1606
1607  {
1608    unsigned int tx_len = skb->len;
1609    unsigned int tx_iovcnt = skb_shinfo(skb)->nr_frags;
1610    // remember this so we can free it later
1611    dev->tx_skb = skb;
1612
1613    if (tx_iovcnt) {
1614      // scatter gather transfer
1615      dev->tx_regions = tx_iovcnt;
1616      dev->tx_iovec = NULL;		/* @@@ needs rewritten */
1617      dev->tx_bytes = 0;
1618      PRINTD (DBG_TX|DBG_BUS, "TX start scatter-gather transfer (iovec %p, len %d)",
1619	      skb->data, tx_len);
1620      tx_release (dev);
1621      hrz_kfree_skb (skb);
1622      return -EIO;
1623    } else {
1624      // simple transfer
1625      dev->tx_regions = 0;
1626      dev->tx_iovec = NULL;
1627      dev->tx_bytes = tx_len;
1628      dev->tx_addr = skb->data;
1629      PRINTD (DBG_TX|DBG_BUS, "TX start simple transfer (addr %p, len %d)",
1630	      skb->data, tx_len);
1631    }
1632
1633    // and do the business
1634    tx_schedule (dev, 0);
1635
1636  }
1637
1638  return 0;
1639}
1640
1641/********** reset a card **********/
1642
1643static void hrz_reset (const hrz_dev * dev) {
1644  u32 control_0_reg = rd_regl (dev, CONTROL_0_REG);
1645
1646  // why not set RESET_HORIZON to one and wait for the card to
1647  // reassert that bit as zero? Like so:
1648  control_0_reg = control_0_reg & RESET_HORIZON;
1649  wr_regl (dev, CONTROL_0_REG, control_0_reg);
1650  while (control_0_reg & RESET_HORIZON)
1651    control_0_reg = rd_regl (dev, CONTROL_0_REG);
1652
1653  // old reset code retained:
1654  wr_regl (dev, CONTROL_0_REG, control_0_reg |
1655	   RESET_ATM | RESET_RX | RESET_TX | RESET_HOST);
1656  // just guessing here
1657  udelay (1000);
1658
1659  wr_regl (dev, CONTROL_0_REG, control_0_reg);
1660}
1661
1662/********** read the burnt in address **********/
1663
1664static inline void WRITE_IT_WAIT (const hrz_dev *dev, u32 ctrl)
1665{
1666	wr_regl (dev, CONTROL_0_REG, ctrl);
1667	udelay (5);
1668}
1669
1670static inline void CLOCK_IT (const hrz_dev *dev, u32 ctrl)
1671{
1672	// DI must be valid around rising SK edge
1673	WRITE_IT_WAIT(dev, ctrl & ~SEEPROM_SK);
1674	WRITE_IT_WAIT(dev, ctrl | SEEPROM_SK);
1675}
1676
1677static u16 __devinit read_bia (const hrz_dev * dev, u16 addr)
1678{
1679  u32 ctrl = rd_regl (dev, CONTROL_0_REG);
1680
1681  const unsigned int addr_bits = 6;
1682  const unsigned int data_bits = 16;
1683
1684  unsigned int i;
1685
1686  u16 res;
1687
1688  ctrl &= ~(SEEPROM_CS | SEEPROM_SK | SEEPROM_DI);
1689  WRITE_IT_WAIT(dev, ctrl);
1690
1691  // wake Serial EEPROM and send 110 (READ) command
1692  ctrl |=  (SEEPROM_CS | SEEPROM_DI);
1693  CLOCK_IT(dev, ctrl);
1694
1695  ctrl |= SEEPROM_DI;
1696  CLOCK_IT(dev, ctrl);
1697
1698  ctrl &= ~SEEPROM_DI;
1699  CLOCK_IT(dev, ctrl);
1700
1701  for (i=0; i<addr_bits; i++) {
1702    if (addr & (1 << (addr_bits-1)))
1703      ctrl |= SEEPROM_DI;
1704    else
1705      ctrl &= ~SEEPROM_DI;
1706
1707    CLOCK_IT(dev, ctrl);
1708
1709    addr = addr << 1;
1710  }
1711
1712  // we could check that we have DO = 0 here
1713  ctrl &= ~SEEPROM_DI;
1714
1715  res = 0;
1716  for (i=0;i<data_bits;i++) {
1717    res = res >> 1;
1718
1719    CLOCK_IT(dev, ctrl);
1720
1721    if (rd_regl (dev, CONTROL_0_REG) & SEEPROM_DO)
1722      res |= (1 << (data_bits-1));
1723  }
1724
1725  ctrl &= ~(SEEPROM_SK | SEEPROM_CS);
1726  WRITE_IT_WAIT(dev, ctrl);
1727
1728  return res;
1729}
1730
1731/********** initialise a card **********/
1732
1733static int __devinit hrz_init (hrz_dev * dev) {
1734  int onefivefive;
1735
1736  u16 chan;
1737
1738  int buff_count;
1739
1740  HDW * mem;
1741
1742  cell_buf * tx_desc;
1743  cell_buf * rx_desc;
1744
1745  u32 ctrl;
1746
1747  ctrl = rd_regl (dev, CONTROL_0_REG);
1748  PRINTD (DBG_INFO, "ctrl0reg is %#x", ctrl);
1749  onefivefive = ctrl & ATM_LAYER_STATUS;
1750
1751  if (onefivefive)
1752    printk (DEV_LABEL ": Horizon Ultra (at 155.52 MBps)");
1753  else
1754    printk (DEV_LABEL ": Horizon (at 25 MBps)");
1755
1756  printk (":");
1757  // Reset the card to get everything in a known state
1758
1759  printk (" reset");
1760  hrz_reset (dev);
1761
1762  // Clear all the buffer memory
1763
1764  printk (" clearing memory");
1765
1766  for (mem = (HDW *) memmap; mem < (HDW *) (memmap + 1); ++mem)
1767    wr_mem (dev, mem, 0);
1768
1769  printk (" tx channels");
1770
1771  // All transmit eight channels are set up as AAL5 ABR channels with
1772  // a 16us cell spacing. Why?
1773
1774  // Channel 0 gets the free buffer at 100h, channel 1 gets the free
1775  // buffer at 110h etc.
1776
1777  for (chan = 0; chan < TX_CHANS; ++chan) {
1778    tx_ch_desc * tx_desc = &memmap->tx_descs[chan];
1779    cell_buf * buf = &memmap->inittxbufs[chan];
1780
1781    // initialise the read and write buffer pointers
1782    wr_mem (dev, &tx_desc->rd_buf_type, BUF_PTR(buf));
1783    wr_mem (dev, &tx_desc->wr_buf_type, BUF_PTR(buf));
1784
1785    // set the status of the initial buffers to empty
1786    wr_mem (dev, &buf->next, BUFF_STATUS_EMPTY);
1787  }
1788
1789  // Use space bufn3 at the moment for tx buffers
1790
1791  printk (" tx buffers");
1792
1793  tx_desc = memmap->bufn3;
1794
1795  wr_mem (dev, &memmap->txfreebufstart.next, BUF_PTR(tx_desc) | BUFF_STATUS_EMPTY);
1796
1797  for (buff_count = 0; buff_count < BUFN3_SIZE-1; buff_count++) {
1798    wr_mem (dev, &tx_desc->next, BUF_PTR(tx_desc+1) | BUFF_STATUS_EMPTY);
1799    tx_desc++;
1800  }
1801
1802  wr_mem (dev, &tx_desc->next, BUF_PTR(&memmap->txfreebufend) | BUFF_STATUS_EMPTY);
1803
1804  // Initialise the transmit free buffer count
1805  wr_regw (dev, TX_FREE_BUFFER_COUNT_OFF, BUFN3_SIZE);
1806
1807  printk (" rx channels");
1808
1809  // Initialise all of the receive channels to be AAL5 disabled with
1810  // an interrupt threshold of 0
1811
1812  for (chan = 0; chan < RX_CHANS; ++chan) {
1813    rx_ch_desc * rx_desc = &memmap->rx_descs[chan];
1814
1815    wr_mem (dev, &rx_desc->wr_buf_type, CHANNEL_TYPE_AAL5 | RX_CHANNEL_DISABLED);
1816  }
1817
1818  printk (" rx buffers");
1819
1820  // Use space bufn4 at the moment for rx buffers
1821
1822  rx_desc = memmap->bufn4;
1823
1824  wr_mem (dev, &memmap->rxfreebufstart.next, BUF_PTR(rx_desc) | BUFF_STATUS_EMPTY);
1825
1826  for (buff_count = 0; buff_count < BUFN4_SIZE-1; buff_count++) {
1827    wr_mem (dev, &rx_desc->next, BUF_PTR(rx_desc+1) | BUFF_STATUS_EMPTY);
1828
1829    rx_desc++;
1830  }
1831
1832  wr_mem (dev, &rx_desc->next, BUF_PTR(&memmap->rxfreebufend) | BUFF_STATUS_EMPTY);
1833
1834  // Initialise the receive free buffer count
1835  wr_regw (dev, RX_FREE_BUFFER_COUNT_OFF, BUFN4_SIZE);
1836
1837  // Initialize Horizons registers
1838
1839  // TX config
1840  wr_regw (dev, TX_CONFIG_OFF,
1841	   ABR_ROUND_ROBIN | TX_NORMAL_OPERATION | DRVR_DRVRBAR_ENABLE);
1842
1843  // RX config. Use 10-x VC bits, x VP bits, non user cells in channel 0.
1844  wr_regw (dev, RX_CONFIG_OFF,
1845	   DISCARD_UNUSED_VPI_VCI_BITS_SET | NON_USER_CELLS_IN_ONE_CHANNEL | vpi_bits);
1846
1847  // RX line config
1848  wr_regw (dev, RX_LINE_CONFIG_OFF,
1849	   LOCK_DETECT_ENABLE | FREQUENCY_DETECT_ENABLE | GXTALOUT_SELECT_DIV4);
1850
1851  // Set the max AAL5 cell count to be just enough to contain the
1852  // largest AAL5 frame that the user wants to receive
1853  wr_regw (dev, MAX_AAL5_CELL_COUNT_OFF,
1854	   (max_rx_size + ATM_AAL5_TRAILER + ATM_CELL_PAYLOAD - 1) / ATM_CELL_PAYLOAD);
1855
1856  // Enable receive
1857  wr_regw (dev, RX_CONFIG_OFF, rd_regw (dev, RX_CONFIG_OFF) | RX_ENABLE);
1858
1859  printk (" control");
1860
1861  // Drive the OE of the LEDs then turn the green LED on
1862  ctrl |= GREEN_LED_OE | YELLOW_LED_OE | GREEN_LED | YELLOW_LED;
1863  wr_regl (dev, CONTROL_0_REG, ctrl);
1864
1865  // Test for a 155-capable card
1866
1867  if (onefivefive) {
1868    // Select 155 mode... make this a choice (or: how do we detect
1869    // external line speed and switch?)
1870    ctrl |= ATM_LAYER_SELECT;
1871    wr_regl (dev, CONTROL_0_REG, ctrl);
1872
1873    // test SUNI-lite vs SAMBA
1874
1875    // Register 0x00 in the SUNI will have some of bits 3-7 set, and
1876    // they will always be zero for the SAMBA.  Ha!  Bloody hardware
1877    // engineers.  It'll never work.
1878
1879    if (rd_framer (dev, 0) & 0x00f0) {
1880      // SUNI
1881      printk (" SUNI");
1882
1883      // Reset, just in case
1884      wr_framer (dev, 0x00, 0x0080);
1885      wr_framer (dev, 0x00, 0x0000);
1886
1887      // Configure transmit FIFO
1888      wr_framer (dev, 0x63, rd_framer (dev, 0x63) | 0x0002);
1889
1890      // Set line timed mode
1891      wr_framer (dev, 0x05, rd_framer (dev, 0x05) | 0x0001);
1892    } else {
1893      // SAMBA
1894      printk (" SAMBA");
1895
1896      // Reset, just in case
1897      wr_framer (dev, 0, rd_framer (dev, 0) | 0x0001);
1898      wr_framer (dev, 0, rd_framer (dev, 0) &~ 0x0001);
1899
1900      // Turn off diagnostic loopback and enable line-timed mode
1901      wr_framer (dev, 0, 0x0002);
1902
1903      // Turn on transmit outputs
1904      wr_framer (dev, 2, 0x0B80);
1905    }
1906  } else {
1907    // Select 25 mode
1908    ctrl &= ~ATM_LAYER_SELECT;
1909
1910    // Madge B154 setup
1911    // none required?
1912  }
1913
1914  printk (" LEDs");
1915
1916  GREEN_LED_ON(dev);
1917  YELLOW_LED_ON(dev);
1918
1919  printk (" ESI=");
1920
1921  {
1922    u16 b = 0;
1923    int i;
1924    u8 * esi = dev->atm_dev->esi;
1925
1926    // in the card I have, EEPROM
1927    // addresses 0, 1, 2 contain 0
1928    // addresess 5, 6 etc. contain ffff
1929    // NB: Madge prefix is 00 00 f6 (which is 00 00 6f in Ethernet bit order)
1930    // the read_bia routine gets the BIA in Ethernet bit order
1931
1932    for (i=0; i < ESI_LEN; ++i) {
1933      if (i % 2 == 0)
1934	b = read_bia (dev, i/2 + 2);
1935      else
1936	b = b >> 8;
1937      esi[i] = b & 0xFF;
1938      printk ("%02x", esi[i]);
1939    }
1940  }
1941
1942  // Enable RX_Q and ?X_COMPLETE interrupts only
1943  wr_regl (dev, INT_ENABLE_REG_OFF, INTERESTING_INTERRUPTS);
1944  printk (" IRQ on");
1945
1946  printk (".\n");
1947
1948  return onefivefive;
1949}
1950
1951/********** check max_sdu **********/
1952
1953static int check_max_sdu (hrz_aal aal, struct atm_trafprm * tp, unsigned int max_frame_size) {
1954  PRINTD (DBG_FLOW|DBG_QOS, "check_max_sdu");
1955
1956  switch (aal) {
1957    case aal0:
1958      if (!(tp->max_sdu)) {
1959	PRINTD (DBG_QOS, "defaulting max_sdu");
1960	tp->max_sdu = ATM_AAL0_SDU;
1961      } else if (tp->max_sdu != ATM_AAL0_SDU) {
1962	PRINTD (DBG_QOS|DBG_ERR, "rejecting max_sdu");
1963	return -EINVAL;
1964      }
1965      break;
1966    case aal34:
1967      if (tp->max_sdu == 0 || tp->max_sdu > ATM_MAX_AAL34_PDU) {
1968	PRINTD (DBG_QOS, "%sing max_sdu", tp->max_sdu ? "capp" : "default");
1969	tp->max_sdu = ATM_MAX_AAL34_PDU;
1970      }
1971      break;
1972    case aal5:
1973      if (tp->max_sdu == 0 || tp->max_sdu > max_frame_size) {
1974	PRINTD (DBG_QOS, "%sing max_sdu", tp->max_sdu ? "capp" : "default");
1975	tp->max_sdu = max_frame_size;
1976      }
1977      break;
1978  }
1979  return 0;
1980}
1981
1982/********** check pcr **********/
1983
1984// something like this should be part of ATM Linux
1985static int atm_pcr_check (struct atm_trafprm * tp, unsigned int pcr) {
1986  // we are assuming non-UBR, and non-special values of pcr
1987  if (tp->min_pcr == ATM_MAX_PCR)
1988    PRINTD (DBG_QOS, "luser gave min_pcr = ATM_MAX_PCR");
1989  else if (tp->min_pcr < 0)
1990    PRINTD (DBG_QOS, "luser gave negative min_pcr");
1991  else if (tp->min_pcr && tp->min_pcr > pcr)
1992    PRINTD (DBG_QOS, "pcr less than min_pcr");
1993  else
1994    // !! max_pcr = UNSPEC (0) is equivalent to max_pcr = MAX (-1)
1995    // easier to #define ATM_MAX_PCR 0 and have all rates unsigned?
1996    // [this would get rid of next two conditionals]
1997    if ((0) && tp->max_pcr == ATM_MAX_PCR)
1998      PRINTD (DBG_QOS, "luser gave max_pcr = ATM_MAX_PCR");
1999    else if ((tp->max_pcr != ATM_MAX_PCR) && tp->max_pcr < 0)
2000      PRINTD (DBG_QOS, "luser gave negative max_pcr");
2001    else if (tp->max_pcr && tp->max_pcr != ATM_MAX_PCR && tp->max_pcr < pcr)
2002      PRINTD (DBG_QOS, "pcr greater than max_pcr");
2003    else {
2004      // each limit unspecified or not violated
2005      PRINTD (DBG_QOS, "xBR(pcr) OK");
2006      return 0;
2007    }
2008  PRINTD (DBG_QOS, "pcr=%u, tp: min_pcr=%d, pcr=%d, max_pcr=%d",
2009	  pcr, tp->min_pcr, tp->pcr, tp->max_pcr);
2010  return -EINVAL;
2011}
2012
2013/********** open VC **********/
2014
2015static int hrz_open (struct atm_vcc *atm_vcc)
2016{
2017  int error;
2018  u16 channel;
2019
2020  struct atm_qos * qos;
2021  struct atm_trafprm * txtp;
2022  struct atm_trafprm * rxtp;
2023
2024  hrz_dev * dev = HRZ_DEV(atm_vcc->dev);
2025  hrz_vcc vcc;
2026  hrz_vcc * vccp; // allocated late
2027  short vpi = atm_vcc->vpi;
2028  int vci = atm_vcc->vci;
2029  PRINTD (DBG_FLOW|DBG_VCC, "hrz_open %x %x", vpi, vci);
2030
2031#ifdef ATM_VPI_UNSPEC
2032  // UNSPEC is deprecated, remove this code eventually
2033  if (vpi == ATM_VPI_UNSPEC || vci == ATM_VCI_UNSPEC) {
2034    PRINTK (KERN_WARNING, "rejecting open with unspecified VPI/VCI (deprecated)");
2035    return -EINVAL;
2036  }
2037#endif
2038
2039  error = vpivci_to_channel (&channel, vpi, vci);
2040  if (error) {
2041    PRINTD (DBG_WARN|DBG_VCC, "VPI/VCI out of range: %hd/%d", vpi, vci);
2042    return error;
2043  }
2044
2045  vcc.channel = channel;
2046  // max speed for the moment
2047  vcc.tx_rate = 0x0;
2048
2049  qos = &atm_vcc->qos;
2050
2051  // check AAL and remember it
2052  switch (qos->aal) {
2053    case ATM_AAL0:
2054      // we would if it were 48 bytes and not 52!
2055      PRINTD (DBG_QOS|DBG_VCC, "AAL0");
2056      vcc.aal = aal0;
2057      break;
2058    case ATM_AAL34:
2059      // we would if I knew how do the SAR!
2060      PRINTD (DBG_QOS|DBG_VCC, "AAL3/4");
2061      vcc.aal = aal34;
2062      break;
2063    case ATM_AAL5:
2064      PRINTD (DBG_QOS|DBG_VCC, "AAL5");
2065      vcc.aal = aal5;
2066      break;
2067    default:
2068      PRINTD (DBG_QOS|DBG_VCC, "Bad AAL!");
2069      return -EINVAL;
2070      break;
2071  }
2072
2073  // TX traffic parameters
2074
2075  // there are two, interrelated problems here: 1. the reservation of
2076  // PCR is not a binary choice, we are given bounds and/or a
2077  // desirable value; 2. the device is only capable of certain values,
2078  // most of which are not integers. It is almost certainly acceptable
2079  // to be off by a maximum of 1 to 10 cps.
2080
2081  // Pragmatic choice: always store an integral PCR as that which has
2082  // been allocated, even if we allocate a little (or a lot) less,
2083  // after rounding. The actual allocation depends on what we can
2084  // manage with our rate selection algorithm. The rate selection
2085  // algorithm is given an integral PCR and a tolerance and told
2086  // whether it should round the value up or down if the tolerance is
2087  // exceeded; it returns: a) the actual rate selected (rounded up to
2088  // the nearest integer), b) a bit pattern to feed to the timer
2089  // register, and c) a failure value if no applicable rate exists.
2090
2091  // Part of the job is done by atm_pcr_goal which gives us a PCR
2092  // specification which says: EITHER grab the maximum available PCR
2093  // (and perhaps a lower bound which we musn't pass), OR grab this
2094  // amount, rounding down if you have to (and perhaps a lower bound
2095  // which we musn't pass) OR grab this amount, rounding up if you
2096  // have to (and perhaps an upper bound which we musn't pass). If any
2097  // bounds ARE passed we fail. Note that rounding is only rounding to
2098  // match device limitations, we do not round down to satisfy
2099  // bandwidth availability even if this would not violate any given
2100  // lower bound.
2101
2102  // Note: telephony = 64kb/s = 48 byte cell payload @ 500/3 cells/s
2103  // (say) so this is not even a binary fixpoint cell rate (but this
2104  // device can do it). To avoid this sort of hassle we use a
2105  // tolerance parameter (currently fixed at 10 cps).
2106
2107  PRINTD (DBG_QOS, "TX:");
2108
2109  txtp = &qos->txtp;
2110
2111  // set up defaults for no traffic
2112  vcc.tx_rate = 0;
2113  // who knows what would actually happen if you try and send on this?
2114  vcc.tx_xbr_bits = IDLE_RATE_TYPE;
2115  vcc.tx_pcr_bits = CLOCK_DISABLE;
2116
2117  if (txtp->traffic_class != ATM_NONE) {
2118    error = check_max_sdu (vcc.aal, txtp, max_tx_size);
2119    if (error) {
2120      PRINTD (DBG_QOS, "TX max_sdu check failed");
2121      return error;
2122    }
2123
2124    switch (txtp->traffic_class) {
2125      case ATM_UBR: {
2126	// we take "the PCR" as a rate-cap
2127	// not reserved
2128	vcc.tx_rate = 0;
2129	make_rate (dev, 1<<30, round_nearest, &vcc.tx_pcr_bits, NULL);
2130	vcc.tx_xbr_bits = ABR_RATE_TYPE;
2131	break;
2132      }
2133      case ATM_CBR: {
2134	int pcr = atm_pcr_goal (txtp);
2135	rounding r;
2136	if (!pcr) {
2137	  // down vs. up, remaining bandwidth vs. unlimited bandwidth!!
2138	  // should really have: once someone gets unlimited bandwidth
2139	  // that no more non-UBR channels can be opened until the
2140	  // unlimited one closes?? For the moment, round_down means
2141	  // greedy people actually get something and not nothing
2142	  r = round_down;
2143	  // slight race (no locking) here so we may get -EAGAIN
2144	  // later; the greedy bastards would deserve it :)
2145	  PRINTD (DBG_QOS, "snatching all remaining TX bandwidth");
2146	  pcr = dev->tx_avail;
2147	} else if (pcr < 0) {
2148	  r = round_down;
2149	  pcr = -pcr;
2150	} else {
2151	  r = round_up;
2152	}
2153	error = make_rate_with_tolerance (dev, pcr, r, 10,
2154					  &vcc.tx_pcr_bits, &vcc.tx_rate);
2155	if (error) {
2156	  PRINTD (DBG_QOS, "could not make rate from TX PCR");
2157	  return error;
2158	}
2159	// not really clear what further checking is needed
2160	error = atm_pcr_check (txtp, vcc.tx_rate);
2161	if (error) {
2162	  PRINTD (DBG_QOS, "TX PCR failed consistency check");
2163	  return error;
2164	}
2165	vcc.tx_xbr_bits = CBR_RATE_TYPE;
2166	break;
2167      }
2168      default: {
2169	PRINTD (DBG_QOS, "unsupported TX traffic class");
2170	return -EINVAL;
2171	break;
2172      }
2173    }
2174  }
2175
2176  // RX traffic parameters
2177
2178  PRINTD (DBG_QOS, "RX:");
2179
2180  rxtp = &qos->rxtp;
2181
2182  // set up defaults for no traffic
2183  vcc.rx_rate = 0;
2184
2185  if (rxtp->traffic_class != ATM_NONE) {
2186    error = check_max_sdu (vcc.aal, rxtp, max_rx_size);
2187    if (error) {
2188      PRINTD (DBG_QOS, "RX max_sdu check failed");
2189      return error;
2190    }
2191    switch (rxtp->traffic_class) {
2192      case ATM_UBR: {
2193	// not reserved
2194	break;
2195      }
2196      case ATM_CBR: {
2197	int pcr = atm_pcr_goal (rxtp);
2198	if (!pcr) {
2199	  // slight race (no locking) here so we may get -EAGAIN
2200	  // later; the greedy bastards would deserve it :)
2201	  PRINTD (DBG_QOS, "snatching all remaining RX bandwidth");
2202	  pcr = dev->rx_avail;
2203	} else if (pcr < 0) {
2204	  pcr = -pcr;
2205	}
2206	vcc.rx_rate = pcr;
2207	// not really clear what further checking is needed
2208	error = atm_pcr_check (rxtp, vcc.rx_rate);
2209	if (error) {
2210	  PRINTD (DBG_QOS, "RX PCR failed consistency check");
2211	  return error;
2212	}
2213	break;
2214      }
2215      default: {
2216	PRINTD (DBG_QOS, "unsupported RX traffic class");
2217	return -EINVAL;
2218	break;
2219      }
2220    }
2221  }
2222
2223
2224  // late abort useful for diagnostics
2225  if (vcc.aal != aal5) {
2226    PRINTD (DBG_QOS, "AAL not supported");
2227    return -EINVAL;
2228  }
2229
2230  // get space for our vcc stuff and copy parameters into it
2231  vccp = kmalloc (sizeof(hrz_vcc), GFP_KERNEL);
2232  if (!vccp) {
2233    PRINTK (KERN_ERR, "out of memory!");
2234    return -ENOMEM;
2235  }
2236  *vccp = vcc;
2237
2238  // clear error and grab cell rate resource lock
2239  error = 0;
2240  spin_lock (&dev->rate_lock);
2241
2242  if (vcc.tx_rate > dev->tx_avail) {
2243    PRINTD (DBG_QOS, "not enough TX PCR left");
2244    error = -EAGAIN;
2245  }
2246
2247  if (vcc.rx_rate > dev->rx_avail) {
2248    PRINTD (DBG_QOS, "not enough RX PCR left");
2249    error = -EAGAIN;
2250  }
2251
2252  if (!error) {
2253    // really consume cell rates
2254    dev->tx_avail -= vcc.tx_rate;
2255    dev->rx_avail -= vcc.rx_rate;
2256    PRINTD (DBG_QOS|DBG_VCC, "reserving %u TX PCR and %u RX PCR",
2257	    vcc.tx_rate, vcc.rx_rate);
2258  }
2259
2260  // release lock and exit on error
2261  spin_unlock (&dev->rate_lock);
2262  if (error) {
2263    PRINTD (DBG_QOS|DBG_VCC, "insufficient cell rate resources");
2264    kfree (vccp);
2265    return error;
2266  }
2267
2268  // this is "immediately before allocating the connection identifier
2269  // in hardware" - so long as the next call does not fail :)
2270  set_bit(ATM_VF_ADDR,&atm_vcc->flags);
2271
2272  // any errors here are very serious and should never occur
2273
2274  if (rxtp->traffic_class != ATM_NONE) {
2275    if (dev->rxer[channel]) {
2276      PRINTD (DBG_ERR|DBG_VCC, "VC already open for RX");
2277      error = -EBUSY;
2278    }
2279    if (!error)
2280      error = hrz_open_rx (dev, channel);
2281    if (error) {
2282      kfree (vccp);
2283      return error;
2284    }
2285    // this link allows RX frames through
2286    dev->rxer[channel] = atm_vcc;
2287  }
2288
2289  // success, set elements of atm_vcc
2290  atm_vcc->dev_data = (void *) vccp;
2291
2292  // indicate readiness
2293  set_bit(ATM_VF_READY,&atm_vcc->flags);
2294
2295  return 0;
2296}
2297
2298/********** close VC **********/
2299
2300static void hrz_close (struct atm_vcc * atm_vcc) {
2301  hrz_dev * dev = HRZ_DEV(atm_vcc->dev);
2302  hrz_vcc * vcc = HRZ_VCC(atm_vcc);
2303  u16 channel = vcc->channel;
2304  PRINTD (DBG_VCC|DBG_FLOW, "hrz_close");
2305
2306  // indicate unreadiness
2307  clear_bit(ATM_VF_READY,&atm_vcc->flags);
2308
2309  if (atm_vcc->qos.txtp.traffic_class != ATM_NONE) {
2310    unsigned int i;
2311
2312    // let any TX on this channel that has started complete
2313    // no restart, just keep trying
2314    while (tx_hold (dev))
2315      ;
2316    // remove record of any tx_channel having been setup for this channel
2317    for (i = 0; i < TX_CHANS; ++i)
2318      if (dev->tx_channel_record[i] == channel) {
2319	dev->tx_channel_record[i] = -1;
2320	break;
2321      }
2322    if (dev->last_vc == channel)
2323      dev->tx_last = -1;
2324    tx_release (dev);
2325  }
2326
2327  if (atm_vcc->qos.rxtp.traffic_class != ATM_NONE) {
2328    // disable RXing - it tries quite hard
2329    hrz_close_rx (dev, channel);
2330    // forget the vcc - no more skbs will be pushed
2331    if (atm_vcc != dev->rxer[channel])
2332      PRINTK (KERN_ERR, "%s atm_vcc=%p rxer[channel]=%p",
2333	      "arghhh! we're going to die!",
2334	      atm_vcc, dev->rxer[channel]);
2335    dev->rxer[channel] = NULL;
2336  }
2337
2338  // atomically release our rate reservation
2339  spin_lock (&dev->rate_lock);
2340  PRINTD (DBG_QOS|DBG_VCC, "releasing %u TX PCR and %u RX PCR",
2341	  vcc->tx_rate, vcc->rx_rate);
2342  dev->tx_avail += vcc->tx_rate;
2343  dev->rx_avail += vcc->rx_rate;
2344  spin_unlock (&dev->rate_lock);
2345
2346  // free our structure
2347  kfree (vcc);
2348  // say the VPI/VCI is free again
2349  clear_bit(ATM_VF_ADDR,&atm_vcc->flags);
2350}
2351
2352
2353
2354/********** proc file contents **********/
2355
2356static int hrz_proc_read (struct atm_dev * atm_dev, loff_t * pos, char * page) {
2357  hrz_dev * dev = HRZ_DEV(atm_dev);
2358  int left = *pos;
2359  PRINTD (DBG_FLOW, "hrz_proc_read");
2360
2361  /* more diagnostics here? */
2362
2363
2364  if (!left--)
2365    return sprintf (page,
2366		    "cells: TX %lu, RX %lu, HEC errors %lu, unassigned %lu.\n",
2367		    dev->tx_cell_count, dev->rx_cell_count,
2368		    dev->hec_error_count, dev->unassigned_cell_count);
2369
2370  if (!left--)
2371    return sprintf (page,
2372		    "free cell buffers: TX %hu, RX %hu+%hu.\n",
2373		    rd_regw (dev, TX_FREE_BUFFER_COUNT_OFF),
2374		    rd_regw (dev, RX_FREE_BUFFER_COUNT_OFF),
2375		    dev->noof_spare_buffers);
2376
2377  if (!left--)
2378    return sprintf (page,
2379		    "cps remaining: TX %u, RX %u\n",
2380		    dev->tx_avail, dev->rx_avail);
2381
2382  return 0;
2383}
2384
2385static const struct atmdev_ops hrz_ops = {
2386  .open	= hrz_open,
2387  .close	= hrz_close,
2388  .send	= hrz_send,
2389  .proc_read	= hrz_proc_read,
2390  .owner	= THIS_MODULE,
2391};
2392
2393static int __devinit hrz_probe(struct pci_dev *pci_dev, const struct pci_device_id *pci_ent)
2394{
2395	hrz_dev * dev;
2396	int err = 0;
2397
2398	// adapter slot free, read resources from PCI configuration space
2399	u32 iobase = pci_resource_start (pci_dev, 0);
2400	u32 * membase = bus_to_virt (pci_resource_start (pci_dev, 1));
2401	unsigned int irq;
2402	unsigned char lat;
2403
2404	PRINTD (DBG_FLOW, "hrz_probe");
2405
2406	if (pci_enable_device(pci_dev))
2407		return -EINVAL;
2408
2409	if (!request_region(iobase, HRZ_IO_EXTENT, DEV_LABEL)) {
2410		return -EINVAL;
2411		goto out_disable;
2412	}
2413
2414	dev = kzalloc(sizeof(hrz_dev), GFP_KERNEL);
2415	if (!dev) {
2416		// perhaps we should be nice: deregister all adapters and abort?
2417		PRINTD(DBG_ERR, "out of memory");
2418		err = -ENOMEM;
2419		goto out_release;
2420	}
2421
2422	pci_set_drvdata(pci_dev, dev);
2423
2424	// grab IRQ and install handler - move this someplace more sensible
2425	irq = pci_dev->irq;
2426	if (request_irq(irq,
2427			interrupt_handler,
2428			IRQF_SHARED, /* irqflags guess */
2429			DEV_LABEL, /* name guess */
2430			dev)) {
2431		PRINTD(DBG_WARN, "request IRQ failed!");
2432		err = -EINVAL;
2433		goto out_free;
2434	}
2435
2436	PRINTD(DBG_INFO, "found Madge ATM adapter (hrz) at: IO %x, IRQ %u, MEM %p",
2437	       iobase, irq, membase);
2438
2439	dev->atm_dev = atm_dev_register(DEV_LABEL, &hrz_ops, -1, NULL);
2440	if (!(dev->atm_dev)) {
2441		PRINTD(DBG_ERR, "failed to register Madge ATM adapter");
2442		err = -EINVAL;
2443		goto out_free_irq;
2444	}
2445
2446	PRINTD(DBG_INFO, "registered Madge ATM adapter (no. %d) (%p) at %p",
2447	       dev->atm_dev->number, dev, dev->atm_dev);
2448	dev->atm_dev->dev_data = (void *) dev;
2449	dev->pci_dev = pci_dev;
2450
2451	// enable bus master accesses
2452	pci_set_master(pci_dev);
2453
2454	// frobnicate latency (upwards, usually)
2455	pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &lat);
2456	if (pci_lat) {
2457		PRINTD(DBG_INFO, "%s PCI latency timer from %hu to %hu",
2458		       "changing", lat, pci_lat);
2459		pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, pci_lat);
2460	} else if (lat < MIN_PCI_LATENCY) {
2461		PRINTK(KERN_INFO, "%s PCI latency timer from %hu to %hu",
2462		       "increasing", lat, MIN_PCI_LATENCY);
2463		pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, MIN_PCI_LATENCY);
2464	}
2465
2466	dev->iobase = iobase;
2467	dev->irq = irq;
2468	dev->membase = membase;
2469
2470	dev->rx_q_entry = dev->rx_q_reset = &memmap->rx_q_entries[0];
2471	dev->rx_q_wrap  = &memmap->rx_q_entries[RX_CHANS-1];
2472
2473	// these next three are performance hacks
2474	dev->last_vc = -1;
2475	dev->tx_last = -1;
2476	dev->tx_idle = 0;
2477
2478	dev->tx_regions = 0;
2479	dev->tx_bytes = 0;
2480	dev->tx_skb = NULL;
2481	dev->tx_iovec = NULL;
2482
2483	dev->tx_cell_count = 0;
2484	dev->rx_cell_count = 0;
2485	dev->hec_error_count = 0;
2486	dev->unassigned_cell_count = 0;
2487
2488	dev->noof_spare_buffers = 0;
2489
2490	{
2491		unsigned int i;
2492		for (i = 0; i < TX_CHANS; ++i)
2493			dev->tx_channel_record[i] = -1;
2494	}
2495
2496	dev->flags = 0;
2497
2498	// Allocate cell rates and remember ASIC version
2499	// Fibre: ATM_OC3_PCR = 1555200000/8/270*260/53 - 29/53
2500	// Copper: (WRONG) we want 6 into the above, close to 25Mb/s
2501	// Copper: (plagarise!) 25600000/8/270*260/53 - n/53
2502
2503	if (hrz_init(dev)) {
2504		// to be really pedantic, this should be ATM_OC3c_PCR
2505		dev->tx_avail = ATM_OC3_PCR;
2506		dev->rx_avail = ATM_OC3_PCR;
2507		set_bit(ultra, &dev->flags); // NOT "|= ultra" !
2508	} else {
2509		dev->tx_avail = ((25600000/8)*26)/(27*53);
2510		dev->rx_avail = ((25600000/8)*26)/(27*53);
2511		PRINTD(DBG_WARN, "Buggy ASIC: no TX bus-mastering.");
2512	}
2513
2514	// rate changes spinlock
2515	spin_lock_init(&dev->rate_lock);
2516
2517	// on-board memory access spinlock; we want atomic reads and
2518	// writes to adapter memory (handles IRQ and SMP)
2519	spin_lock_init(&dev->mem_lock);
2520
2521	init_waitqueue_head(&dev->tx_queue);
2522
2523	// vpi in 0..4, vci in 6..10
2524	dev->atm_dev->ci_range.vpi_bits = vpi_bits;
2525	dev->atm_dev->ci_range.vci_bits = 10-vpi_bits;
2526
2527	init_timer(&dev->housekeeping);
2528	dev->housekeeping.function = do_housekeeping;
2529	dev->housekeeping.data = (unsigned long) dev;
2530	mod_timer(&dev->housekeeping, jiffies);
2531
2532out:
2533	return err;
2534
2535out_free_irq:
2536	free_irq(dev->irq, dev);
2537out_free:
2538	kfree(dev);
2539out_release:
2540	release_region(iobase, HRZ_IO_EXTENT);
2541out_disable:
2542	pci_disable_device(pci_dev);
2543	goto out;
2544}
2545
2546static void __devexit hrz_remove_one(struct pci_dev *pci_dev)
2547{
2548	hrz_dev *dev;
2549
2550	dev = pci_get_drvdata(pci_dev);
2551
2552	PRINTD(DBG_INFO, "closing %p (atm_dev = %p)", dev, dev->atm_dev);
2553	del_timer_sync(&dev->housekeeping);
2554	hrz_reset(dev);
2555	atm_dev_deregister(dev->atm_dev);
2556	free_irq(dev->irq, dev);
2557	release_region(dev->iobase, HRZ_IO_EXTENT);
2558	kfree(dev);
2559
2560	pci_disable_device(pci_dev);
2561}
2562
2563static void __init hrz_check_args (void) {
2564#ifdef DEBUG_HORIZON
2565  PRINTK (KERN_NOTICE, "debug bitmap is %hx", debug &= DBG_MASK);
2566#else
2567  if (debug)
2568    PRINTK (KERN_NOTICE, "no debug support in this image");
2569#endif
2570
2571  if (vpi_bits > HRZ_MAX_VPI)
2572    PRINTK (KERN_ERR, "vpi_bits has been limited to %hu",
2573	    vpi_bits = HRZ_MAX_VPI);
2574
2575  if (max_tx_size < 0 || max_tx_size > TX_AAL5_LIMIT)
2576    PRINTK (KERN_NOTICE, "max_tx_size has been limited to %hu",
2577	    max_tx_size = TX_AAL5_LIMIT);
2578
2579  if (max_rx_size < 0 || max_rx_size > RX_AAL5_LIMIT)
2580    PRINTK (KERN_NOTICE, "max_rx_size has been limited to %hu",
2581	    max_rx_size = RX_AAL5_LIMIT);
2582
2583  return;
2584}
2585
2586MODULE_AUTHOR(maintainer_string);
2587MODULE_DESCRIPTION(description_string);
2588MODULE_LICENSE("GPL");
2589module_param(debug, ushort, 0644);
2590module_param(vpi_bits, ushort, 0);
2591module_param(max_tx_size, int, 0);
2592module_param(max_rx_size, int, 0);
2593module_param(pci_lat, byte, 0);
2594MODULE_PARM_DESC(debug, "debug bitmap, see .h file");
2595MODULE_PARM_DESC(vpi_bits, "number of bits (0..4) to allocate to VPIs");
2596MODULE_PARM_DESC(max_tx_size, "maximum size of TX AAL5 frames");
2597MODULE_PARM_DESC(max_rx_size, "maximum size of RX AAL5 frames");
2598MODULE_PARM_DESC(pci_lat, "PCI latency in bus cycles");
2599
2600static struct pci_device_id hrz_pci_tbl[] = {
2601	{ PCI_VENDOR_ID_MADGE, PCI_DEVICE_ID_MADGE_HORIZON, PCI_ANY_ID, PCI_ANY_ID,
2602	  0, 0, 0 },
2603	{ 0, }
2604};
2605
2606MODULE_DEVICE_TABLE(pci, hrz_pci_tbl);
2607
2608static struct pci_driver hrz_driver = {
2609	.name =		"horizon",
2610	.probe =	hrz_probe,
2611	.remove =	__devexit_p(hrz_remove_one),
2612	.id_table =	hrz_pci_tbl,
2613};
2614
2615/********** module entry **********/
2616
2617static int __init hrz_module_init (void) {
2618  // sanity check - cast is needed since printk does not support %Zu
2619  if (sizeof(struct MEMMAP) != 128*1024/4) {
2620    PRINTK (KERN_ERR, "Fix struct MEMMAP (is %lu fakewords).",
2621	    (unsigned long) sizeof(struct MEMMAP));
2622    return -ENOMEM;
2623  }
2624
2625  show_version();
2626
2627  // check arguments
2628  hrz_check_args();
2629
2630  // get the juice
2631  return pci_register_driver(&hrz_driver);
2632}
2633
2634/********** module exit **********/
2635
2636static void __exit hrz_module_exit (void) {
2637  PRINTD (DBG_FLOW, "cleanup_module");
2638
2639  pci_unregister_driver(&hrz_driver);
2640}
2641
2642module_init(hrz_module_init);
2643module_exit(hrz_module_exit);
2644