/*-
 * Copyright (c) 2006 M. Warner Losh.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* TODO
 *
 * 1) Turn on the clock in pmc?  Turn off?
 * 2) GPIO initialization in board setup code.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/arm/at91/if_ate.c 204476 2010-02-28 18:06:54Z ticso $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <machine/bus.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_mib.h>
#include <net/if_types.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#endif

#include <net/bpf.h>
#include <net/bpfdesc.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <arm/at91/if_atereg.h>

#include "miibus_if.h"

#define	ATE_MAX_TX_BUFFERS	2	/* We have ping-pong tx buffers */
#define	ATE_MAX_RX_BUFFERS	64

/*
 * Driver-specific flags.
 */
#define	ATE_FLAG_MULTICAST	0x01

struct ate_softc
{
	struct ifnet	*ifp;		/* ifnet pointer */
	struct mtx	sc_mtx;		/* Basically a perimeter lock */
	device_t	dev;		/* Myself */
	device_t	miibus;		/* My child miibus */
	struct resource	*irq_res;	/* IRQ resource */
	struct resource	*mem_res;	/* Memory resource */
	struct callout	tick_ch;	/* Tick callout */
	struct ifmib_iso_8802_3 mibdata; /* Stuff for network mgmt */
	struct mbuf	*sent_mbuf[ATE_MAX_TX_BUFFERS]; /* Sent mbufs */
	bus_dma_tag_t	mtag;		/* bus dma tag for mbufs */
	bus_dma_tag_t	rxtag;
	bus_dma_tag_t	rx_desc_tag;
	bus_dmamap_t	rx_desc_map;
	bus_dmamap_t	rx_map[ATE_MAX_RX_BUFFERS];
	bus_dmamap_t	tx_map[ATE_MAX_TX_BUFFERS];
	bus_addr_t	rx_desc_phys;
	eth_rx_desc_t	*rx_descs;
	void		*rx_buf[ATE_MAX_RX_BUFFERS]; /* RX buffer space */
	void		*intrhand;	/* Interrupt handle */
	int		flags;
	int		if_flags;
	int		rx_buf_ptr;
	int		txcur;		/* Current TX map pointer */
	int		use_rmii;
};

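/*
 * Convenience wrappers for 32-bit register access through the EMAC's
 * memory resource, plus a bus-space barrier helper.
 */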
static inline uint32_t
RD4(struct ate_softc *sc, bus_size_t off)
{

	return (bus_read_4(sc->mem_res, off));
}

static inline void
WR4(struct ate_softc *sc, bus_size_t off, uint32_t val)
{

	bus_write_4(sc->mem_res, off, val);
}

static inline void
BARRIER(struct ate_softc *sc, bus_size_t off, bus_size_t len, int flags)
{

	bus_barrier(sc->mem_res, off, len, flags);
}

#define	ATE_LOCK(_sc)		mtx_lock(&(_sc)->sc_mtx)
#define	ATE_UNLOCK(_sc)		mtx_unlock(&(_sc)->sc_mtx)
#define	ATE_LOCK_INIT(_sc)					\
	mtx_init(&_sc->sc_mtx, device_get_nameunit(_sc->dev),	\
	    MTX_NETWORK_LOCK, MTX_DEF)
#define	ATE_LOCK_DESTROY(_sc)	mtx_destroy(&_sc->sc_mtx);
#define	ATE_ASSERT_LOCKED(_sc)	mtx_assert(&_sc->sc_mtx, MA_OWNED);
#define	ATE_ASSERT_UNLOCKED(_sc) mtx_assert(&_sc->sc_mtx, MA_NOTOWNED);

static devclass_t ate_devclass;

/*
 * ifnet entry points.
 */
static void	ateinit_locked(void *);
static void	atestart_locked(struct ifnet *);

static void	ateinit(void *);
static void	atestart(struct ifnet *);
static void	atestop(struct ate_softc *);
static int	ateioctl(struct ifnet * ifp, u_long, caddr_t);

/*
 * Bus entry points.
 */
static int	ate_probe(device_t dev);
static int	ate_attach(device_t dev);
static int	ate_detach(device_t dev);
static void	ate_intr(void *);

/*
 * Helper routines.
 */
static int	ate_activate(device_t dev);
static void	ate_deactivate(struct ate_softc *sc);
static int	ate_ifmedia_upd(struct ifnet *ifp);
static void	ate_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);
static int	ate_get_mac(struct ate_softc *sc, u_char *eaddr);
static void	ate_set_mac(struct ate_softc *sc, u_char *eaddr);
static void	ate_rxfilter(struct ate_softc *sc);

/*
 * The AT91 family of SoCs has an Ethernet controller called EMAC.
 * However, it isn't self-identifying.  It is anticipated that the parent
 * bus code will take care to add ate devices only where they really
 * exist.  As such, we do nothing here to identify the device and just
 * set its description.
 */
static int
ate_probe(device_t dev)
{

	device_set_desc(dev, "EMAC");
	return (0);
}

static int
ate_attach(device_t dev)
{
	struct ate_softc *sc;
	struct ifnet *ifp = NULL;
	struct sysctl_ctx_list *sctx;
	struct sysctl_oid *soid;
	u_char eaddr[ETHER_ADDR_LEN];
	uint32_t rnd;
	int rid, err;

	sc = device_get_softc(dev);
	sc->dev = dev;
	ATE_LOCK_INIT(sc);
	callout_init_mtx(&sc->tick_ch, &sc->sc_mtx, 0);

	/*
	 * Allocate resources.
	 */
	rid = 0;
	sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->mem_res == NULL) {
		device_printf(dev, "could not allocate memory resources.\n");
		err = ENOMEM;
		goto out;
	}
	rid = 0;
	sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_ACTIVE);
	if (sc->irq_res == NULL) {
		device_printf(dev, "could not allocate interrupt resources.\n");
		err = ENOMEM;
		goto out;
	}

	err = ate_activate(dev);
	if (err)
		goto out;

	sc->use_rmii = (RD4(sc, ETH_CFG) & ETH_CFG_RMII) == ETH_CFG_RMII;

	/* Sysctls */
	sctx = device_get_sysctl_ctx(dev);
	soid = device_get_sysctl_tree(dev);
	SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "rmii",
	    CTLFLAG_RD, &sc->use_rmii, 0, "rmii in use");

	/* Calling atestop before ifp is set is OK. */
	ATE_LOCK(sc);
	atestop(sc);
	ATE_UNLOCK(sc);

	if ((err = ate_get_mac(sc, eaddr)) != 0) {
		/*
		 * No MAC address configured.  Generate a random one.
		 */
		if (bootverbose)
			device_printf(dev,
			    "Generating random ethernet address.\n");
		rnd = arc4random();

		/*
		 * Set OUI to convenient locally assigned address.  'b'
		 * is 0x62, which has the locally assigned bit set, and
		 * the broadcast/multicast bit clear.
		 */
		eaddr[0] = 'b';
		eaddr[1] = 's';
		eaddr[2] = 'd';
		eaddr[3] = (rnd >> 16) & 0xff;
		eaddr[4] = (rnd >> 8) & 0xff;
		eaddr[5] = rnd & 0xff;
	}

	sc->ifp = ifp = if_alloc(IFT_ETHER);
	if (mii_phy_probe(dev, &sc->miibus, ate_ifmedia_upd, ate_ifmedia_sts)) {
		device_printf(dev, "Cannot find my PHY.\n");
		err = ENXIO;
		goto out;
	}

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_capabilities |= IFCAP_VLAN_MTU;
	ifp->if_capenable |= IFCAP_VLAN_MTU;	/* The hw bits are already set. */
	ifp->if_start = atestart;
	ifp->if_ioctl = ateioctl;
	ifp->if_init = ateinit;
	ifp->if_baudrate = 10000000;
	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
	ifp->if_snd.ifq_drv_maxlen = IFQ_MAXLEN;
	IFQ_SET_READY(&ifp->if_snd);
	ifp->if_linkmib = &sc->mibdata;
	ifp->if_linkmiblen = sizeof(sc->mibdata);
	sc->mibdata.dot3Compliance = DOT3COMPLIANCE_COLLS;
	sc->if_flags = ifp->if_flags;

	ether_ifattach(ifp, eaddr);

	/*
	 * Activate the interrupt.
	 */
	err = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, ate_intr, sc, &sc->intrhand);
	if (err) {
		device_printf(dev, "could not establish interrupt handler.\n");
		ether_ifdetach(ifp);
		goto out;
	}

out:
	if (err)
		ate_detach(dev);
	return (err);
}

static int
ate_detach(device_t dev)
{
	struct ate_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);
	KASSERT(sc != NULL, ("[ate: %d]: sc is NULL", __LINE__));
	ifp = sc->ifp;
	if (device_is_attached(dev)) {
		ether_ifdetach(ifp);
		ATE_LOCK(sc);
		atestop(sc);
		ATE_UNLOCK(sc);
		callout_drain(&sc->tick_ch);
	}
	if (sc->miibus != NULL) {
		device_delete_child(dev, sc->miibus);
		sc->miibus = NULL;
	}
	bus_generic_detach(sc->dev);
	ate_deactivate(sc);
	if (sc->intrhand != NULL) {
		bus_teardown_intr(dev, sc->irq_res, sc->intrhand);
		sc->intrhand = NULL;
	}
	if (ifp != NULL) {
		if_free(ifp);
		sc->ifp = NULL;
	}
	if (sc->mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->mem_res), sc->mem_res);
		sc->mem_res = NULL;
	}
	if (sc->irq_res != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ,
		    rman_get_rid(sc->irq_res), sc->irq_res);
		sc->irq_res = NULL;
	}
	ATE_LOCK_DESTROY(sc);
	return (0);
}

static void
ate_getaddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct ate_softc *sc;

	if (error != 0)
		return;
	sc = (struct ate_softc *)arg;
	sc->rx_desc_phys = segs[0].ds_addr;
}

static void
ate_load_rx_buf(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct ate_softc *sc;
	int i;

	if (error != 0)
		return;
	sc = (struct ate_softc *)arg;
	i = sc->rx_buf_ptr;

	/*
	 * For the last buffer, set the wrap bit so the controller
	 * restarts from the first descriptor.
	 */
	bus_dmamap_sync(sc->rx_desc_tag, sc->rx_desc_map, BUS_DMASYNC_PREWRITE);
	if (i == ATE_MAX_RX_BUFFERS - 1)
		sc->rx_descs[i].addr = segs[0].ds_addr | ETH_WRAP_BIT;
	else
		sc->rx_descs[i].addr = segs[0].ds_addr;
	bus_dmamap_sync(sc->rx_desc_tag, sc->rx_desc_map, BUS_DMASYNC_POSTWRITE);
	sc->rx_descs[i].status = 0;
	/* Flush the memory in the mbuf */
	bus_dmamap_sync(sc->rxtag, sc->rx_map[i], BUS_DMASYNC_PREREAD);
}

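/*
 * Fold the 48 bits of an ethernet address into the 6-bit index the EMAC
 * uses for its 64-bit multicast hash filter: address bit i is XORed into
 * bit (i mod 6) of the index.
 */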
static uint32_t
ate_mac_hash(const uint8_t *buf)
{
	uint32_t index = 0;
	for (int i = 0; i < 48; i++) {
		index ^= ((buf[i >> 3] >> (i & 7)) & 1) << (i % 6);
	}
	return (index);
}

/*
 * Compute the multicast filter for this device using the standard
 * algorithm.  I wonder why this isn't in ether somewhere, as a lot
 * of different MAC chips use this method (or the bit-reversed variant
 * of it).
 */
static int
ate_setmcast(struct ate_softc *sc)
{
	uint32_t index;
	uint32_t mcaf[2];
	u_char *af = (u_char *) mcaf;
	struct ifmultiaddr *ifma;
	struct ifnet *ifp;

	ifp = sc->ifp;

	if ((ifp->if_flags & IFF_PROMISC) != 0)
		return (0);
	if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
		WR4(sc, ETH_HSL, 0xffffffff);
		WR4(sc, ETH_HSH, 0xffffffff);
		return (1);
	}

	/*
	 * Compute the multicast hash.
	 */
	mcaf[0] = 0;
	mcaf[1] = 0;
	if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		index = ate_mac_hash(LLADDR((struct sockaddr_dl *)
		    ifma->ifma_addr));
		af[index >> 3] |= 1 << (index & 7);
	}
	if_maddr_runlock(ifp);

	/*
	 * Write the hash to the hash register.  This card can also
	 * accept unicast packets as well as multicast packets using this
	 * register for easier bridging operations, but we don't take
	 * advantage of that.  Locks here are to avoid LOR with the
	 * if_maddr_rlock, but might not be strictly necessary.
	 */
	WR4(sc, ETH_HSL, mcaf[0]);
	WR4(sc, ETH_HSH, mcaf[1]);
	return (mcaf[0] || mcaf[1]);
}

static int
ate_activate(device_t dev)
{
	struct ate_softc *sc;
	int err, i;

	sc = device_get_softc(dev);

	/*
	 * Allocate DMA tags and maps.
	 */
	err = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
	    1, MCLBYTES, 0, busdma_lock_mutex, &sc->sc_mtx, &sc->mtag);
	if (err != 0)
		goto errout;
	for (i = 0; i < ATE_MAX_TX_BUFFERS; i++) {
		err = bus_dmamap_create(sc->mtag, 0, &sc->tx_map[i]);
		if (err != 0)
			goto errout;
	}

	/*
	 * Allocate DMA tags and maps for RX.
	 */
	err = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
	    1, MCLBYTES, 0, busdma_lock_mutex, &sc->sc_mtx, &sc->rxtag);
	if (err != 0)
		goto errout;

	/*
	 * DMA tag and map for the RX descriptors.
	 */
	err = bus_dma_tag_create(bus_get_dma_tag(dev), sizeof(eth_rx_desc_t),
	    0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    ATE_MAX_RX_BUFFERS * sizeof(eth_rx_desc_t), 1,
	    ATE_MAX_RX_BUFFERS * sizeof(eth_rx_desc_t), 0, busdma_lock_mutex,
	    &sc->sc_mtx, &sc->rx_desc_tag);
	if (err != 0)
		goto errout;
	if (bus_dmamem_alloc(sc->rx_desc_tag, (void **)&sc->rx_descs,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &sc->rx_desc_map) != 0)
		goto errout;
	if (bus_dmamap_load(sc->rx_desc_tag, sc->rx_desc_map,
	    sc->rx_descs, ATE_MAX_RX_BUFFERS * sizeof(eth_rx_desc_t),
	    ate_getaddr, sc, 0) != 0)
		goto errout;

	/*
	 * Allocate our RX buffers.  The chip DMAs each received frame into
	 * one of these buffers via the descriptor ring set up above.
	 */
	for (i = 0; i < ATE_MAX_RX_BUFFERS; i++) {
		sc->rx_buf_ptr = i;
		if (bus_dmamem_alloc(sc->rxtag, (void **)&sc->rx_buf[i],
		    BUS_DMA_NOWAIT, &sc->rx_map[i]) != 0)
			goto errout;
		if (bus_dmamap_load(sc->rxtag, sc->rx_map[i], sc->rx_buf[i],
		    MCLBYTES, ate_load_rx_buf, sc, 0) != 0)
			goto errout;
	}
	sc->rx_buf_ptr = 0;
	/* Flush the memory for the EMAC rx descriptor. */
	bus_dmamap_sync(sc->rx_desc_tag, sc->rx_desc_map, BUS_DMASYNC_PREWRITE);
	/* Write the descriptor queue address. */
	WR4(sc, ETH_RBQP, sc->rx_desc_phys);
	return (0);

errout:
	return (ENOMEM);
}

static void
ate_deactivate(struct ate_softc *sc)
{
	int i;

	KASSERT(sc != NULL, ("[ate, %d]: sc is NULL!", __LINE__));
	if (sc->mtag != NULL) {
		for (i = 0; i < ATE_MAX_TX_BUFFERS; i++) {
			if (sc->sent_mbuf[i] != NULL) {
				bus_dmamap_sync(sc->mtag, sc->tx_map[i],
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(sc->mtag, sc->tx_map[i]);
				m_freem(sc->sent_mbuf[i]);
			}
			bus_dmamap_destroy(sc->mtag, sc->tx_map[i]);
			sc->sent_mbuf[i] = NULL;
			sc->tx_map[i] = NULL;
		}
		bus_dma_tag_destroy(sc->mtag);
	}
	if (sc->rx_desc_tag != NULL) {
		if (sc->rx_descs != NULL) {
			if (sc->rx_desc_phys != 0) {
				bus_dmamap_sync(sc->rx_desc_tag,
				    sc->rx_desc_map, BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(sc->rx_desc_tag,
				    sc->rx_desc_map);
				sc->rx_desc_phys = 0;
			}
		}
	}
	if (sc->rxtag != NULL) {
		for (i = 0; i < ATE_MAX_RX_BUFFERS; i++) {
			if (sc->rx_buf[i] != NULL) {
				if (sc->rx_descs[i].addr != 0) {
					bus_dmamap_sync(sc->rxtag,
					    sc->rx_map[i],
					    BUS_DMASYNC_POSTREAD);
					bus_dmamap_unload(sc->rxtag,
					    sc->rx_map[i]);
					sc->rx_descs[i].addr = 0;
				}
				bus_dmamem_free(sc->rxtag, sc->rx_buf[i],
				    sc->rx_map[i]);
				sc->rx_buf[i] = NULL;
				sc->rx_map[i] = NULL;
			}
		}
		bus_dma_tag_destroy(sc->rxtag);
	}
	if (sc->rx_desc_tag != NULL) {
		if (sc->rx_descs != NULL)
			bus_dmamem_free(sc->rx_desc_tag, sc->rx_descs,
			    sc->rx_desc_map);
		bus_dma_tag_destroy(sc->rx_desc_tag);
		sc->rx_descs = NULL;
		sc->rx_desc_tag = NULL;
	}
}

/*
 * Change media according to request.
 */
static int
ate_ifmedia_upd(struct ifnet *ifp)
{
	struct ate_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	mii = device_get_softc(sc->miibus);
	ATE_LOCK(sc);
	mii_mediachg(mii);
	ATE_UNLOCK(sc);
	return (0);
}

/*
 * Notify the world which media we're using.
 */
static void
ate_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct ate_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	mii = device_get_softc(sc->miibus);
	ATE_LOCK(sc);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	ATE_UNLOCK(sc);
}

static void
ate_stat_update(struct ate_softc *sc, int active)
{
	uint32_t reg;

	/*
	 * The speed and full/half-duplex state need to be reflected
	 * in the ETH_CFG register.
	 */
	reg = RD4(sc, ETH_CFG);
	reg &= ~(ETH_CFG_SPD | ETH_CFG_FD);
	if (IFM_SUBTYPE(active) != IFM_10_T)
		reg |= ETH_CFG_SPD;
	if (active & IFM_FDX)
		reg |= ETH_CFG_FD;
	WR4(sc, ETH_CFG, reg);
}

static void
ate_tick(void *xsc)
{
	struct ate_softc *sc = xsc;
	struct ifnet *ifp = sc->ifp;
	struct mii_data *mii;
	int active;
	uint32_t c;

	/*
	 * The KB920x boot loader tests ETH_SR & ETH_SR_LINK and will ask
	 * the MII if there's a link if this bit is clear.  Not sure if we
	 * should do the same thing here or not.
	 */
	ATE_ASSERT_LOCKED(sc);
	if (sc->miibus != NULL) {
		mii = device_get_softc(sc->miibus);
		active = mii->mii_media_active;
		mii_tick(mii);
		if (mii->mii_media_status & IFM_ACTIVE &&
		    active != mii->mii_media_active)
			ate_stat_update(sc, mii->mii_media_active);
	}

	/*
	 * Update the stats as best we can.  When we're done, clear
	 * the status counters and start over.  We're supposed to read these
	 * registers often enough that they won't overflow.  Hopefully
	 * once a second is often enough.  Some don't map well to
	 * the dot3Stats mib, so for those we just count them as general
	 * errors.  Stats for iframes, ibytes, oframes and obytes are
	 * collected elsewhere.  These registers zero on a read to prevent
	 * races.  For all the collision stats, also update the collision
	 * stats for the interface.
	 */
	sc->mibdata.dot3StatsAlignmentErrors += RD4(sc, ETH_ALE);
	sc->mibdata.dot3StatsFCSErrors += RD4(sc, ETH_SEQE);
	c = RD4(sc, ETH_SCOL);
	ifp->if_collisions += c;
	sc->mibdata.dot3StatsSingleCollisionFrames += c;
	c = RD4(sc, ETH_MCOL);
	sc->mibdata.dot3StatsMultipleCollisionFrames += c;
	ifp->if_collisions += c;
	sc->mibdata.dot3StatsSQETestErrors += RD4(sc, ETH_SQEE);
	sc->mibdata.dot3StatsDeferredTransmissions += RD4(sc, ETH_DTE);
	c = RD4(sc, ETH_LCOL);
	sc->mibdata.dot3StatsLateCollisions += c;
	ifp->if_collisions += c;
	c = RD4(sc, ETH_ECOL);
	sc->mibdata.dot3StatsExcessiveCollisions += c;
	ifp->if_collisions += c;
	sc->mibdata.dot3StatsCarrierSenseErrors += RD4(sc, ETH_CSE);
	sc->mibdata.dot3StatsFrameTooLongs += RD4(sc, ETH_ELR);
	sc->mibdata.dot3StatsInternalMacReceiveErrors += RD4(sc, ETH_DRFC);

	/*
	 * Not sure where to lump these, so count them against the errors
	 * for the interface.
	 */
	sc->ifp->if_oerrors += RD4(sc, ETH_TUE);
	sc->ifp->if_ierrors += RD4(sc, ETH_CDE) + RD4(sc, ETH_RJB) +
	    RD4(sc, ETH_USF);

	/*
	 * Schedule another timeout one second from now.
	 */
	callout_reset(&sc->tick_ch, hz, ate_tick, sc);
}

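/*
 * Program the station address into specific-address register 1.  The
 * hardware takes it least-significant byte first: SA1L holds bytes 0-3
 * and the low half of SA1H holds bytes 4-5.
 */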
static void
ate_set_mac(struct ate_softc *sc, u_char *eaddr)
{

	WR4(sc, ETH_SA1L, (eaddr[3] << 24) | (eaddr[2] << 16) |
	    (eaddr[1] << 8) | eaddr[0]);
	WR4(sc, ETH_SA1H, (eaddr[5] << 8) | (eaddr[4]));
}

static int
ate_get_mac(struct ate_softc *sc, u_char *eaddr)
{
	bus_size_t sa_low_reg[] = { ETH_SA1L, ETH_SA2L, ETH_SA3L, ETH_SA4L };
	bus_size_t sa_high_reg[] = { ETH_SA1H, ETH_SA2H, ETH_SA3H, ETH_SA4H };
	uint32_t low, high;
	int i;

	/*
	 * The boot loader sets up the MAC with an address, if one is
	 * configured in the loader.  Grab the first non-zero MAC address
	 * from the SA[1-4][HL] registers.
	 */
	for (i = 0; i < 4; i++) {
		low = RD4(sc, sa_low_reg[i]);
		high = RD4(sc, sa_high_reg[i]);
		if ((low | (high & 0xffff)) != 0) {
			eaddr[0] = low & 0xff;
			eaddr[1] = (low >> 8) & 0xff;
			eaddr[2] = (low >> 16) & 0xff;
			eaddr[3] = (low >> 24) & 0xff;
			eaddr[4] = high & 0xff;
			eaddr[5] = (high >> 8) & 0xff;
			return (0);
		}
	}
	return (ENXIO);
}

static void
ate_intr(void *xsc)
{
	struct ate_softc *sc = xsc;
	struct ifnet *ifp = sc->ifp;
	struct mbuf *mb;
	void *bp;
	uint32_t status, reg, rx_stat;
	int i;

	status = RD4(sc, ETH_ISR);
	if (status == 0)
		return;
	if (status & ETH_ISR_RCOM) {
		bus_dmamap_sync(sc->rx_desc_tag, sc->rx_desc_map,
		    BUS_DMASYNC_POSTREAD);
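		/*
		 * The EMAC sets the ownership bit in a descriptor's address
		 * word after it has written a received frame into the
		 * matching buffer; clearing it below hands the descriptor
		 * back to the hardware.
		 */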
		while (sc->rx_descs[sc->rx_buf_ptr].addr & ETH_CPU_OWNER) {
			i = sc->rx_buf_ptr;
			sc->rx_buf_ptr = (i + 1) % ATE_MAX_RX_BUFFERS;
			bp = sc->rx_buf[i];
			rx_stat = sc->rx_descs[i].status;
			if ((rx_stat & ETH_LEN_MASK) == 0) {
				if (bootverbose)
					device_printf(sc->dev, "ignoring bogus zero-length packet\n");
				bus_dmamap_sync(sc->rx_desc_tag, sc->rx_desc_map,
				    BUS_DMASYNC_PREWRITE);
				sc->rx_descs[i].addr &= ~ETH_CPU_OWNER;
				bus_dmamap_sync(sc->rx_desc_tag, sc->rx_desc_map,
				    BUS_DMASYNC_POSTWRITE);
				continue;
			}
			/* Flush memory for mbuf so we don't get stale bytes */
			bus_dmamap_sync(sc->rxtag, sc->rx_map[i],
			    BUS_DMASYNC_POSTREAD);
			WR4(sc, ETH_RSR, RD4(sc, ETH_RSR));

			/*
			 * The length returned by the device includes the
			 * ethernet CRC calculation for the packet, but
			 * ifnet drivers are supposed to discard it.
			 */
			mb = m_devget(sc->rx_buf[i],
			    (rx_stat & ETH_LEN_MASK) - ETHER_CRC_LEN,
			    ETHER_ALIGN, ifp, NULL);
			bus_dmamap_sync(sc->rx_desc_tag, sc->rx_desc_map,
			    BUS_DMASYNC_PREWRITE);
			sc->rx_descs[i].addr &= ~ETH_CPU_OWNER;
			bus_dmamap_sync(sc->rx_desc_tag, sc->rx_desc_map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_sync(sc->rxtag, sc->rx_map[i],
			    BUS_DMASYNC_PREREAD);
			if (mb != NULL) {
				ifp->if_ipackets++;
				(*ifp->if_input)(ifp, mb);
			}
		}
	}
	if (status & ETH_ISR_TCOM) {
		ATE_LOCK(sc);
		/* XXX TSR register should be cleared */
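		/*
		 * Slot 0 always holds the oldest frame in flight.  Reclaim
		 * it, then either reclaim slot 1 as well (transmitter has
		 * gone idle) or promote it to slot 0 so the next completion
		 * interrupt finds it there.
		 */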
		if (sc->sent_mbuf[0]) {
			bus_dmamap_sync(sc->mtag, sc->tx_map[0],
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->mtag, sc->tx_map[0]);
			m_freem(sc->sent_mbuf[0]);
			ifp->if_opackets++;
			sc->sent_mbuf[0] = NULL;
		}
		if (sc->sent_mbuf[1]) {
			if (RD4(sc, ETH_TSR) & ETH_TSR_IDLE) {
				bus_dmamap_sync(sc->mtag, sc->tx_map[1],
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(sc->mtag, sc->tx_map[1]);
				m_freem(sc->sent_mbuf[1]);
				ifp->if_opackets++;
				sc->txcur = 0;
				sc->sent_mbuf[0] = sc->sent_mbuf[1] = NULL;
			} else {
				sc->sent_mbuf[0] = sc->sent_mbuf[1];
				sc->sent_mbuf[1] = NULL;
				sc->txcur = 1;
			}
		} else {
			sc->sent_mbuf[0] = NULL;
			sc->txcur = 0;
		}
		/*
		 * We're no longer busy, so clear the busy flag and call the
		 * start routine to xmit more packets.
		 */
		sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		atestart_locked(sc->ifp);
		ATE_UNLOCK(sc);
	}
	if (status & ETH_ISR_RBNA) {
		/* Workaround Errata #11 */
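		/*
		 * "Receive buffer not available": briefly drop and re-raise
		 * the receive enable bit so the EMAC resumes filling
		 * descriptors.
		 */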
		if (bootverbose)
			device_printf(sc->dev, "RBNA workaround\n");
		reg = RD4(sc, ETH_CTL);
		WR4(sc, ETH_CTL, reg & ~ETH_CTL_RE);
		BARRIER(sc, ETH_CTL, 4, BUS_SPACE_BARRIER_WRITE);
		WR4(sc, ETH_CTL, reg | ETH_CTL_RE);
	}
}

/*
 * Reset and initialize the chip.
 */
static void
ateinit_locked(void *xsc)
{
	struct ate_softc *sc = xsc;
	struct ifnet *ifp = sc->ifp;
	struct mii_data *mii;
	uint8_t eaddr[ETHER_ADDR_LEN];
	uint32_t reg;

	ATE_ASSERT_LOCKED(sc);

	/*
	 * XXX TODO(3)
	 * we need to turn on the EMAC clock in the pmc.  With the
	 * default boot loader, this is already turned on.  However, we
	 * need to think about how best to turn it on/off as the interface
	 * is brought up/down, as well as dealing with the mii bus...
	 *
	 * We also need to multiplex the pins correctly.
	 */

	/*
	 * There are two different ways that the mii bus can be connected
	 * to this chip.  Select the right one based on how the boot loader
	 * configured the chip (recorded in use_rmii at attach time).
	 */
	reg = RD4(sc, ETH_CFG);
	if (sc->use_rmii)
		reg |= ETH_CFG_RMII;
	else
		reg &= ~ETH_CFG_RMII;
	WR4(sc, ETH_CFG, reg);

	ate_rxfilter(sc);

	/*
	 * Set the chip MAC address.
	 */
	bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
	ate_set_mac(sc, eaddr);

	/*
	 * Turn on MACs and interrupt processing.
	 */
	WR4(sc, ETH_CTL, RD4(sc, ETH_CTL) | ETH_CTL_TE | ETH_CTL_RE);
	WR4(sc, ETH_IER, ETH_ISR_RCOM | ETH_ISR_TCOM | ETH_ISR_RBNA);

	/* Enable big packets. */
	WR4(sc, ETH_CFG, RD4(sc, ETH_CFG) | ETH_CFG_BIG);

	/*
	 * Set 'running' flag, and clear output active flag
	 * and attempt to start the output.
	 */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	mii = device_get_softc(sc->miibus);
	mii_pollstat(mii);
	ate_stat_update(sc, mii->mii_media_active);
	atestart_locked(ifp);

	callout_reset(&sc->tick_ch, hz, ate_tick, sc);
}

/*
 * Dequeue packets and transmit.
 */
static void
atestart_locked(struct ifnet *ifp)
{
	struct ate_softc *sc = ifp->if_softc;
	struct mbuf *m, *mdefrag;
	bus_dma_segment_t segs[1];
	int nseg, e;

	ATE_ASSERT_LOCKED(sc);
	if (ifp->if_drv_flags & IFF_DRV_OACTIVE)
		return;

	while (sc->txcur < ATE_MAX_TX_BUFFERS) {
		/*
		 * Check to see if there's room to put another packet into the
		 * xmit queue.  The EMAC chip has a ping-pong buffer for xmit
		 * packets.  We use OACTIVE to indicate "we can stuff more into
		 * our buffers (clear) or not (set)."
		 */
		if (!(RD4(sc, ETH_TSR) & ETH_TSR_BNQ)) {
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			return;
		}
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL) {
			ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
			return;
		}
		e = bus_dmamap_load_mbuf_sg(sc->mtag, sc->tx_map[sc->txcur], m,
		    segs, &nseg, 0);
		if (e == EFBIG) {
			mdefrag = m_defrag(m, M_DONTWAIT);
			if (mdefrag == NULL) {
				IFQ_DRV_PREPEND(&ifp->if_snd, m);
				return;
			}
			m = mdefrag;
			e = bus_dmamap_load_mbuf_sg(sc->mtag,
			    sc->tx_map[sc->txcur], m, segs, &nseg, 0);
		}
		if (e != 0) {
			m_freem(m);
			continue;
		}
		bus_dmamap_sync(sc->mtag, sc->tx_map[sc->txcur],
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Tell the hardware to xmit the packet.
		 */
		WR4(sc, ETH_TAR, segs[0].ds_addr);
		BARRIER(sc, ETH_TAR, 8, BUS_SPACE_BARRIER_WRITE);
		WR4(sc, ETH_TCR, segs[0].ds_len);

		/*
		 * Tap off here if there is a bpf listener.
		 */
		BPF_MTAP(ifp, m);

		sc->sent_mbuf[sc->txcur] = m;
		sc->txcur++;
	}
}

static void
ateinit(void *xsc)
{
	struct ate_softc *sc = xsc;

	ATE_LOCK(sc);
	ateinit_locked(sc);
	ATE_UNLOCK(sc);
}

static void
atestart(struct ifnet *ifp)
{
	struct ate_softc *sc = ifp->if_softc;

	ATE_LOCK(sc);
	atestart_locked(ifp);
	ATE_UNLOCK(sc);
}

/*
 * Turn off interrupts, and stop the NIC.  Can be called with sc->ifp NULL,
 * so be careful.
 */
static void
atestop(struct ate_softc *sc)
{
	struct ifnet *ifp;
	int i;

	ATE_ASSERT_LOCKED(sc);
	ifp = sc->ifp;
	if (ifp) {
		ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	}

	callout_stop(&sc->tick_ch);

	/*
	 * Enable only the parts of the MAC that are always needed (like
	 * the MII bus).  This turns off the RE and TE bits, which will
	 * remain off until ateinit() is called to turn them on.  With RE
	 * and TE turned off, there's no DMA to worry about after this
	 * write.
	 */
	WR4(sc, ETH_CTL, ETH_CTL_MPE);

	/*
	 * Turn off all the configured options and revert to defaults.
	 */
	WR4(sc, ETH_CFG, ETH_CFG_CLK_32);

	/*
	 * Turn off all the interrupts, and ack any pending ones by reading
	 * the ISR.
	 */
	WR4(sc, ETH_IDR, 0xffffffff);
	RD4(sc, ETH_ISR);

	/*
	 * Clear out the Transmit and Receive Status registers of any
	 * errors they may be reporting.
	 */
	WR4(sc, ETH_TSR, 0xffffffff);
	WR4(sc, ETH_RSR, 0xffffffff);

	/*
	 * Release TX resources.
	 */
	for (i = 0; i < ATE_MAX_TX_BUFFERS; i++) {
		if (sc->sent_mbuf[i] != NULL) {
			bus_dmamap_sync(sc->mtag, sc->tx_map[i],
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->mtag, sc->tx_map[i]);
			m_freem(sc->sent_mbuf[i]);
			sc->sent_mbuf[i] = NULL;
		}
	}

	/*
	 * XXX we should power down the EMAC if it isn't in use, after
	 * putting it into loopback mode.  This saves about 400uA according
	 * to the datasheet.
	 */
}

static void
ate_rxfilter(struct ate_softc *sc)
{
	struct ifnet *ifp;
	uint32_t reg;
	int enabled;

	KASSERT(sc != NULL, ("[ate, %d]: sc is NULL!", __LINE__));
	ATE_ASSERT_LOCKED(sc);
	ifp = sc->ifp;

	/*
	 * Wipe out old filter settings.
	 */
	reg = RD4(sc, ETH_CFG);
	reg &= ~(ETH_CFG_CAF | ETH_CFG_MTI | ETH_CFG_UNI);
	reg |= ETH_CFG_NBC;
	sc->flags &= ~ATE_FLAG_MULTICAST;

	/*
	 * Set new parameters.
	 */
	if ((ifp->if_flags & IFF_BROADCAST) != 0)
		reg &= ~ETH_CFG_NBC;
	if ((ifp->if_flags & IFF_PROMISC) != 0) {
		reg |= ETH_CFG_CAF;
	} else {
		enabled = ate_setmcast(sc);
		if (enabled != 0) {
			reg |= ETH_CFG_MTI;
			sc->flags |= ATE_FLAG_MULTICAST;
		}
	}
	WR4(sc, ETH_CFG, reg);
}

static int
ateioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct ate_softc *sc = ifp->if_softc;
	struct mii_data *mii;
	struct ifreq *ifr = (struct ifreq *)data;
	int drv_flags, flags;
	int mask, error, enabled;

	error = 0;
	flags = ifp->if_flags;
	drv_flags = ifp->if_drv_flags;
	switch (cmd) {
	case SIOCSIFFLAGS:
		ATE_LOCK(sc);
		if ((flags & IFF_UP) != 0) {
			if ((drv_flags & IFF_DRV_RUNNING) != 0) {
				if (((flags ^ sc->if_flags)
				    & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
					ate_rxfilter(sc);
			} else {
				ateinit_locked(sc);
			}
		} else if ((drv_flags & IFF_DRV_RUNNING) != 0) {
			atestop(sc);
		}
		sc->if_flags = flags;
		ATE_UNLOCK(sc);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if ((drv_flags & IFF_DRV_RUNNING) != 0) {
			ATE_LOCK(sc);
			enabled = ate_setmcast(sc);
			if (enabled != (sc->flags & ATE_FLAG_MULTICAST))
				ate_rxfilter(sc);
			ATE_UNLOCK(sc);
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	case SIOCSIFCAP:
		mask = ifp->if_capenable ^ ifr->ifr_reqcap;
		if (mask & IFCAP_VLAN_MTU) {
			ATE_LOCK(sc);
			if (ifr->ifr_reqcap & IFCAP_VLAN_MTU) {
				WR4(sc, ETH_CFG, RD4(sc, ETH_CFG) | ETH_CFG_BIG);
				ifp->if_capenable |= IFCAP_VLAN_MTU;
			} else {
				WR4(sc, ETH_CFG, RD4(sc, ETH_CFG) & ~ETH_CFG_BIG);
				ifp->if_capenable &= ~IFCAP_VLAN_MTU;
			}
			ATE_UNLOCK(sc);
		}
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);
}

static void
ate_child_detached(device_t dev, device_t child)
{
	struct ate_softc *sc;

	sc = device_get_softc(dev);
	if (child == sc->miibus)
		sc->miibus = NULL;
}

/*
 * MII bus support routines.
 */
static int
ate_miibus_readreg(device_t dev, int phy, int reg)
{
	struct ate_softc *sc;
	int val;

	/*
	 * XXX if we implement aggressive power savings, then we need
	 * XXX to make sure that the clock to the emac is on here
	 */

	sc = device_get_softc(dev);
	DELAY(1);	/* Hangs w/o this delay; really ~30.5us at the moment. */
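	/*
	 * Write a read-maintenance frame to ETH_MAN, wait for the PHY
	 * maintenance logic to go idle (ETH_SR_IDLE), then pull the result
	 * out of the data field of ETH_MAN.
	 */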
	WR4(sc, ETH_MAN, ETH_MAN_REG_RD(phy, reg));
	while ((RD4(sc, ETH_SR) & ETH_SR_IDLE) == 0)
		continue;
	val = RD4(sc, ETH_MAN) & ETH_MAN_VALUE_MASK;

	return (val);
}

static int
ate_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct ate_softc *sc;

	/*
	 * XXX if we implement aggressive power savings, then we need
	 * XXX to make sure that the clock to the emac is on here
	 */

	sc = device_get_softc(dev);
	WR4(sc, ETH_MAN, ETH_MAN_REG_WR(phy, reg, data));
	while ((RD4(sc, ETH_SR) & ETH_SR_IDLE) == 0)
		continue;
	return (0);
}

static device_method_t ate_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		ate_probe),
	DEVMETHOD(device_attach,	ate_attach),
	DEVMETHOD(device_detach,	ate_detach),

	/* Bus interface */
	DEVMETHOD(bus_child_detached,	ate_child_detached),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	ate_miibus_readreg),
	DEVMETHOD(miibus_writereg,	ate_miibus_writereg),

	{ 0, 0 }
};

static driver_t ate_driver = {
	"ate",
	ate_methods,
	sizeof(struct ate_softc),
};

DRIVER_MODULE(ate, atmelarm, ate_driver, ate_devclass, 0, 0);
DRIVER_MODULE(miibus, ate, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(ate, miibus, 1, 1, 1);
MODULE_DEPEND(ate, ether, 1, 1, 1);