1/*	$NetBSD: xmm7360.c,v 1.17 2022/10/27 00:01:07 riastradh Exp $	*/
2
3/*
4 * Device driver for Intel XMM7360 LTE modems, eg. Fibocom L850-GL.
5 * Written by James Wah
6 * james@laird-wah.net
7 *
8 * Development of this driver was supported by genua GmbH
9 *
10 * Copyright (c) 2020 genua GmbH <info@genua.de>
11 * Copyright (c) 2020 James Wah <james@laird-wah.net>
12 *
13 * The OpenBSD and NetBSD support was written by Jaromir Dolecek for
14 * Moritz Systems Technology Company Sp. z o.o.
15 *
16 * Permission to use, copy, modify, and/or distribute this software for any
17 * purpose with or without fee is hereby granted, provided that the above
18 * copyright notice and this permission notice appear in all copies.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
21 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES ON
22 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
23 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGE
24 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
25 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
26 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
27 */
28
29#ifdef __linux__
30
31#include <linux/init.h>
32#include <linux/interrupt.h>
33#include <linux/kernel.h>
34#include <linux/module.h>
35#include <linux/pci.h>
36#include <linux/delay.h>
37#include <linux/uaccess.h>
38#include <linux/cdev.h>
39#include <linux/wait.h>
40#include <linux/tty.h>
41#include <linux/tty_flip.h>
42#include <linux/poll.h>
43#include <linux/skbuff.h>
44#include <linux/netdevice.h>
45#include <linux/if.h>
46#include <linux/if_arp.h>
47#include <net/rtnetlink.h>
48#include <linux/hrtimer.h>
49#include <linux/workqueue.h>
50
51MODULE_LICENSE("Dual BSD/GPL");
52
53static const struct pci_device_id xmm7360_ids[] = {
54	{ PCI_DEVICE(0x8086, 0x7360), },
55	{ 0, }
56};
57MODULE_DEVICE_TABLE(pci, xmm7360_ids);
58
59/* Actually this ioctl not used for xmm0/rpc device by python code */
60#define XMM7360_IOCTL_GET_PAGE_SIZE _IOC(_IOC_READ, 'x', 0xc0, sizeof(u32))
61
62#define xmm7360_os_msleep(msec)		msleep(msec)
63
64#define __unused			/* nothing */
65
66#endif
67
68#if defined(__OpenBSD__) || defined(__NetBSD__)
69
70#ifdef __OpenBSD__
71#include "bpfilter.h"
72#endif
73#ifdef __NetBSD__
74#include "opt_inet.h"
75#include "opt_gateway.h"
76
77#include <sys/cdefs.h>
78__KERNEL_RCSID(0, "$NetBSD: xmm7360.c,v 1.17 2022/10/27 00:01:07 riastradh Exp $");
79#endif
80
81#include <sys/param.h>
82#include <sys/systm.h>
83#include <sys/sockio.h>
84#include <sys/mbuf.h>
85#include <sys/kernel.h>
86#include <sys/device.h>
87#include <sys/socket.h>
88#include <sys/mutex.h>
89#include <sys/tty.h>
90#include <sys/conf.h>
91#include <sys/kthread.h>
92#include <sys/poll.h>
93#include <sys/fcntl.h>		/* for FREAD/FWRITE */
94#include <sys/vnode.h>
95#include <uvm/uvm_param.h>
96
97#include <dev/pci/pcireg.h>
98#include <dev/pci/pcivar.h>
99#include <dev/pci/pcidevs.h>
100
101#include <net/if.h>
102#include <net/if_types.h>
103
104#include <netinet/in.h>
105#include <netinet/ip.h>
106#include <netinet/ip6.h>
107
108#ifdef __OpenBSD__
109#include <netinet/if_ether.h>
110#include <sys/timeout.h>
111#include <machine/bus.h>
112#endif
113
114#if NBPFILTER > 0 || defined(__NetBSD__)
115#include <net/bpf.h>
116#endif
117
118#ifdef __NetBSD__
119#include "ioconf.h"
120#include <sys/cpu.h>
121#endif
122
123#ifdef INET
124#include <netinet/in_var.h>
125#endif
126#ifdef INET6
127#include <netinet6/in6_var.h>
128#endif
129
130typedef uint8_t u8;
131typedef uint16_t u16;
132typedef uint32_t u32;
133typedef bus_addr_t dma_addr_t;
134typedef void * wait_queue_head_t;	/* just address for tsleep() */
135
136#define WWAN_BAR0	PCI_MAPREG_START
137#define WWAN_BAR1	(PCI_MAPREG_START + 4)
138#define WWAN_BAR2	(PCI_MAPREG_START + 8)
139
140#define BUG_ON(never_true)	KASSERT(!(never_true))
141#define WARN_ON(x)		/* nothing */
142
143#ifdef __OpenBSD__
144typedef struct mutex spinlock_t;
145#define dev_err(devp, fmt, ...)		\
146	printf("%s: " fmt, device_xname(devp), ##__VA_ARGS__)
147#define dev_info(devp, fmt, ...)	\
148	printf("%s: " fmt, device_xname(devp), ##__VA_ARGS__)
149#define	kzalloc(size, flags)	malloc(size, M_DEVBUF, M_WAITOK | M_ZERO)
150#define kfree(addr)		free(addr, M_DEVBUF, 0)
151#define mutex_init(lock)	mtx_init(lock, IPL_TTY)
152#define mutex_lock(lock)	mtx_enter(lock)
153#define mutex_unlock(lock)	mtx_leave(lock)
154/* In OpenBSD every mutex is spin mutex, and it must not be held on sleep */
155#define spin_lock_irqsave(lock, flags)		mtx_enter(lock)
156#define spin_unlock_irqrestore(lock, flags)	mtx_leave(lock)
157
158/* Compat defines for NetBSD API */
159#define curlwp			curproc
160#define LINESW(tp)				(linesw[(tp)->t_line])
161#define selnotify(sel, band, note)		selwakeup(sel)
162#define cfdata_t				void *
163#define device_lookup_private(cdp, unit)	\
164	(unit < (*cdp).cd_ndevs) ? (*cdp).cd_devs[unit] : NULL
165#define IFQ_SET_READY(ifq)			/* nothing */
166#define device_private(devt)			(void *)devt;
167#define if_deferred_start_init(ifp, arg)	/* nothing */
168#define IF_OUTPUT_CONST				/* nothing */
169#define knote_set_eof(kn, f)			(kn)->kn_flags |= EV_EOF | (f)
170#define tty_lock(tp)				int s = spltty()
171#define tty_unlock(tp)				splx(s)
172#define tty_locked(tp)				/* nothing */
173#define pmf_device_deregister(dev)		/* nothing */
174#if NBPFILTER > 0
175#define BPF_MTAP_OUT(ifp, m)						\
176                if (ifp->if_bpf) {					\
177                        bpf_mtap_af(ifp->if_bpf, m->m_pkthdr.ph_family,	\
178			    m, BPF_DIRECTION_OUT);			\
179		}
180#else
181#define BPF_MTAP_OUT(ifp, m)			/* nothing */
182#endif
183
184/* Copied from NetBSD <lib/libkern/libkern.h> */
185#define __validate_container_of(PTR, TYPE, FIELD)			\
186    (0 * sizeof((PTR) - &((TYPE *)(((char *)(PTR)) -			\
187    offsetof(TYPE, FIELD)))->FIELD))
188#define	container_of(PTR, TYPE, FIELD)					\
189    ((TYPE *)(((char *)(PTR)) - offsetof(TYPE, FIELD))			\
190	+ __validate_container_of(PTR, TYPE, FIELD))
191
192/* Copied from NetBSD <sys/cdefs.h> */
193#define __UNVOLATILE(a)		((void *)(unsigned long)(volatile void *)(a))
194
195#if OpenBSD <= 201911
196/* Backward compat with OpenBSD 6.6 */
197#define klist_insert(klist, kn)		\
198		SLIST_INSERT_HEAD(klist, kn, kn_selnext)
199#define klist_remove(klist, kn)		\
200		SLIST_REMOVE(klist, kn, knote, kn_selnext)
201#define XMM_KQ_ISFD_INITIALIZER		.f_isfd = 1
202#else
203#define XMM_KQ_ISFD_INITIALIZER		.f_flags = FILTEROP_ISFD
204#endif /* OpenBSD <= 201911 */
205
206#define	selrecord_knote(si, kn)						\
207	klist_insert(&(si)->si_note, (kn))
208#define	selremove_knote(si, kn)						\
209	klist_remove(&(si)->si_note, (kn))
210
211#endif
212
213#ifdef __NetBSD__
214typedef struct kmutex spinlock_t;
215#define dev_err			aprint_error_dev
216#define dev_info		aprint_normal_dev
217#define mutex			kmutex
218#define kzalloc(size, flags)	malloc(size, M_DEVBUF, M_WAITOK | M_ZERO)
219#define kfree(addr)		free(addr, M_DEVBUF)
220#define mutex_init(lock)	mutex_init(lock, MUTEX_DEFAULT, IPL_TTY)
221#define mutex_lock(lock)	mutex_enter(lock)
222#define mutex_unlock(lock)	mutex_exit(lock)
223#define spin_lock_irqsave(lock, flags)	mutex_enter(lock)
224#define spin_unlock_irqrestore(lock, flags)	mutex_exit(lock)
225
226/* Compat defines with OpenBSD API */
227#define caddr_t			void *
228#define proc			lwp
229#define LINESW(tp)		(*tp->t_linesw)
230#define ttymalloc(speed)	tty_alloc()
231#define ttyfree(tp)		tty_free(tp)
232#define l_open(dev, tp, p)	l_open(dev, tp)
233#define l_close(tp, flag, p)	l_close(tp, flag)
234#define ttkqfilter(dev, kn)	ttykqfilter(dev, kn)
235#define msleep(ident, lock, prio, wmesg, timo) \
236		mtsleep(ident, prio, wmesg, timo, lock)
237#define pci_mapreg_map(pa, reg, type, busfl, tp, hp, bp, szp, maxsize) \
238	pci_mapreg_map(pa, reg, type, busfl, tp, hp, bp, szp)
239#define pci_intr_establish(pc, ih, lvl, func, arg, name) \
240	pci_intr_establish_xname(pc, ih, lvl, func, arg, name)
241#define suser(l)					\
242	kauth_authorize_device_tty(l->l_cred, KAUTH_DEVICE_TTY_OPEN, tp)
243#define kthread_create(func, arg, lwpp, name)		\
244	kthread_create(0, 0, NULL, func, arg, lwpp, "%s", name)
245#define MUTEX_ASSERT_LOCKED(lock)	KASSERT(mutex_owned(lock))
246#define MCLGETI(m, how, m0, sz)		MCLGET(m, how)
247#define m_copyback(m, off, sz, buf, how)		\
248					m_copyback(m, off, sz, buf)
249#define ifq_deq_begin(ifq)		({		\
250		struct mbuf *m0;			\
251		IFQ_DEQUEUE(ifq, m0);			\
252		m0;					\
253})
254#define ifq_deq_rollback(ifq, m)	m_freem(m)
255#define ifq_deq_commit(ifq, m)		/* nothing to do */
256#define ifq_is_oactive(ifq)		true	/* always restart queue */
257#define ifq_clr_oactive(ifq)		/* nothing to do */
258#define ifq_empty(ifq)			IFQ_IS_EMPTY(ifq)
259#define ifq_purge(ifq)			IF_PURGE(ifq)
260#define if_enqueue(ifp, m)		ifq_enqueue(ifp, m)
261#define if_ih_insert(ifp, func, arg)	(ifp)->_if_input = (func)
262#define if_ih_remove(ifp, func, arg)	/* nothing to do */
263#define if_hardmtu			if_mtu
264#define IF_OUTPUT_CONST			const
265#define XMM_KQ_ISFD_INITIALIZER		.f_flags = FILTEROP_ISFD
266#define tty_lock(tp)			ttylock(tp)
267#define tty_unlock(tp)			ttyunlock(tp)
268#define tty_locked(tp)			KASSERT(ttylocked(tp))
269#define bpfattach(bpf, ifp, dlt, sz)	bpf_attach(ifp, dlt, sz)
270#define NBPFILTER			1
271#define BPF_MTAP_OUT(ifp, m)		bpf_mtap(ifp, m, BPF_D_OUT)
272#endif /* __NetBSD__ */
273
274#define __user				/* nothing */
275#define copy_from_user(kbuf, userbuf, sz)		\
276({							\
277	int __ret = 0;					\
278	int error = copyin(userbuf, kbuf, sz);		\
279	if (error != 0)					\
280		return -error;				\
281	__ret;						\
282})
283#define copy_to_user(kbuf, userbuf, sz)			\
284({							\
285	int __ret = 0;					\
286	int error = copyout(userbuf, kbuf, sz);		\
287	if (error != 0)					\
288		return -error;				\
289	__ret;						\
290})
291#define xmm7360_os_msleep(msec)					\
292	do {							\
293		KASSERT(!cold);					\
294		tsleep(xmm, 0, "wwancsl", msec * hz / 1000);	\
295	} while (0)
296
297static pktq_rps_hash_func_t xmm7360_pktq_rps_hash_p;
298static void *dma_alloc_coherent(struct device *, size_t, dma_addr_t *, int);
299static void dma_free_coherent(struct device *, size_t, volatile void *, dma_addr_t);
300
301#ifndef PCI_PRODUCT_INTEL_XMM7360
302#define PCI_PRODUCT_INTEL_XMM7360	0x7360
303#endif
304
305#define init_waitqueue_head(wqp)	*(wqp) = (wqp)
306#define wait_event_interruptible(wq, cond)				\
307({									\
308	int __ret = 1;							\
309	while (!(cond)) {						\
310		KASSERT(!cold);						\
311		int error = tsleep(wq, PCATCH, "xmmwq", 0);		\
312		if (error) {						\
313			__ret = (cond) ? 1				\
314			    : ((error != ERESTART) ? -error : error);	\
315			break;						\
316		}							\
317	}								\
318	__ret;								\
319})
320
321#define msecs_to_jiffies(msec)						\
322({									\
323	KASSERT(hz < 1000);						\
324	KASSERT(msec > (1000 / hz));					\
325	msec * hz / 1000;						\
326})
327
328#define wait_event_interruptible_timeout(wq, cond, jiffies)		\
329({									\
330	int __ret = 1;							\
331	while (!(cond)) {						\
332		if (cold) {						\
333			for (int loop = 0; loop < 10; loop++) {		\
334				delay(jiffies * 1000 * 1000 / hz / 10);	\
335				if (cond)				\
336					break;				\
337			}						\
338			__ret = (cond) ? 1 : 0;				\
339			break;						\
340		}							\
341		int error = tsleep(wq, PCATCH, "xmmwq", jiffies);	\
342		if (error) {						\
343			__ret = (cond) ? 1				\
344			    : ((error != ERESTART) ? -error : error);	\
345			break;						\
346		}							\
347	}								\
348	__ret;								\
349})
350
351#define GFP_KERNEL			0
352
353#endif /* __OpenBSD__ || __NetBSD__ */
354
355/*
356 * The XMM7360 communicates via DMA ring buffers. It has one
357 * command ring, plus sixteen transfer descriptor (TD)
358 * rings. The command ring is mainly used to configure and
359 * deconfigure the TD rings.
360 *
361 * The 16 TD rings form 8 queue pairs (QP). For example, QP
362 * 0 uses ring 0 for host->device, and ring 1 for
363 * device->host.
364 *
365 * The known queue pair functions are as follows:
366 *
367 * 0:	Mux (Raw IP packets, amongst others)
368 * 1:	RPC (funky command protocol based in part on ASN.1 BER)
369 * 2:	AT trace? port; does not accept commands after init
370 * 4:	AT command port
371 * 7:	AT command port
372 *
373 */
374
375/* Command ring, which is used to configure the queue pairs */
376struct cmd_ring_entry {
377	dma_addr_t ptr;
378	u16 len;
379	u8 parm;
380	u8 cmd;
381	u32 extra;
382	u32 unk, flags;
383};
384
385#define CMD_RING_OPEN	1
386#define CMD_RING_CLOSE	2
387#define CMD_RING_FLUSH	3
388#define CMD_WAKEUP	4
389
390#define CMD_FLAG_DONE	1
391#define CMD_FLAG_READY	2
392
393/* Transfer descriptors used on the Tx and Rx rings of each queue pair */
394struct td_ring_entry {
395	dma_addr_t addr;
396	u16 length;
397	u16 flags;
398	u32 unk;
399};
400
401#define TD_FLAG_COMPLETE 0x200
402
403/* Root configuration object. This contains pointers to all of the control
404 * structures that the modem will interact with.
405 */
406struct control {
407	dma_addr_t status;
408	dma_addr_t s_wptr, s_rptr;
409	dma_addr_t c_wptr, c_rptr;
410	dma_addr_t c_ring;
411	u16 c_ring_size;
412	u16 unk;
413};
414
415struct status {
416	u32 code;
417	u32 mode;
418	u32 asleep;
419	u32 pad;
420};
421
422#define CMD_RING_SIZE 0x80
423
424/* All of the control structures can be packed into one page of RAM. */
425struct control_page {
426	struct control ctl;
427	// Status words - written by modem.
428	volatile struct status status;
429	// Slave ring write/read pointers.
430	volatile u32 s_wptr[16], s_rptr[16];
431	// Command ring write/read pointers.
432	volatile u32 c_wptr, c_rptr;
433	// Command ring entries.
434	volatile struct cmd_ring_entry c_ring[CMD_RING_SIZE];
435};
436
437#define BAR0_MODE	0x0c
438#define BAR0_DOORBELL	0x04
439#define BAR0_WAKEUP	0x14
440
441#define DOORBELL_TD	0
442#define DOORBELL_CMD	1
443
444#define BAR2_STATUS	0x00
445#define BAR2_MODE	0x18
446#define BAR2_CONTROL	0x19
447#define BAR2_CONTROLH	0x1a
448
449#define BAR2_BLANK0	0x1b
450#define BAR2_BLANK1	0x1c
451#define BAR2_BLANK2	0x1d
452#define BAR2_BLANK3	0x1e
453
454#define XMM_MODEM_BOOTING	0xfeedb007
455#define XMM_MODEM_READY		0x600df00d
456
457#define XMM_TAG_ACBH		0x41434248	// 'ACBH'
458#define XMM_TAG_CMDH		0x434d4448	// 'CMDH'
459#define XMM_TAG_ADBH		0x41444248	// 'ADBH'
460#define XMM_TAG_ADTH		0x41445448	// 'ADTH'
461
462/* There are 16 TD rings: a Tx and Rx ring for each queue pair */
463struct td_ring {
464	u8 depth;
465	u8 last_handled;
466	u16 page_size;
467
468	struct td_ring_entry *tds;
469	dma_addr_t tds_phys;
470
471	// One page of page_size per td
472	void **pages;
473	dma_addr_t *pages_phys;
474};
475
476#define TD_MAX_PAGE_SIZE 16384
477
478struct queue_pair {
479	struct xmm_dev *xmm;
480	u8 depth;
481	u16 page_size;
482	int tty_index;
483	int tty_needs_wake;
484#ifdef __linux__
485	struct device dev;
486#endif
487	int num;
488	int open;
489	struct mutex lock;
490	unsigned char user_buf[TD_MAX_PAGE_SIZE];
491	wait_queue_head_t wq;
492
493#ifdef __linux__
494	struct cdev cdev;
495	struct tty_port port;
496#endif
497#if defined(__OpenBSD__) || defined(__NetBSD__)
498	struct selinfo selr, selw;
499#endif
500};
501
502#define XMM_QP_COUNT	8
503
504struct xmm_dev {
505	struct device *dev;
506
507	volatile uint32_t *bar0, *bar2;
508
509	volatile struct control_page *cp;
510	dma_addr_t cp_phys;
511
512	struct td_ring td_ring[2 * XMM_QP_COUNT];
513
514	struct queue_pair qp[XMM_QP_COUNT];
515
516	struct xmm_net *net;
517	struct net_device *netdev;
518
519	int error;
520	int card_num;
521	int num_ttys;
522	wait_queue_head_t wq;
523
524#ifdef __linux__
525	struct pci_dev *pci_dev;
526
527	int irq;
528
529	struct work_struct init_work;	// XXX work not actually scheduled
530#endif
531};
532
533struct mux_bounds {
534	uint32_t offset;
535	uint32_t length;
536};
537
538struct mux_first_header {
539	uint32_t tag;
540	uint16_t unknown;
541	uint16_t sequence;
542	uint16_t length;
543	uint16_t extra;
544	uint16_t next;
545	uint16_t pad;
546};
547
548struct mux_next_header {
549	uint32_t tag;
550	uint16_t length;
551	uint16_t extra;
552	uint16_t next;
553	uint16_t pad;
554};
555
556#define MUX_MAX_PACKETS	64
557
558struct mux_frame {
559	int n_packets, n_bytes, max_size, sequence;
560	uint16_t *last_tag_length, *last_tag_next;
561	struct mux_bounds bounds[MUX_MAX_PACKETS];
562	uint8_t data[TD_MAX_PAGE_SIZE];
563};
564
565struct xmm_net {
566	struct xmm_dev *xmm;
567	struct queue_pair *qp;
568	int channel;
569
570#ifdef __linux__
571	struct sk_buff_head queue;
572	struct hrtimer deadline;
573#endif
574	int queued_packets, queued_bytes;
575
576	int sequence;
577	spinlock_t lock;
578	struct mux_frame frame;
579};
580
581static void xmm7360_os_handle_net_frame(struct xmm_dev *, const u8 *, size_t);
582static void xmm7360_os_handle_net_dequeue(struct xmm_net *, struct mux_frame *);
583static void xmm7360_os_handle_net_txwake(struct xmm_net *);
584static void xmm7360_os_handle_tty_idata(struct queue_pair *, const u8 *, size_t);
585
586static void xmm7360_poll(struct xmm_dev *xmm)
587{
588	if (xmm->cp->status.code == 0xbadc0ded) {
589		dev_err(xmm->dev, "crashed but dma up\n");
590		xmm->error = -ENODEV;
591	}
592	if (xmm->bar2[BAR2_STATUS] != XMM_MODEM_READY) {
593		dev_err(xmm->dev, "bad status %x\n",xmm->bar2[BAR2_STATUS]);
594		xmm->error = -ENODEV;
595	}
596}
597
598static void xmm7360_ding(struct xmm_dev *xmm, int bell)
599{
600	if (xmm->cp->status.asleep)
601		xmm->bar0[BAR0_WAKEUP] = 1;
602	xmm->bar0[BAR0_DOORBELL] = bell;
603	xmm7360_poll(xmm);
604}
605
606static int xmm7360_cmd_ring_wait(struct xmm_dev *xmm)
607{
608	// Wait for all commands to complete
609	// XXX locking?
610	int ret = wait_event_interruptible_timeout(xmm->wq, (xmm->cp->c_rptr == xmm->cp->c_wptr) || xmm->error, msecs_to_jiffies(1000));
611	if (ret == 0)
612		return -ETIMEDOUT;
613	if (ret < 0)
614		return ret;
615	return xmm->error;
616}
617
618static int xmm7360_cmd_ring_execute(struct xmm_dev *xmm, u8 cmd, u8 parm, u16 len, dma_addr_t ptr, u32 extra)
619{
620	u8 wptr = xmm->cp->c_wptr;
621	u8 new_wptr = (wptr + 1) % CMD_RING_SIZE;
622	if (xmm->error)
623		return xmm->error;
624	if (new_wptr == xmm->cp->c_rptr)	// ring full
625		return -EAGAIN;
626
627	xmm->cp->c_ring[wptr].ptr = ptr;
628	xmm->cp->c_ring[wptr].cmd = cmd;
629	xmm->cp->c_ring[wptr].parm = parm;
630	xmm->cp->c_ring[wptr].len = len;
631	xmm->cp->c_ring[wptr].extra = extra;
632	xmm->cp->c_ring[wptr].unk = 0;
633	xmm->cp->c_ring[wptr].flags = CMD_FLAG_READY;
634
635	xmm->cp->c_wptr = new_wptr;
636
637	xmm7360_ding(xmm, DOORBELL_CMD);
638	return xmm7360_cmd_ring_wait(xmm);
639}
640
641static int xmm7360_cmd_ring_init(struct xmm_dev *xmm) {
642	int timeout;
643	int ret;
644
645	xmm->cp = dma_alloc_coherent(xmm->dev, sizeof(struct control_page), &xmm->cp_phys, GFP_KERNEL);
646	BUG_ON(xmm->cp == NULL);
647
648	xmm->cp->ctl.status = xmm->cp_phys + offsetof(struct control_page, status);
649	xmm->cp->ctl.s_wptr = xmm->cp_phys + offsetof(struct control_page, s_wptr);
650	xmm->cp->ctl.s_rptr = xmm->cp_phys + offsetof(struct control_page, s_rptr);
651	xmm->cp->ctl.c_wptr = xmm->cp_phys + offsetof(struct control_page, c_wptr);
652	xmm->cp->ctl.c_rptr = xmm->cp_phys + offsetof(struct control_page, c_rptr);
653	xmm->cp->ctl.c_ring = xmm->cp_phys + offsetof(struct control_page, c_ring);
654	xmm->cp->ctl.c_ring_size = CMD_RING_SIZE;
655
656	xmm->bar2[BAR2_CONTROL] = xmm->cp_phys;
657	xmm->bar2[BAR2_CONTROLH] = xmm->cp_phys >> 32;
658
659	xmm->bar0[BAR0_MODE] = 1;
660
661	timeout = 100;
662	while (xmm->bar2[BAR2_MODE] == 0 && --timeout)
663		xmm7360_os_msleep(10);
664
665	if (!timeout)
666		return -ETIMEDOUT;
667
668	xmm->bar2[BAR2_BLANK0] = 0;
669	xmm->bar2[BAR2_BLANK1] = 0;
670	xmm->bar2[BAR2_BLANK2] = 0;
671	xmm->bar2[BAR2_BLANK3] = 0;
672
673	xmm->bar0[BAR0_MODE] = 2;	// enable intrs?
674
675	timeout = 100;
676	while (xmm->bar2[BAR2_MODE] != 2 && --timeout)
677		xmm7360_os_msleep(10);
678
679	if (!timeout)
680		return -ETIMEDOUT;
681
682	// enable going to sleep when idle
683	ret = xmm7360_cmd_ring_execute(xmm, CMD_WAKEUP, 0, 1, 0, 0);
684	if (ret)
685		return ret;
686
687	return 0;
688}
689
690static void xmm7360_cmd_ring_free(struct xmm_dev *xmm) {
691	if (xmm->bar0)
692		xmm->bar0[BAR0_MODE] = 0;
693	if (xmm->cp)
694		dma_free_coherent(xmm->dev, sizeof(struct control_page), (volatile void *)xmm->cp, xmm->cp_phys);
695	xmm->cp = NULL;
696	return;
697}
698
699static void xmm7360_td_ring_activate(struct xmm_dev *xmm, u8 ring_id)
700{
701	struct td_ring *ring = &xmm->td_ring[ring_id];
702	int ret __diagused;
703
704	xmm->cp->s_rptr[ring_id] = xmm->cp->s_wptr[ring_id] = 0;
705	ring->last_handled = 0;
706	ret = xmm7360_cmd_ring_execute(xmm, CMD_RING_OPEN, ring_id, ring->depth, ring->tds_phys, 0x60);
707	BUG_ON(ret);
708}
709
710static void xmm7360_td_ring_create(struct xmm_dev *xmm, u8 ring_id, u8 depth, u16 page_size)
711{
712	struct td_ring *ring = &xmm->td_ring[ring_id];
713	int i;
714
715	BUG_ON(ring->depth);
716	BUG_ON(depth & (depth-1));
717	BUG_ON(page_size > TD_MAX_PAGE_SIZE);
718
719	memset(ring, 0, sizeof(struct td_ring));
720	ring->depth = depth;
721	ring->page_size = page_size;
722	ring->tds = dma_alloc_coherent(xmm->dev, sizeof(struct td_ring_entry)*depth, &ring->tds_phys, GFP_KERNEL);
723
724	ring->pages = kzalloc(sizeof(void*)*depth, GFP_KERNEL);
725	ring->pages_phys = kzalloc(sizeof(dma_addr_t)*depth, GFP_KERNEL);
726
727	for (i=0; i<depth; i++) {
728		ring->pages[i] = dma_alloc_coherent(xmm->dev, ring->page_size, &ring->pages_phys[i], GFP_KERNEL);
729		ring->tds[i].addr = ring->pages_phys[i];
730	}
731
732	xmm7360_td_ring_activate(xmm, ring_id);
733}
734
735static void xmm7360_td_ring_deactivate(struct xmm_dev *xmm, u8 ring_id)
736{
737	xmm7360_cmd_ring_execute(xmm, CMD_RING_CLOSE, ring_id, 0, 0, 0);
738}
739
740static void xmm7360_td_ring_destroy(struct xmm_dev *xmm, u8 ring_id)
741{
742	struct td_ring *ring = &xmm->td_ring[ring_id];
743	int i, depth=ring->depth;
744
745	if (!depth) {
746		WARN_ON(1);
747		dev_err(xmm->dev, "Tried destroying empty ring!\n");
748		return;
749	}
750
751	xmm7360_td_ring_deactivate(xmm, ring_id);
752
753	for (i=0; i<depth; i++) {
754		dma_free_coherent(xmm->dev, ring->page_size, ring->pages[i], ring->pages_phys[i]);
755	}
756
757	kfree(ring->pages_phys);
758	kfree(ring->pages);
759
760	dma_free_coherent(xmm->dev, sizeof(struct td_ring_entry)*depth, ring->tds, ring->tds_phys);
761
762	ring->depth = 0;
763}
764
765static void xmm7360_td_ring_write(struct xmm_dev *xmm, u8 ring_id, const void *buf, int len)
766{
767	struct td_ring *ring = &xmm->td_ring[ring_id];
768	u8 wptr = xmm->cp->s_wptr[ring_id];
769
770	BUG_ON(!ring->depth);
771	BUG_ON(len > ring->page_size);
772	BUG_ON(ring_id & 1);
773
774	memcpy(ring->pages[wptr], buf, len);
775	ring->tds[wptr].length = len;
776	ring->tds[wptr].flags = 0;
777	ring->tds[wptr].unk = 0;
778
779	wptr = (wptr + 1) & (ring->depth - 1);
780	BUG_ON(wptr == xmm->cp->s_rptr[ring_id]);
781
782	xmm->cp->s_wptr[ring_id] = wptr;
783}
784
785static int xmm7360_td_ring_full(struct xmm_dev *xmm, u8 ring_id)
786{
787	struct td_ring *ring = &xmm->td_ring[ring_id];
788	u8 wptr = xmm->cp->s_wptr[ring_id];
789	wptr = (wptr + 1) & (ring->depth - 1);
790	return wptr == xmm->cp->s_rptr[ring_id];
791}
792
793static void xmm7360_td_ring_read(struct xmm_dev *xmm, u8 ring_id)
794{
795	struct td_ring *ring = &xmm->td_ring[ring_id];
796	u8 wptr = xmm->cp->s_wptr[ring_id];
797
798	if (!ring->depth) {
799		dev_err(xmm->dev, "read on disabled ring\n");
800		WARN_ON(1);
801		return;
802	}
803	if (!(ring_id & 1)) {
804		dev_err(xmm->dev, "read on write ring\n");
805		WARN_ON(1);
806		return;
807	}
808
809	ring->tds[wptr].length = ring->page_size;
810	ring->tds[wptr].flags = 0;
811	ring->tds[wptr].unk = 0;
812
813	wptr = (wptr + 1) & (ring->depth - 1);
814	BUG_ON(wptr == xmm->cp->s_rptr[ring_id]);
815
816	xmm->cp->s_wptr[ring_id] = wptr;
817}
818
819static struct queue_pair * xmm7360_init_qp(struct xmm_dev *xmm, int num, u8 depth, u16 page_size)
820{
821	struct queue_pair *qp = &xmm->qp[num];
822
823	qp->xmm = xmm;
824	qp->num = num;
825	qp->open = 0;
826	qp->depth = depth;
827	qp->page_size = page_size;
828
829	mutex_init(&qp->lock);
830	init_waitqueue_head(&qp->wq);
831	return qp;
832}
833
834static void xmm7360_qp_arm(struct xmm_dev *xmm, struct queue_pair *qp)
835{
836	while (!xmm7360_td_ring_full(xmm, qp->num*2+1))
837		xmm7360_td_ring_read(xmm, qp->num*2+1);
838	xmm7360_ding(xmm, DOORBELL_TD);
839}
840
841static int xmm7360_qp_start(struct queue_pair *qp)
842{
843	struct xmm_dev *xmm = qp->xmm;
844	int ret;
845
846	mutex_lock(&qp->lock);
847	if (qp->open) {
848		ret = -EBUSY;
849	} else {
850		ret = 0;
851		qp->open = 1;
852	}
853	mutex_unlock(&qp->lock);
854
855	if (ret == 0) {
856		xmm7360_td_ring_create(xmm, qp->num*2, qp->depth, qp->page_size);
857		xmm7360_td_ring_create(xmm, qp->num*2+1, qp->depth, qp->page_size);
858		xmm7360_qp_arm(xmm, qp);
859	}
860
861	return ret;
862}
863
864static void xmm7360_qp_resume(struct queue_pair *qp)
865{
866	struct xmm_dev *xmm = qp->xmm;
867
868	BUG_ON(!qp->open);
869	xmm7360_td_ring_activate(xmm, qp->num*2);
870	xmm7360_td_ring_activate(xmm, qp->num*2+1);
871	xmm7360_qp_arm(xmm, qp);
872}
873
874static int xmm7360_qp_stop(struct queue_pair *qp)
875{
876	struct xmm_dev *xmm = qp->xmm;
877	int ret = 0;
878
879	mutex_lock(&qp->lock);
880	if (!qp->open) {
881		ret = -ENODEV;
882	} else {
883		ret = 0;
884		/* still holding qp->open to prevent concurrent access */
885	}
886	mutex_unlock(&qp->lock);
887
888	if (ret == 0) {
889		xmm7360_td_ring_destroy(xmm, qp->num*2);
890		xmm7360_td_ring_destroy(xmm, qp->num*2+1);
891
892		mutex_lock(&qp->lock);
893		qp->open = 0;
894		mutex_unlock(&qp->lock);
895	}
896
897	return ret;
898}
899
900static void xmm7360_qp_suspend(struct queue_pair *qp)
901{
902	struct xmm_dev *xmm = qp->xmm;
903
904	BUG_ON(!qp->open);
905	xmm7360_td_ring_deactivate(xmm, qp->num*2);
906}
907
908static int xmm7360_qp_can_write(struct queue_pair *qp)
909{
910	struct xmm_dev *xmm = qp->xmm;
911	return !xmm7360_td_ring_full(xmm, qp->num*2);
912}
913
914static ssize_t xmm7360_qp_write(struct queue_pair *qp, const char *buf, size_t size)
915{
916	struct xmm_dev *xmm = qp->xmm;
917	int page_size = qp->xmm->td_ring[qp->num*2].page_size;
918	if (xmm->error)
919		return xmm->error;
920	if (!xmm7360_qp_can_write(qp))
921		return 0;
922	if (size > page_size)
923		size = page_size;
924	xmm7360_td_ring_write(xmm, qp->num*2, buf, size);
925	xmm7360_ding(xmm, DOORBELL_TD);
926	return size;
927}
928
929static ssize_t xmm7360_qp_write_user(struct queue_pair *qp, const char __user *buf, size_t size)
930{
931	int page_size = qp->xmm->td_ring[qp->num*2].page_size;
932	int ret;
933
934	if (size > page_size)
935		size = page_size;
936
937	ret = copy_from_user(qp->user_buf, buf, size);
938	size = size - ret;
939	if (!size)
940		return 0;
941	return xmm7360_qp_write(qp, qp->user_buf, size);
942}
943
944static int xmm7360_qp_has_data(struct queue_pair *qp)
945{
946	struct xmm_dev *xmm = qp->xmm;
947	struct td_ring *ring = &xmm->td_ring[qp->num*2+1];
948
949	return (xmm->cp->s_rptr[qp->num*2+1] != ring->last_handled);
950}
951
952static ssize_t xmm7360_qp_read_user(struct queue_pair *qp, char __user *buf, size_t size)
953{
954	struct xmm_dev *xmm = qp->xmm;
955	struct td_ring *ring = &xmm->td_ring[qp->num*2+1];
956	int idx, nread, ret;
957	// XXX locking?
958	ret = wait_event_interruptible(qp->wq, xmm7360_qp_has_data(qp) || xmm->error);
959	if (ret < 0)
960		return ret;
961	if (xmm->error)
962		return xmm->error;
963
964	idx = ring->last_handled;
965	nread = ring->tds[idx].length;
966	if (nread > size)
967		nread = size;
968	ret = copy_to_user(buf, ring->pages[idx], nread);
969	nread -= ret;
970	if (nread == 0)
971		return 0;
972
973	// XXX all data not fitting into buf+size is discarded
974	xmm7360_td_ring_read(xmm, qp->num*2+1);
975	xmm7360_ding(xmm, DOORBELL_TD);
976	ring->last_handled = (idx + 1) & (ring->depth - 1);
977
978	return nread;
979}
980
981static void xmm7360_tty_poll_qp(struct queue_pair *qp)
982{
983	struct xmm_dev *xmm = qp->xmm;
984	struct td_ring *ring = &xmm->td_ring[qp->num*2+1];
985	int idx, nread;
986	while (xmm7360_qp_has_data(qp)) {
987		idx = ring->last_handled;
988		nread = ring->tds[idx].length;
989		xmm7360_os_handle_tty_idata(qp, ring->pages[idx], nread);
990
991		xmm7360_td_ring_read(xmm, qp->num*2+1);
992		xmm7360_ding(xmm, DOORBELL_TD);
993		ring->last_handled = (idx + 1) & (ring->depth - 1);
994	}
995}
996
997#ifdef __linux__
998
999static void xmm7360_os_handle_tty_idata(struct queue_pair *qp, const u8 *data, size_t nread)
1000{
1001	tty_insert_flip_string(&qp->port, data, nread);
1002	tty_flip_buffer_push(&qp->port);
1003}
1004
1005int xmm7360_cdev_open (struct inode *inode, struct file *file)
1006{
1007	struct queue_pair *qp = container_of(inode->i_cdev, struct queue_pair, cdev);
1008	file->private_data = qp;
1009	return xmm7360_qp_start(qp);
1010}
1011
1012int xmm7360_cdev_release (struct inode *inode, struct file *file)
1013{
1014	struct queue_pair *qp = file->private_data;
1015	return xmm7360_qp_stop(qp);
1016}
1017
1018ssize_t xmm7360_cdev_write (struct file *file, const char __user *buf, size_t size, loff_t *offset)
1019{
1020	struct queue_pair *qp = file->private_data;
1021	int ret;
1022
1023	ret = xmm7360_qp_write_user(qp, buf, size);
1024	if (ret < 0)
1025		return ret;
1026
1027	*offset += ret;
1028	return ret;
1029}
1030
1031ssize_t xmm7360_cdev_read (struct file *file, char __user *buf, size_t size, loff_t *offset)
1032{
1033	struct queue_pair *qp = file->private_data;
1034	int ret;
1035
1036	ret = xmm7360_qp_read_user(qp, buf, size);
1037	if (ret < 0)
1038		return ret;
1039
1040	*offset += ret;
1041	return ret;
1042}
1043
1044static unsigned int xmm7360_cdev_poll(struct file *file, poll_table *wait)
1045{
1046	struct queue_pair *qp = file->private_data;
1047	unsigned int mask = 0;
1048
1049	poll_wait(file, &qp->wq, wait);
1050
1051	if (qp->xmm->error)
1052		return POLLHUP;
1053
1054	if (xmm7360_qp_has_data(qp))
1055		mask |= POLLIN | POLLRDNORM;
1056
1057	if (xmm7360_qp_can_write(qp))
1058		mask |= POLLOUT | POLLWRNORM;
1059
1060	return mask;
1061}
1062
1063static long xmm7360_cdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1064{
1065	struct queue_pair *qp = file->private_data;
1066
1067	u32 val;
1068
1069	switch (cmd) {
1070		case XMM7360_IOCTL_GET_PAGE_SIZE:
1071			val = qp->xmm->td_ring[qp->num*2].page_size;
1072			if (copy_to_user((u32*)arg, &val, sizeof(u32)))
1073				return -EFAULT;
1074			return 0;
1075	}
1076
1077	return -ENOTTY;
1078}
1079
1080static struct file_operations xmm7360_fops = {
1081	.read		= xmm7360_cdev_read,
1082	.write		= xmm7360_cdev_write,
1083	.poll		= xmm7360_cdev_poll,
1084	.unlocked_ioctl	= xmm7360_cdev_ioctl,
1085	.open		= xmm7360_cdev_open,
1086	.release	= xmm7360_cdev_release
1087};
1088
1089#endif /* __linux__ */
1090
1091static void xmm7360_mux_frame_init(struct xmm_net *xn, struct mux_frame *frame, int sequence)
1092{
1093	frame->sequence = xn->sequence;
1094	frame->max_size = xn->xmm->td_ring[0].page_size;
1095	frame->n_packets = 0;
1096	frame->n_bytes = 0;
1097	frame->last_tag_next = NULL;
1098	frame->last_tag_length = NULL;
1099}
1100
1101static void xmm7360_mux_frame_add_tag(struct mux_frame *frame, uint32_t tag, uint16_t extra, void *data, int data_len)
1102{
1103	int total_length;
1104	if (frame->n_bytes == 0)
1105		total_length = sizeof(struct mux_first_header) + data_len;
1106	else
1107		total_length = sizeof(struct mux_next_header) + data_len;
1108
1109	while (frame->n_bytes & 3)
1110		frame->n_bytes++;
1111
1112	BUG_ON(frame->n_bytes + total_length > frame->max_size);
1113
1114	if (frame->last_tag_next)
1115		*frame->last_tag_next = frame->n_bytes;
1116
1117	if (frame->n_bytes == 0) {
1118		struct mux_first_header *hdr = (struct mux_first_header *)frame->data;
1119		memset(hdr, 0, sizeof(struct mux_first_header));
1120		hdr->tag = htonl(tag);
1121		hdr->sequence = frame->sequence;
1122		hdr->length = total_length;
1123		hdr->extra = extra;
1124		frame->last_tag_length = &hdr->length;
1125		frame->last_tag_next = &hdr->next;
1126		frame->n_bytes += sizeof(struct mux_first_header);
1127	} else {
1128		struct mux_next_header *hdr = (struct mux_next_header *)(&frame->data[frame->n_bytes]);
1129		memset(hdr, 0, sizeof(struct mux_next_header));
1130		hdr->tag = htonl(tag);
1131		hdr->length = total_length;
1132		hdr->extra = extra;
1133		frame->last_tag_length = &hdr->length;
1134		frame->last_tag_next = &hdr->next;
1135		frame->n_bytes += sizeof(struct mux_next_header);
1136	}
1137
1138	if (data_len) {
1139		memcpy(&frame->data[frame->n_bytes], data, data_len);
1140		frame->n_bytes += data_len;
1141	}
1142}
1143
1144static void xmm7360_mux_frame_append_data(struct mux_frame *frame, const void *data, int data_len)
1145{
1146	BUG_ON(frame->n_bytes + data_len > frame->max_size);
1147	BUG_ON(!frame->last_tag_length);
1148
1149	memcpy(&frame->data[frame->n_bytes], data, data_len);
1150	*frame->last_tag_length += data_len;
1151	frame->n_bytes += data_len;
1152}
1153
1154static int xmm7360_mux_frame_append_packet(struct mux_frame *frame, const void *data, int data_len)
1155{
1156	int expected_adth_size = sizeof(struct mux_next_header) + 4 + (frame->n_packets+1)*sizeof(struct mux_bounds);
1157	uint8_t pad[16];
1158
1159	if (frame->n_packets >= MUX_MAX_PACKETS)
1160		return -1;
1161
1162	if (frame->n_bytes + data_len + 16 + expected_adth_size > frame->max_size)
1163		return -1;
1164
1165	BUG_ON(!frame->last_tag_length);
1166
1167	frame->bounds[frame->n_packets].offset = frame->n_bytes;
1168	frame->bounds[frame->n_packets].length = data_len + 16;
1169	frame->n_packets++;
1170
1171	memset(pad, 0, sizeof(pad));
1172	xmm7360_mux_frame_append_data(frame, pad, 16);
1173	xmm7360_mux_frame_append_data(frame, data, data_len);
1174	return 0;
1175}
1176
1177static int xmm7360_mux_frame_push(struct xmm_dev *xmm, struct mux_frame *frame)
1178{
1179	struct mux_first_header *hdr = (void*)&frame->data[0];
1180	int ret;
1181	hdr->length = frame->n_bytes;
1182
1183	ret = xmm7360_qp_write(xmm->net->qp, frame->data, frame->n_bytes);
1184	if (ret < 0)
1185		return ret;
1186	return 0;
1187}
1188
1189static int xmm7360_mux_control(struct xmm_net *xn, u32 arg1, u32 arg2, u32 arg3, u32 arg4)
1190{
1191	struct mux_frame *frame = &xn->frame;
1192	int ret;
1193	uint32_t cmdh_args[] = {arg1, arg2, arg3, arg4};
1194	unsigned long flags __unused;
1195
1196	spin_lock_irqsave(&xn->lock, flags);
1197
1198	xmm7360_mux_frame_init(xn, frame, 0);
1199	xmm7360_mux_frame_add_tag(frame, XMM_TAG_ACBH, 0, NULL, 0);
1200	xmm7360_mux_frame_add_tag(frame, XMM_TAG_CMDH, xn->channel, cmdh_args, sizeof(cmdh_args));
1201	ret = xmm7360_mux_frame_push(xn->xmm, frame);
1202
1203	spin_unlock_irqrestore(&xn->lock, flags);
1204
1205	return ret;
1206}
1207
1208static void xmm7360_net_flush(struct xmm_net *xn)
1209{
1210	struct mux_frame *frame = &xn->frame;
1211	int ret;
1212	u32 unknown = 0;
1213
1214#ifdef __linux__
1215	/* Never called with empty queue */
1216	BUG_ON(skb_queue_empty(&xn->queue));
1217#endif
1218	BUG_ON(!xmm7360_qp_can_write(xn->qp));
1219
1220	xmm7360_mux_frame_init(xn, frame, xn->sequence++);
1221	xmm7360_mux_frame_add_tag(frame, XMM_TAG_ADBH, 0, NULL, 0);
1222
1223	xmm7360_os_handle_net_dequeue(xn, frame);
1224	xn->queued_packets = xn->queued_bytes = 0;
1225
1226	xmm7360_mux_frame_add_tag(frame, XMM_TAG_ADTH, xn->channel, &unknown, sizeof(uint32_t));
1227	xmm7360_mux_frame_append_data(frame, &frame->bounds[0], sizeof(struct mux_bounds)*frame->n_packets);
1228
1229	ret = xmm7360_mux_frame_push(xn->xmm, frame);
1230	if (ret)
1231		goto drop;
1232
1233	return;
1234
1235drop:
1236	dev_err(xn->xmm->dev, "Failed to ship coalesced frame");
1237}
1238
1239static int xmm7360_base_init(struct xmm_dev *xmm)
1240{
1241	int ret, i;
1242	u32 status;
1243
1244	xmm->error = 0;
1245	xmm->num_ttys = 0;
1246
1247	status = xmm->bar2[BAR2_STATUS];
1248	if (status == XMM_MODEM_BOOTING) {
1249		dev_info(xmm->dev, "modem still booting, waiting...\n");
1250		for (i=0; i<100; i++) {
1251			status = xmm->bar2[BAR2_STATUS];
1252			if (status != XMM_MODEM_BOOTING)
1253				break;
1254			xmm7360_os_msleep(200);
1255		}
1256	}
1257
1258	if (status != XMM_MODEM_READY) {
1259		dev_err(xmm->dev, "unknown modem status: 0x%08x\n", status);
1260		return -EINVAL;
1261	}
1262
1263	dev_info(xmm->dev, "modem is ready\n");
1264
1265	ret = xmm7360_cmd_ring_init(xmm);
1266	if (ret) {
1267		dev_err(xmm->dev, "Could not bring up command ring %d\n",
1268		    ret);
1269		return ret;
1270	}
1271
1272	return 0;
1273}
1274
1275static void xmm7360_net_mux_handle_frame(struct xmm_net *xn, u8 *data, int len)
1276{
1277	struct mux_first_header *first;
1278	struct mux_next_header *adth;
1279	int n_packets, i;
1280	struct mux_bounds *bounds;
1281
1282	first = (void*)data;
1283	if (ntohl(first->tag) == XMM_TAG_ACBH)
1284		return;
1285
1286	if (ntohl(first->tag) != XMM_TAG_ADBH) {
1287		dev_info(xn->xmm->dev, "Unexpected tag %x\n", first->tag);
1288		return;
1289	}
1290
1291	adth = (void*)(&data[first->next]);
1292	if (ntohl(adth->tag) != XMM_TAG_ADTH) {
1293		dev_err(xn->xmm->dev, "Unexpected tag %x, expected ADTH\n", adth->tag);
1294		return;
1295	}
1296
1297	n_packets = (adth->length - sizeof(struct mux_next_header) - 4) / sizeof(struct mux_bounds);
1298
1299	bounds = (void*)&data[first->next + sizeof(struct mux_next_header) + 4];
1300
1301	for (i=0; i<n_packets; i++) {
1302		if (!bounds[i].length)
1303			continue;
1304
1305		xmm7360_os_handle_net_frame(xn->xmm,
1306		    &data[bounds[i].offset], bounds[i].length);
1307	}
1308}
1309
1310static void xmm7360_net_poll(struct xmm_dev *xmm)
1311{
1312	struct queue_pair *qp;
1313	struct td_ring *ring;
1314	int idx, nread;
1315	struct xmm_net *xn = xmm->net;
1316	unsigned long flags __unused;
1317
1318	BUG_ON(!xn);
1319
1320	qp = xn->qp;
1321	ring = &xmm->td_ring[qp->num*2+1];
1322
1323	spin_lock_irqsave(&xn->lock, flags);
1324
1325	if (xmm7360_qp_can_write(qp))
1326		xmm7360_os_handle_net_txwake(xn);
1327
1328	while (xmm7360_qp_has_data(qp)) {
1329		idx = ring->last_handled;
1330		nread = ring->tds[idx].length;
1331		xmm7360_net_mux_handle_frame(xn, ring->pages[idx], nread);
1332
1333		xmm7360_td_ring_read(xmm, qp->num*2+1);
1334		xmm7360_ding(xmm, DOORBELL_TD);
1335		ring->last_handled = (idx + 1) & (ring->depth - 1);
1336	}
1337
1338	spin_unlock_irqrestore(&xn->lock, flags);
1339}
1340
1341#ifdef __linux__
1342
1343static void xmm7360_net_uninit(struct net_device *dev)
1344{
1345}
1346
1347static int xmm7360_net_open(struct net_device *dev)
1348{
1349	struct xmm_net *xn = netdev_priv(dev);
1350	xn->queued_packets = xn->queued_bytes = 0;
1351	skb_queue_purge(&xn->queue);
1352	netif_start_queue(dev);
1353	return xmm7360_mux_control(xn, 1, 0, 0, 0);
1354}
1355
1356static int xmm7360_net_close(struct net_device *dev)
1357{
1358	netif_stop_queue(dev);
1359	return 0;
1360}
1361
1362static int xmm7360_net_must_flush(struct xmm_net *xn, int new_packet_bytes)
1363{
1364	int frame_size;
1365	if (xn->queued_packets >= MUX_MAX_PACKETS)
1366		return 1;
1367
1368	frame_size = sizeof(struct mux_first_header) + xn->queued_bytes + sizeof(struct mux_next_header) + 4 + sizeof(struct mux_bounds)*xn->queued_packets;
1369
1370	frame_size += 16 + new_packet_bytes + sizeof(struct mux_bounds);
1371
1372	return frame_size > xn->frame.max_size;
1373}
1374
1375static enum hrtimer_restart xmm7360_net_deadline_cb(struct hrtimer *t)
1376{
1377	struct xmm_net *xn = container_of(t, struct xmm_net, deadline);
1378	unsigned long flags;
1379	spin_lock_irqsave(&xn->lock, flags);
1380	if (!skb_queue_empty(&xn->queue) && xmm7360_qp_can_write(xn->qp))
1381		xmm7360_net_flush(xn);
1382	spin_unlock_irqrestore(&xn->lock, flags);
1383	return HRTIMER_NORESTART;
1384}
1385
1386static netdev_tx_t xmm7360_net_xmit(struct sk_buff *skb, struct net_device *dev)
1387{
1388	struct xmm_net *xn = netdev_priv(dev);
1389	ktime_t kt;
1390	unsigned long flags;
1391
1392	if (netif_queue_stopped(dev))
1393		return NETDEV_TX_BUSY;
1394
1395	skb_orphan(skb);
1396
1397	spin_lock_irqsave(&xn->lock, flags);
1398	if (xmm7360_net_must_flush(xn, skb->len)) {
1399		if (xmm7360_qp_can_write(xn->qp)) {
1400			xmm7360_net_flush(xn);
1401		} else {
1402			netif_stop_queue(dev);
1403			spin_unlock_irqrestore(&xn->lock, flags);
1404			return NETDEV_TX_BUSY;
1405		}
1406	}
1407
1408	xn->queued_packets++;
1409	xn->queued_bytes += 16 + skb->len;
1410	skb_queue_tail(&xn->queue, skb);
1411
1412	spin_unlock_irqrestore(&xn->lock, flags);
1413
1414	if (!hrtimer_active(&xn->deadline)) {
1415		kt = ktime_set(0, 100000);
1416		hrtimer_start(&xn->deadline, kt, HRTIMER_MODE_REL);
1417	}
1418
1419	return NETDEV_TX_OK;
1420}
1421
1422static void xmm7360_os_handle_net_frame(struct xmm_dev *xmm, const u8 *buf, size_t sz)
1423{
1424	struct sk_buff *skb;
1425	void *p;
1426	u8 ip_version;
1427
1428	skb = dev_alloc_skb(sz + NET_IP_ALIGN);
1429	if (!skb)
1430		return;
1431	skb_reserve(skb, NET_IP_ALIGN);
1432	p = skb_put(skb, sz);
1433	memcpy(p, buf, sz);
1434
1435	skb->dev = xmm->netdev;
1436
1437	ip_version = skb->data[0] >> 4;
1438	if (ip_version == 4) {
1439		skb->protocol = htons(ETH_P_IP);
1440	} else if (ip_version == 6) {
1441		skb->protocol = htons(ETH_P_IPV6);
1442	} else {
1443		kfree_skb(skb);
1444		return;
1445	}
1446
1447	netif_rx(skb);
1448}
1449
1450static void xmm7360_os_handle_net_dequeue(struct xmm_net *xn, struct mux_frame *frame)
1451{
1452	struct sk_buff *skb;
1453	int ret;
1454
1455	while ((skb = skb_dequeue(&xn->queue))) {
1456		ret = xmm7360_mux_frame_append_packet(frame,
1457		    skb->data, skb->len);
1458		kfree_skb(skb);
1459		if (ret) {
1460			/* No more space in the frame */
1461			break;
1462		}
1463	}
1464}
1465
1466static void xmm7360_os_handle_net_txwake(struct xmm_net *xn)
1467{
1468	BUG_ON(!xmm7360_qp_can_write(xn->qp));
1469
1470	if (netif_queue_stopped(xn->xmm->netdev))
1471		netif_wake_queue(xn->xmm->netdev);
1472}
1473
1474static const struct net_device_ops xmm7360_netdev_ops = {
1475	.ndo_uninit		= xmm7360_net_uninit,
1476	.ndo_open		= xmm7360_net_open,
1477	.ndo_stop		= xmm7360_net_close,
1478	.ndo_start_xmit		= xmm7360_net_xmit,
1479};
1480
1481static void xmm7360_net_setup(struct net_device *dev)
1482{
1483	struct xmm_net *xn = netdev_priv(dev);
1484	spin_lock_init(&xn->lock);
1485	hrtimer_init(&xn->deadline, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1486	xn->deadline.function = xmm7360_net_deadline_cb;
1487	skb_queue_head_init(&xn->queue);
1488
1489	dev->netdev_ops = &xmm7360_netdev_ops;
1490
1491	dev->hard_header_len = 0;
1492	dev->addr_len = 0;
1493	dev->mtu = 1500;
1494	dev->min_mtu = 1500;
1495	dev->max_mtu = 1500;
1496
1497	dev->tx_queue_len = 1000;
1498
1499	dev->type = ARPHRD_NONE;
1500	dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
1501}
1502
1503static int xmm7360_create_net(struct xmm_dev *xmm, int num)
1504{
1505	struct net_device *netdev;
1506	struct xmm_net *xn;
1507	int ret;
1508
1509	netdev = alloc_netdev(sizeof(struct xmm_net), "wwan%d", NET_NAME_UNKNOWN, xmm7360_net_setup);
1510
1511	if (!netdev)
1512		return -ENOMEM;
1513
1514	SET_NETDEV_DEV(netdev, xmm->dev);
1515
1516	xmm->netdev = netdev;
1517
1518	xn = netdev_priv(netdev);
1519	xn->xmm = xmm;
1520	xmm->net = xn;
1521
1522	rtnl_lock();
1523	ret = register_netdevice(netdev);
1524	rtnl_unlock();
1525
1526	xn->qp = xmm7360_init_qp(xmm, num, 128, TD_MAX_PAGE_SIZE);
1527
1528	if (!ret)
1529		ret = xmm7360_qp_start(xn->qp);
1530
1531	if (ret < 0) {
1532		free_netdev(netdev);
1533		xmm->netdev = NULL;
1534		xmm7360_qp_stop(xn->qp);
1535	}
1536
1537	return ret;
1538}
1539
1540static void xmm7360_destroy_net(struct xmm_dev *xmm)
1541{
1542	if (xmm->netdev) {
1543		xmm7360_qp_stop(xmm->net->qp);
1544		rtnl_lock();
1545		unregister_netdevice(xmm->netdev);
1546		rtnl_unlock();
1547		free_netdev(xmm->netdev);
1548		xmm->net = NULL;
1549		xmm->netdev = NULL;
1550	}
1551}
1552
1553static irqreturn_t xmm7360_irq0(int irq, void *dev_id) {
1554	struct xmm_dev *xmm = dev_id;
1555	struct queue_pair *qp;
1556	int id;
1557
1558	xmm7360_poll(xmm);
1559	wake_up(&xmm->wq);
1560	if (xmm->td_ring) {
1561		if (xmm->net)
1562			xmm7360_net_poll(xmm);
1563
1564		for (id=1; id<XMM_QP_COUNT; id++) {
1565			qp = &xmm->qp[id];
1566
1567			/* wake _cdev_read() */
1568			if (qp->open)
1569				wake_up(&qp->wq);
1570
1571			/* tty tasks */
1572			if (qp->open && qp->port.ops) {
1573				xmm7360_tty_poll_qp(qp);
1574				if (qp->tty_needs_wake && xmm7360_qp_can_write(qp) && qp->port.tty) {
1575					struct tty_ldisc *ldisc = tty_ldisc_ref(qp->port.tty);
1576					if (ldisc) {
1577						if (ldisc->ops->write_wakeup)
1578							ldisc->ops->write_wakeup(qp->port.tty);
1579						tty_ldisc_deref(ldisc);
1580					}
1581					qp->tty_needs_wake = 0;
1582				}
1583			}
1584		}
1585	}
1586
1587	return IRQ_HANDLED;
1588}
1589
1590static dev_t xmm_base;
1591
1592static struct tty_driver *xmm7360_tty_driver;
1593
1594static void xmm7360_dev_deinit(struct xmm_dev *xmm)
1595{
1596	int i;
1597	xmm->error = -ENODEV;
1598
1599	cancel_work_sync(&xmm->init_work);
1600
1601	xmm7360_destroy_net(xmm);
1602
1603	for (i=0; i<XMM_QP_COUNT; i++) {
1604		if (xmm->qp[i].xmm) {
1605			if (xmm->qp[i].cdev.owner) {
1606				cdev_del(&xmm->qp[i].cdev);
1607				device_unregister(&xmm->qp[i].dev);
1608			}
1609			if (xmm->qp[i].port.ops) {
1610				tty_unregister_device(xmm7360_tty_driver, xmm->qp[i].tty_index);
1611				tty_port_destroy(&xmm->qp[i].port);
1612			}
1613		}
1614		memset(&xmm->qp[i], 0, sizeof(struct queue_pair));
1615	}
1616	xmm7360_cmd_ring_free(xmm);
1617
1618}
1619
1620static void xmm7360_remove(struct pci_dev *dev)
1621{
1622	struct xmm_dev *xmm = pci_get_drvdata(dev);
1623
1624	xmm7360_dev_deinit(xmm);
1625
1626	if (xmm->irq)
1627		free_irq(xmm->irq, xmm);
1628	pci_free_irq_vectors(dev);
1629	pci_release_region(dev, 0);
1630	pci_release_region(dev, 2);
1631	pci_disable_device(dev);
1632	kfree(xmm);
1633}
1634
1635static void xmm7360_cdev_dev_release(struct device *dev)
1636{
1637}
1638
1639static int xmm7360_tty_open(struct tty_struct *tty, struct file *filp)
1640{
1641	struct queue_pair *qp = tty->driver_data;
1642	return tty_port_open(&qp->port, tty, filp);
1643}
1644
1645static void xmm7360_tty_close(struct tty_struct *tty, struct file *filp)
1646{
1647	struct queue_pair *qp = tty->driver_data;
1648	if (qp)
1649		tty_port_close(&qp->port, tty, filp);
1650}
1651
1652static int xmm7360_tty_write(struct tty_struct *tty, const unsigned char *buffer,
1653		      int count)
1654{
1655	struct queue_pair *qp = tty->driver_data;
1656	int written;
1657	written = xmm7360_qp_write(qp, buffer, count);
1658	if (written < count)
1659		qp->tty_needs_wake = 1;
1660	return written;
1661}
1662
1663static int xmm7360_tty_write_room(struct tty_struct *tty)
1664{
1665	struct queue_pair *qp = tty->driver_data;
1666	if (!xmm7360_qp_can_write(qp))
1667		return 0;
1668	else
1669		return qp->xmm->td_ring[qp->num*2].page_size;
1670}
1671
1672static int xmm7360_tty_install(struct tty_driver *driver, struct tty_struct *tty)
1673{
1674	struct queue_pair *qp;
1675	int ret;
1676
1677	ret = tty_standard_install(driver, tty);
1678	if (ret)
1679		return ret;
1680
1681	tty->port = driver->ports[tty->index];
1682	qp = container_of(tty->port, struct queue_pair, port);
1683	tty->driver_data = qp;
1684	return 0;
1685}
1686
1687
1688static int xmm7360_tty_port_activate(struct tty_port *tport, struct tty_struct *tty)
1689{
1690	struct queue_pair *qp = tty->driver_data;
1691	return xmm7360_qp_start(qp);
1692}
1693
1694static void xmm7360_tty_port_shutdown(struct tty_port *tport)
1695{
1696	struct queue_pair *qp = tport->tty->driver_data;
1697	xmm7360_qp_stop(qp);
1698}
1699
1700
1701static const struct tty_port_operations xmm7360_tty_port_ops = {
1702	.activate = xmm7360_tty_port_activate,
1703	.shutdown = xmm7360_tty_port_shutdown,
1704};
1705
1706static const struct tty_operations xmm7360_tty_ops = {
1707	.open = xmm7360_tty_open,
1708	.close = xmm7360_tty_close,
1709	.write = xmm7360_tty_write,
1710	.write_room = xmm7360_tty_write_room,
1711	.install = xmm7360_tty_install,
1712};
1713
1714static int xmm7360_create_tty(struct xmm_dev *xmm, int num)
1715{
1716	struct device *tty_dev;
1717	struct queue_pair *qp = xmm7360_init_qp(xmm, num, 8, 4096);
1718	int ret;
1719	tty_port_init(&qp->port);
1720	qp->port.low_latency = 1;
1721	qp->port.ops = &xmm7360_tty_port_ops;
1722	qp->tty_index = xmm->num_ttys++;
1723	tty_dev = tty_port_register_device(&qp->port, xmm7360_tty_driver, qp->tty_index, xmm->dev);
1724
1725	if (IS_ERR(tty_dev)) {
1726		qp->port.ops = NULL;	// prevent calling unregister
1727		ret = PTR_ERR(tty_dev);
1728		dev_err(xmm->dev, "Could not allocate tty?\n");
1729		tty_port_destroy(&qp->port);
1730		return ret;
1731	}
1732
1733	return 0;
1734}
1735
1736static int xmm7360_create_cdev(struct xmm_dev *xmm, int num, const char *name, int cardnum)
1737{
1738	struct queue_pair *qp = xmm7360_init_qp(xmm, num, 16, TD_MAX_PAGE_SIZE);
1739	int ret;
1740
1741	cdev_init(&qp->cdev, &xmm7360_fops);
1742	qp->cdev.owner = THIS_MODULE;
1743	device_initialize(&qp->dev);
1744	qp->dev.devt = MKDEV(MAJOR(xmm_base), num); // XXX multiple cards
1745	qp->dev.parent = &xmm->pci_dev->dev;
1746	qp->dev.release = xmm7360_cdev_dev_release;
1747	dev_set_name(&qp->dev, name, cardnum);
1748	dev_set_drvdata(&qp->dev, qp);
1749	ret = cdev_device_add(&qp->cdev, &qp->dev);
1750	if (ret) {
1751		dev_err(xmm->dev, "cdev_device_add: %d\n", ret);
1752		return ret;
1753	}
1754	return 0;
1755}
1756
1757static int xmm7360_dev_init(struct xmm_dev *xmm)
1758{
1759	int ret;
1760
1761	ret = xmm7360_base_init(xmm);
1762	if (ret)
1763		return ret;
1764
1765	ret = xmm7360_create_cdev(xmm, 1, "xmm%d/rpc", xmm->card_num);
1766	if (ret)
1767		return ret;
1768	ret = xmm7360_create_cdev(xmm, 3, "xmm%d/trace", xmm->card_num);
1769	if (ret)
1770		return ret;
1771	ret = xmm7360_create_tty(xmm, 2);
1772	if (ret)
1773		return ret;
1774	ret = xmm7360_create_tty(xmm, 4);
1775	if (ret)
1776		return ret;
1777	ret = xmm7360_create_tty(xmm, 7);
1778	if (ret)
1779		return ret;
1780	ret = xmm7360_create_net(xmm, 0);
1781	if (ret)
1782		return ret;
1783
1784	return 0;
1785}
1786
1787void xmm7360_dev_init_work(struct work_struct *work)
1788{
1789	struct xmm_dev *xmm = container_of(work, struct xmm_dev, init_work);
1790	xmm7360_dev_init(xmm);
1791}
1792
1793static int xmm7360_probe(struct pci_dev *dev, const struct pci_device_id *id)
1794{
1795	struct xmm_dev *xmm = kzalloc(sizeof(struct xmm_dev), GFP_KERNEL);
1796	int ret;
1797
1798	xmm->pci_dev = dev;
1799	xmm->dev = &dev->dev;
1800
1801	if (!xmm) {
1802		dev_err(&(dev->dev), "kzalloc\n");
1803		return -ENOMEM;
1804	}
1805
1806	ret = pci_enable_device(dev);
1807	if (ret) {
1808		dev_err(&(dev->dev), "pci_enable_device\n");
1809		goto fail;
1810	}
1811	pci_set_master(dev);
1812
1813	ret = pci_set_dma_mask(dev, 0xffffffffffffffff);
1814	if (ret) {
1815		dev_err(xmm->dev, "Cannot set DMA mask\n");
1816		goto fail;
1817	}
1818	dma_set_coherent_mask(xmm->dev, 0xffffffffffffffff);
1819
1820
1821	ret = pci_request_region(dev, 0, "xmm0");
1822	if (ret) {
1823		dev_err(&(dev->dev), "pci_request_region(0)\n");
1824		goto fail;
1825	}
1826	xmm->bar0 = pci_iomap(dev, 0, pci_resource_len(dev, 0));
1827
1828	ret = pci_request_region(dev, 2, "xmm2");
1829	if (ret) {
1830		dev_err(&(dev->dev), "pci_request_region(2)\n");
1831		goto fail;
1832	}
1833	xmm->bar2 = pci_iomap(dev, 2, pci_resource_len(dev, 2));
1834
1835	ret = pci_alloc_irq_vectors(dev, 1, 1, PCI_IRQ_MSI | PCI_IRQ_MSIX);
1836	if (ret < 0) {
1837		dev_err(&(dev->dev), "pci_alloc_irq_vectors\n");
1838		goto fail;
1839	}
1840
1841	init_waitqueue_head(&xmm->wq);
1842	INIT_WORK(&xmm->init_work, xmm7360_dev_init_work);
1843
1844	pci_set_drvdata(dev, xmm);
1845
1846	ret = xmm7360_dev_init(xmm);
1847	if (ret)
1848		goto fail;
1849
1850	xmm->irq = pci_irq_vector(dev, 0);
1851	ret = request_irq(xmm->irq, xmm7360_irq0, 0, "xmm7360", xmm);
1852	if (ret) {
1853		dev_err(&(dev->dev), "request_irq\n");
1854		goto fail;
1855	}
1856
1857	return ret;
1858
1859fail:
1860	xmm7360_dev_deinit(xmm);
1861	xmm7360_remove(dev);
1862	return ret;
1863}
1864
1865static struct pci_driver xmm7360_driver = {
1866	.name		= "xmm7360",
1867	.id_table	= xmm7360_ids,
1868	.probe		= xmm7360_probe,
1869	.remove		= xmm7360_remove,
1870};
1871
1872static int xmm7360_init(void)
1873{
1874	int ret;
1875	ret = alloc_chrdev_region(&xmm_base, 0, 8, "xmm");
1876	if (ret)
1877		return ret;
1878
1879	xmm7360_tty_driver = alloc_tty_driver(8);
1880	if (!xmm7360_tty_driver)
1881		return -ENOMEM;
1882
1883	xmm7360_tty_driver->driver_name = "xmm7360";
1884	xmm7360_tty_driver->name = "ttyXMM";
1885	xmm7360_tty_driver->major = 0;
1886	xmm7360_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
1887	xmm7360_tty_driver->subtype = SERIAL_TYPE_NORMAL;
1888	xmm7360_tty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
1889	xmm7360_tty_driver->init_termios = tty_std_termios;
1890	xmm7360_tty_driver->init_termios.c_cflag = B115200 | CS8 | CREAD | \
1891						HUPCL | CLOCAL;
1892	xmm7360_tty_driver->init_termios.c_lflag &= ~ECHO;
1893	xmm7360_tty_driver->init_termios.c_ispeed = 115200;
1894	xmm7360_tty_driver->init_termios.c_ospeed = 115200;
1895	tty_set_operations(xmm7360_tty_driver, &xmm7360_tty_ops);
1896
1897	ret = tty_register_driver(xmm7360_tty_driver);
1898	if (ret) {
1899		pr_err("xmm7360: failed to register xmm7360_tty driver\n");
1900		return ret;
1901	}
1902
1903
1904	ret = pci_register_driver(&xmm7360_driver);
1905	if (ret)
1906		return ret;
1907
1908	return 0;
1909}
1910
1911static void xmm7360_exit(void)
1912{
1913	pci_unregister_driver(&xmm7360_driver);
1914	unregister_chrdev_region(xmm_base, 8);
1915	tty_unregister_driver(xmm7360_tty_driver);
1916	put_tty_driver(xmm7360_tty_driver);
1917}
1918
1919module_init(xmm7360_init);
1920module_exit(xmm7360_exit);
1921
1922#endif /* __linux__ */
1923
1924#if defined(__OpenBSD__) || defined(__NetBSD__)
1925
1926/*
1927 * RPC and trace devices behave as regular character device,
1928 * other devices behave as terminal.
1929 */
1930#define DEVCUA(x)	(minor(x) & 0x80)
1931#define DEVUNIT(x)	((minor(x) & 0x70) >> 4)
1932#define DEVFUNC_MASK	0x0f
1933#define DEVFUNC(x)	(minor(x) & DEVFUNC_MASK)
1934#define DEV_IS_TTY(x)	(DEVFUNC(x) == 2 || DEVFUNC(x) > 3)
1935
1936struct wwanc_softc {
1937#ifdef __OpenBSD__
1938	struct device		sc_devx;	/* gen. device info storage */
1939#endif
1940	struct device		*sc_dev;	/* generic device information */
1941        pci_chipset_tag_t       sc_pc;
1942        pcitag_t                sc_tag;
1943	bus_dma_tag_t		sc_dmat;
1944	pci_intr_handle_t	sc_pih;
1945        void                    *sc_ih;         /* interrupt vectoring */
1946
1947	bus_space_tag_t		sc_bar0_tag;
1948	bus_space_handle_t	sc_bar0_handle;
1949	bus_size_t		sc_bar0_sz;
1950	bus_space_tag_t		sc_bar2_tag;
1951	bus_space_handle_t	sc_bar2_handle;
1952	bus_size_t		sc_bar2_sz;
1953
1954	struct xmm_dev		sc_xmm;
1955	struct tty		*sc_tty[XMM_QP_COUNT];
1956	struct device		*sc_net;
1957	struct selinfo		sc_selr, sc_selw;
1958	bool			sc_resume;
1959};
1960
1961struct wwanc_attach_args {
1962	enum wwanc_type {
1963		WWMC_TYPE_RPC,
1964		WWMC_TYPE_TRACE,
1965		WWMC_TYPE_TTY,
1966		WWMC_TYPE_NET
1967	} aa_type;
1968};
1969
1970static int     wwanc_match(struct device *, cfdata_t, void *);
1971static void    wwanc_attach(struct device *, struct device *, void *);
1972static int     wwanc_detach(struct device *, int);
1973
1974#ifdef __OpenBSD__
1975static int     wwanc_activate(struct device *, int);
1976
1977struct cfattach wwanc_ca = {
1978        sizeof(struct wwanc_softc), wwanc_match, wwanc_attach,
1979        wwanc_detach, wwanc_activate
1980};
1981
1982struct cfdriver wwanc_cd = {
1983        NULL, "wwanc", DV_DULL
1984};
1985#endif
1986
1987#ifdef __NetBSD__
1988CFATTACH_DECL3_NEW(wwanc, sizeof(struct wwanc_softc),
1989   wwanc_match, wwanc_attach, wwanc_detach, NULL,
1990   NULL, NULL, DVF_DETACH_SHUTDOWN);
1991
1992static bool wwanc_pmf_suspend(device_t, const pmf_qual_t *);
1993static bool wwanc_pmf_resume(device_t, const pmf_qual_t *);
1994#endif /* __NetBSD__ */
1995
1996static int
1997wwanc_match(struct device *parent, cfdata_t match, void *aux)
1998{
1999	struct pci_attach_args *pa = aux;
2000
2001	return (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_INTEL &&
2002		PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_INTEL_XMM7360);
2003}
2004
2005static int xmm7360_dev_init(struct xmm_dev *xmm)
2006{
2007	int ret;
2008	int depth, page_size;
2009
2010	ret = xmm7360_base_init(xmm);
2011	if (ret)
2012		return ret;
2013
2014	/* Initialize queue pairs for later use */
2015	for (int num = 0; num < XMM_QP_COUNT; num++) {
2016		switch (num) {
2017		case 0:	/* net */
2018			depth = 128;
2019			page_size = TD_MAX_PAGE_SIZE;
2020			break;
2021		case 1:	/* rpc */
2022		case 3: /* trace */
2023			depth = 16;
2024			page_size = TD_MAX_PAGE_SIZE;
2025			break;
2026		default: /* tty */
2027			depth = 8;
2028			page_size = 4096;
2029			break;
2030		}
2031
2032		xmm7360_init_qp(xmm, num, depth, page_size);
2033	}
2034
2035	return 0;
2036}
2037
2038static void xmm7360_dev_deinit(struct xmm_dev *xmm)
2039{
2040	struct wwanc_softc *sc = device_private(xmm->dev);
2041	bool devgone = false;
2042	struct tty *tp;
2043
2044	xmm->error = -ENODEV;
2045
2046	/* network device should be gone by now */
2047	KASSERT(sc->sc_net == NULL);
2048	KASSERT(xmm->net == NULL);
2049
2050	/* free ttys */
2051	for (int i=0; i<XMM_QP_COUNT; i++) {
2052		tp = sc->sc_tty[i];
2053		if (tp) {
2054			KASSERT(DEV_IS_TTY(i));
2055			if (!devgone) {
2056				vdevgone(major(tp->t_dev), 0, DEVFUNC_MASK,
2057				    VCHR);
2058				devgone = true;
2059			}
2060			ttyfree(tp);
2061			sc->sc_tty[i] = NULL;
2062		}
2063	}
2064
2065	xmm7360_cmd_ring_free(xmm);
2066}
2067
2068static void
2069wwanc_io_wakeup(struct queue_pair *qp, int flag)
2070{
2071        if (flag & FREAD) {
2072                selnotify(&qp->selr, POLLIN|POLLRDNORM, NOTE_SUBMIT);
2073                wakeup(qp->wq);
2074        }
2075        if (flag & FWRITE) {
2076                selnotify(&qp->selw, POLLOUT|POLLWRNORM, NOTE_SUBMIT);
2077                wakeup(qp->wq);
2078        }
2079}
2080
2081static int
2082wwanc_intr(void *xsc)
2083{
2084	struct wwanc_softc *sc = xsc;
2085	struct xmm_dev *xmm = &sc->sc_xmm;
2086	struct queue_pair *qp;
2087
2088	xmm7360_poll(xmm);
2089	wakeup(&xmm->wq);
2090
2091	if (xmm->net && xmm->net->qp->open && xmm7360_qp_has_data(xmm->net->qp))
2092		xmm7360_net_poll(xmm);
2093
2094	for (int func = 1; func < XMM_QP_COUNT; func++) {
2095		qp = &xmm->qp[func];
2096		if (!qp->open)
2097			continue;
2098
2099		/* Check for input, wwancstart()/wwancwrite() does output */
2100		if (xmm7360_qp_has_data(qp)) {
2101			if (DEV_IS_TTY(func)) {
2102				int s = spltty();
2103				xmm7360_tty_poll_qp(qp);
2104				splx(s);
2105			}
2106			wwanc_io_wakeup(qp, FREAD);
2107		}
2108
2109		/* Wakeup/notify eventual writers */
2110		if (xmm7360_qp_can_write(qp))
2111			wwanc_io_wakeup(qp, FWRITE);
2112	}
2113
2114	return 1;
2115}
2116
2117static int
2118wwancprint(void *aux, const char *pnp)
2119{
2120	struct wwanc_attach_args *wa = aux;
2121
2122	if (pnp)
2123                printf("wwanc type %s at %s",
2124		    (wa->aa_type == WWMC_TYPE_NET) ? "net" : "unk", pnp);
2125	else
2126		printf(" type %s",
2127		    (wa->aa_type == WWMC_TYPE_NET) ? "net" : "unk");
2128
2129	return (UNCONF);
2130}
2131
2132static void
2133wwanc_attach_finish(struct device *self)
2134{
2135	struct wwanc_softc *sc = device_private(self);
2136
2137	if (xmm7360_dev_init(&sc->sc_xmm)) {
2138		/* error already printed */
2139		return;
2140	}
2141
2142	/* Attach the network device */
2143	struct wwanc_attach_args wa;
2144	memset(&wa, 0, sizeof(wa));
2145	wa.aa_type = WWMC_TYPE_NET;
2146	sc->sc_net = config_found(self, &wa, wwancprint, CFARGS_NONE);
2147}
2148
2149static void
2150wwanc_attach(struct device *parent, struct device *self, void *aux)
2151{
2152	struct wwanc_softc *sc = device_private(self);
2153	struct pci_attach_args *pa = aux;
2154	bus_space_tag_t memt;
2155	bus_space_handle_t memh;
2156	bus_size_t sz;
2157	int error;
2158	const char *intrstr;
2159#ifdef __OpenBSD__
2160	pci_intr_handle_t ih;
2161#endif
2162#ifdef __NetBSD__
2163	pci_intr_handle_t *ih;
2164	char intrbuf[PCI_INTRSTR_LEN];
2165#endif
2166
2167	sc->sc_dev = self;
2168	sc->sc_pc = pa->pa_pc;
2169	sc->sc_tag = pa->pa_tag;
2170	sc->sc_dmat = pa->pa_dmat;
2171
2172	/* map the register window, memory mapped 64-bit non-prefetchable */
2173	error = pci_mapreg_map(pa, WWAN_BAR0,
2174	    PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT,
2175	    BUS_SPACE_MAP_LINEAR, &memt, &memh, NULL, &sz, 0);
2176	if (error != 0) {
2177		printf(": can't map mem space for BAR0 %d\n", error);
2178		return;
2179	}
2180	sc->sc_bar0_tag = memt;
2181	sc->sc_bar0_handle = memh;
2182	sc->sc_bar0_sz = sz;
2183
2184	error = pci_mapreg_map(pa, WWAN_BAR2,
2185	    PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT,
2186	    BUS_SPACE_MAP_LINEAR, &memt, &memh, NULL, &sz, 0);
2187	if (error != 0) {
2188		bus_space_unmap(sc->sc_bar0_tag, sc->sc_bar0_handle,
2189		    sc->sc_bar0_sz);
2190		printf(": can't map mem space for BAR2\n");
2191		return;
2192	}
2193	sc->sc_bar2_tag = memt;
2194	sc->sc_bar2_handle = memh;
2195	sc->sc_bar2_sz = sz;
2196
2197	/* Set xmm members needed for xmm7360_dev_init() */
2198	sc->sc_xmm.dev = self;
2199	sc->sc_xmm.bar0 = bus_space_vaddr(sc->sc_bar0_tag, sc->sc_bar0_handle);
2200	sc->sc_xmm.bar2 = bus_space_vaddr(sc->sc_bar0_tag, sc->sc_bar2_handle);
2201	init_waitqueue_head(&sc->sc_xmm.wq);
2202
2203#ifdef __OpenBSD__
2204	if (pci_intr_map_msi(pa, &ih) && pci_intr_map(pa, &ih)) {
2205		printf(": can't map interrupt\n");
2206		goto fail;
2207	}
2208	sc->sc_pih = ih;
2209	intrstr = pci_intr_string(sc->sc_pc, ih);
2210	printf(": %s\n", intrstr);
2211#endif
2212#ifdef __NetBSD__
2213	if (pci_intr_alloc(pa, &ih, NULL, 0)) {
2214		printf(": can't map interrupt\n");
2215		goto fail;
2216	}
2217	sc->sc_pih = ih[0];
2218	intrstr = pci_intr_string(pa->pa_pc, ih[0], intrbuf, sizeof(intrbuf));
2219	aprint_normal(": LTE modem\n");
2220	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
2221#endif
2222
2223	/* Device initialized, can establish the interrupt now */
2224	sc->sc_ih = pci_intr_establish(sc->sc_pc, sc->sc_pih, IPL_NET,
2225	    wwanc_intr, sc, device_xname(sc->sc_dev));
2226	if (sc->sc_ih == NULL) {
2227		device_printf(self, "can't establish interrupt\n");
2228		return;
2229	}
2230
2231#ifdef __NetBSD__
2232	if (!pmf_device_register(self, wwanc_pmf_suspend, wwanc_pmf_resume))
2233		aprint_error_dev(self, "couldn't establish power handler\n");
2234#endif
2235
2236	/*
2237	 * Device initialization requires working interrupts, so need
2238	 * to postpone this until they are enabled.
2239	 */
2240	config_mountroot(self, wwanc_attach_finish);
2241	return;
2242
2243fail:
2244	bus_space_unmap(sc->sc_bar0_tag, sc->sc_bar0_handle, sc->sc_bar0_sz);
2245	sc->sc_bar0_tag = 0;
2246	bus_space_unmap(sc->sc_bar2_tag, sc->sc_bar2_handle, sc->sc_bar2_sz);
2247	sc->sc_bar2_tag = 0;
2248	return;
2249}
2250
2251static int
2252wwanc_detach(struct device *self, int flags)
2253{
2254	int error;
2255	struct wwanc_softc *sc = device_private(self);
2256
2257	if (sc->sc_ih) {
2258		pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
2259		sc->sc_ih = NULL;
2260	}
2261
2262	if (sc->sc_net) {
2263		error = config_detach_children(self, flags);
2264		if (error)
2265			return error;
2266		sc->sc_net = NULL;
2267	}
2268
2269	pmf_device_deregister(self);
2270
2271	xmm7360_dev_deinit(&sc->sc_xmm);
2272
2273	if (sc->sc_bar0_tag) {
2274		bus_space_unmap(sc->sc_bar0_tag, sc->sc_bar0_handle,
2275		    sc->sc_bar0_sz);
2276		sc->sc_bar0_tag = 0;
2277	}
2278	sc->sc_xmm.bar0 = NULL;
2279
2280	if (sc->sc_bar2_tag) {
2281		bus_space_unmap(sc->sc_bar2_tag, sc->sc_bar2_handle,
2282		    sc->sc_bar2_sz);
2283		sc->sc_bar2_tag = 0;
2284	}
2285	sc->sc_xmm.bar2 = NULL;
2286
2287	return 0;
2288}
2289
2290static void
2291wwanc_suspend(struct device *self)
2292{
2293	struct wwanc_softc *sc = device_private(self);
2294	struct xmm_dev *xmm = &sc->sc_xmm;
2295	struct queue_pair *qp;
2296
2297	KASSERT(!sc->sc_resume);
2298	KASSERT(xmm->cp != NULL);
2299
2300	for (int i = 0; i < XMM_QP_COUNT; i++) {
2301		qp = &xmm->qp[i];
2302		if (qp->open)
2303			xmm7360_qp_suspend(qp);
2304	}
2305
2306	xmm7360_cmd_ring_free(xmm);
2307	KASSERT(xmm->cp == NULL);
2308}
2309
2310static void
2311wwanc_resume(struct device *self)
2312{
2313	struct wwanc_softc *sc = device_private(self);
2314	struct xmm_dev *xmm = &sc->sc_xmm;
2315	struct queue_pair *qp;
2316
2317	KASSERT(xmm->cp == NULL);
2318
2319	xmm7360_base_init(xmm);
2320
2321	for (int i = 0; i < XMM_QP_COUNT; i++) {
2322		qp = &xmm->qp[i];
2323		if (qp->open)
2324			xmm7360_qp_resume(qp);
2325	}
2326}
2327
2328#ifdef __OpenBSD__
2329
2330static void
2331wwanc_defer_resume(void *xarg)
2332{
2333	struct device *self = xarg;
2334	struct wwanc_softc *sc = device_private(self);
2335
2336	tsleep(&sc->sc_resume, 0, "wwancdr", 2 * hz);
2337
2338	wwanc_resume(self);
2339
2340	(void)config_activate_children(self, DVACT_RESUME);
2341
2342	sc->sc_resume = false;
2343	kthread_exit(0);
2344}
2345
2346static int
2347wwanc_activate(struct device *self, int act)
2348{
2349	struct wwanc_softc *sc = device_private(self);
2350
2351	switch (act) {
2352	case DVACT_QUIESCE:
2353		(void)config_activate_children(self, act);
2354		break;
2355	case DVACT_SUSPEND:
2356		if (sc->sc_resume) {
2357			/* Refuse to suspend if resume still ongoing */
2358			device_printf(self,
2359			    "not suspending, resume still ongoing\n");
2360			return EBUSY;
2361		}
2362
2363		(void)config_activate_children(self, act);
2364		wwanc_suspend(self);
2365		break;
2366	case DVACT_RESUME:
2367		/*
2368		 * Modem reinitialization can take several seconds, defer
2369		 * it via kernel thread to avoid blocking the resume.
2370		 */
2371		sc->sc_resume = true;
2372		kthread_create(wwanc_defer_resume, self, NULL, "wwancres");
2373		break;
2374	default:
2375		break;
2376	}
2377
2378	return 0;
2379}
2380
2381cdev_decl(wwanc);
2382#endif /* __OpenBSD__ */
2383
2384#ifdef __NetBSD__
2385static bool
2386wwanc_pmf_suspend(device_t self, const pmf_qual_t *qual)
2387{
2388	wwanc_suspend(self);
2389	return true;
2390}
2391
2392static bool
2393wwanc_pmf_resume(device_t self, const pmf_qual_t *qual)
2394{
2395	wwanc_resume(self);
2396	return true;
2397}
2398
2399static dev_type_open(wwancopen);
2400static dev_type_close(wwancclose);
2401static dev_type_read(wwancread);
2402static dev_type_write(wwancwrite);
2403static dev_type_ioctl(wwancioctl);
2404static dev_type_poll(wwancpoll);
2405static dev_type_kqfilter(wwanckqfilter);
2406static dev_type_tty(wwanctty);
2407
2408const struct cdevsw wwanc_cdevsw = {
2409	.d_open = wwancopen,
2410	.d_close = wwancclose,
2411	.d_read = wwancread,
2412	.d_write = wwancwrite,
2413	.d_ioctl = wwancioctl,
2414	.d_stop = nullstop,
2415	.d_tty = wwanctty,
2416	.d_poll = wwancpoll,
2417	.d_mmap = nommap,
2418	.d_kqfilter = wwanckqfilter,
2419	.d_discard = nodiscard,
2420	.d_flag = D_TTY
2421};
2422#endif
2423
2424static int wwancparam(struct tty *, struct termios *);
2425static void wwancstart(struct tty *);
2426
2427static void xmm7360_os_handle_tty_idata(struct queue_pair *qp, const u8 *data, size_t nread)
2428{
2429	struct xmm_dev *xmm = qp->xmm;
2430	struct wwanc_softc *sc = device_private(xmm->dev);
2431	int func = qp->num;
2432	struct tty *tp = sc->sc_tty[func];
2433
2434	KASSERT(DEV_IS_TTY(func));
2435	KASSERT(tp);
2436
2437	for (int i = 0; i < nread; i++)
2438		LINESW(tp).l_rint(data[i], tp);
2439}
2440
2441int
2442wwancopen(dev_t dev, int flags, int mode, struct proc *p)
2443{
2444	int unit = DEVUNIT(dev);
2445	struct wwanc_softc *sc = device_lookup_private(&wwanc_cd, unit);
2446	struct tty *tp;
2447	int func, error;
2448
2449	if (sc == NULL)
2450		return ENXIO;
2451
2452	/* Only allow opening the rpc/trace/AT queue pairs */
2453	func = DEVFUNC(dev);
2454	if (func < 1 || func > 7)
2455		return ENXIO;
2456
2457	if (DEV_IS_TTY(dev)) {
2458		if (!sc->sc_tty[func]) {
2459			tp = sc->sc_tty[func] = ttymalloc(1000000);
2460
2461			tp->t_oproc = wwancstart;
2462		        tp->t_param = wwancparam;
2463			tp->t_dev = dev;
2464			tp->t_sc = (void *)sc;
2465		} else
2466			tp = sc->sc_tty[func];
2467
2468		if (!ISSET(tp->t_state, TS_ISOPEN)) {
2469			ttychars(tp);
2470			tp->t_iflag = TTYDEF_IFLAG;
2471			tp->t_oflag = TTYDEF_OFLAG;
2472			tp->t_lflag = TTYDEF_LFLAG;
2473			tp->t_cflag = TTYDEF_CFLAG;
2474			tp->t_ispeed = tp->t_ospeed = B115200;
2475			SET(tp->t_cflag, CS8 | CREAD | HUPCL | CLOCAL);
2476
2477			SET(tp->t_state, TS_CARR_ON);
2478		} else if (suser(p) != 0) {
2479			return EBUSY;
2480		}
2481
2482		error = LINESW(tp).l_open(dev, tp, p);
2483		if (error)
2484			return error;
2485	}
2486
2487	/* Initialize ring if qp not open yet */
2488	xmm7360_qp_start(&sc->sc_xmm.qp[func]);
2489
2490	return 0;
2491}
2492
2493int
2494wwancread(dev_t dev, struct uio *uio, int flag)
2495{
2496	struct wwanc_softc *sc = device_lookup_private(&wwanc_cd, DEVUNIT(dev));
2497	int func = DEVFUNC(dev);
2498
2499	KASSERT(sc != NULL);
2500
2501	if (DEV_IS_TTY(dev)) {
2502		struct tty *tp = sc->sc_tty[func];
2503
2504		return (LINESW(tp).l_read(tp, uio, flag));
2505	} else {
2506		struct queue_pair *qp = &sc->sc_xmm.qp[func];
2507		ssize_t ret;
2508		char *buf;
2509		size_t size, read = 0;
2510
2511#ifdef __OpenBSD__
2512		KASSERT(uio->uio_segflg == UIO_USERSPACE);
2513#endif
2514
2515		for (int i = 0; i < uio->uio_iovcnt; i++) {
2516			buf = uio->uio_iov[i].iov_base;
2517			size = uio->uio_iov[i].iov_len;
2518
2519			while (size > 0) {
2520				ret = xmm7360_qp_read_user(qp, buf, size);
2521				if (ret < 0) {
2522					/*
2523					 * This shadows -EPERM, but that is
2524					 * not returned by the call stack,
2525					 * so this condition is safe.
2526					 */
2527					return (ret == ERESTART) ? ret : -ret;
2528				}
2529
2530				KASSERT(ret > 0 && ret <= size);
2531				size -= ret;
2532				buf += ret;
2533				read += ret;
2534
2535				/* Reader will re-try if they want more */
2536				goto out;
2537			}
2538		}
2539
2540out:
2541		uio->uio_resid -= read;
2542		uio->uio_offset += read;
2543
2544		return 0;
2545	}
2546}
2547
2548int
2549wwancwrite(dev_t dev, struct uio *uio, int flag)
2550{
2551	struct wwanc_softc *sc = device_lookup_private(&wwanc_cd, DEVUNIT(dev));
2552	int func = DEVFUNC(dev);
2553
2554	if (DEV_IS_TTY(dev)) {
2555		struct tty *tp = sc->sc_tty[func];
2556
2557		return (LINESW(tp).l_write(tp, uio, flag));
2558	} else {
2559		struct queue_pair *qp = &sc->sc_xmm.qp[func];
2560		ssize_t ret;
2561		const char *buf;
2562		size_t size, wrote = 0;
2563
2564#ifdef __OpenBSD__
2565		KASSERT(uio->uio_segflg == UIO_USERSPACE);
2566#endif
2567
2568		for (int i = 0; i < uio->uio_iovcnt; i++) {
2569			buf = uio->uio_iov[i].iov_base;
2570			size = uio->uio_iov[i].iov_len;
2571
2572			while (size > 0) {
2573				ret = xmm7360_qp_write_user(qp, buf, size);
2574				if (ret < 0) {
2575					/*
2576					 * This shadows -EPERM, but that is
2577					 * not returned by the call stack,
2578					 * so this condition is safe.
2579					 */
2580					return (ret == ERESTART) ? ret : -ret;
2581				}
2582
2583				KASSERT(ret > 0 && ret <= size);
2584				size -= ret;
2585				buf += ret;
2586				wrote += ret;
2587			}
2588		}
2589
2590		uio->uio_resid -= wrote;
2591		uio->uio_offset += wrote;
2592
2593		return 0;
2594	}
2595}
2596
2597int
2598wwancioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
2599{
2600	struct wwanc_softc *sc = device_lookup_private(&wwanc_cd, DEVUNIT(dev));
2601	int error;
2602
2603	if (DEV_IS_TTY(dev)) {
2604		struct tty *tp = sc->sc_tty[DEVFUNC(dev)];
2605		KASSERT(tp);
2606
2607		error = LINESW(tp).l_ioctl(tp, cmd, data, flag, p);
2608		if (error >= 0)
2609			return error;
2610		error = ttioctl(tp, cmd, data, flag, p);
2611		if (error >= 0)
2612			return error;
2613	}
2614
2615	return ENOTTY;
2616}
2617
2618int
2619wwancclose(dev_t dev, int flag, int mode, struct proc *p)
2620{
2621	struct wwanc_softc *sc = device_lookup_private(&wwanc_cd, DEVUNIT(dev));
2622	int func = DEVFUNC(dev);
2623
2624	if (DEV_IS_TTY(dev)) {
2625		struct tty *tp = sc->sc_tty[func];
2626		KASSERT(tp);
2627
2628		CLR(tp->t_state, TS_BUSY | TS_FLUSH);
2629		LINESW(tp).l_close(tp, flag, p);
2630		ttyclose(tp);
2631	}
2632
2633	xmm7360_qp_stop(&sc->sc_xmm.qp[func]);
2634
2635	return 0;
2636}
2637
2638struct tty *
2639wwanctty(dev_t dev)
2640{
2641	struct wwanc_softc *sc = device_lookup_private(&wwanc_cd, DEVUNIT(dev));
2642	struct tty *tp = sc->sc_tty[DEVFUNC(dev)];
2643
2644	KASSERT(DEV_IS_TTY(dev));
2645	KASSERT(tp);
2646
2647	return tp;
2648}
2649
2650static int
2651wwancparam(struct tty *tp, struct termios *t)
2652{
2653	struct wwanc_softc *sc __diagused = (struct wwanc_softc *)tp->t_sc;
2654	dev_t dev = tp->t_dev;
2655	int func __diagused = DEVFUNC(dev);
2656
2657	KASSERT(DEV_IS_TTY(dev));
2658	KASSERT(tp == sc->sc_tty[func]);
2659	/* Can't assert tty_locked(), it's not taken when called via ttioctl()*/
2660
2661	/* Nothing to set on hardware side, just copy values */
2662	tp->t_ispeed = t->c_ispeed;
2663	tp->t_ospeed = t->c_ospeed;
2664	tp->t_cflag = t->c_cflag;
2665
2666	return 0;
2667}
2668
2669static void
2670wwancstart(struct tty *tp)
2671{
2672	struct wwanc_softc *sc = (struct wwanc_softc *)tp->t_sc;
2673	dev_t dev = tp->t_dev;
2674	int func = DEVFUNC(dev);
2675	struct queue_pair *qp = &sc->sc_xmm.qp[func];
2676	int n, written;
2677
2678	KASSERT(DEV_IS_TTY(dev));
2679	KASSERT(tp == sc->sc_tty[func]);
2680	tty_locked(tp);
2681
2682	if (ISSET(tp->t_state, TS_BUSY) || !xmm7360_qp_can_write(qp))
2683		return;
2684	if (tp->t_outq.c_cc == 0)
2685		return;
2686
2687	/*
2688	 * If we can write, we can write full qb page_size amount of data.
2689	 * Once q_to_b() is called, the data must be trasmitted - q_to_b()
2690	 * removes them from the tty output queue. Partial write is not
2691	 * possible.
2692	 */
2693	KASSERT(sizeof(qp->user_buf) >= qp->page_size);
2694	SET(tp->t_state, TS_BUSY);
2695	n = q_to_b(&tp->t_outq, qp->user_buf, qp->page_size);
2696	KASSERT(n > 0);
2697	KASSERT(n <= qp->page_size);
2698	written = xmm7360_qp_write(qp, qp->user_buf, n);
2699	CLR(tp->t_state, TS_BUSY);
2700
2701	if (written != n) {
2702		dev_err(sc->sc_dev, "xmm7360_qp_write(%d) failed %d != %d\n",
2703		    func, written, n);
2704		/* nothing to recover, just return */
2705	}
2706}
2707
2708int
2709wwancpoll(dev_t dev, int events, struct proc *p)
2710{
2711	struct wwanc_softc *sc = device_lookup_private(&wwanc_cd, DEVUNIT(dev));
2712	int func = DEVFUNC(dev);
2713	struct queue_pair *qp = &sc->sc_xmm.qp[func];
2714	int mask = 0;
2715
2716	if (DEV_IS_TTY(dev)) {
2717#ifdef __OpenBSD__
2718		return ttpoll(dev, events, p);
2719#endif
2720#ifdef __NetBSD__
2721		struct tty *tp = sc->sc_tty[func];
2722
2723		return LINESW(tp).l_poll(tp, events, p);
2724#endif
2725	}
2726
2727	KASSERT(!DEV_IS_TTY(dev));
2728
2729	if (qp->xmm->error) {
2730		mask |= POLLHUP;
2731		goto out;
2732	}
2733
2734	if (xmm7360_qp_has_data(qp))
2735		mask |= POLLIN | POLLRDNORM;
2736
2737	if (xmm7360_qp_can_write(qp))
2738		mask |= POLLOUT | POLLWRNORM;
2739
2740out:
2741	if ((mask & events) == 0) {
2742		if (events & (POLLIN | POLLPRI | POLLRDNORM | POLLRDBAND))
2743			selrecord(p, &sc->sc_selr);
2744                if (events & (POLLOUT | POLLWRNORM))
2745                        selrecord(p, &sc->sc_selw);
2746	}
2747
2748	return mask & events;
2749}
2750
2751static void
2752filt_wwancrdetach(struct knote *kn)
2753{
2754	struct queue_pair *qp = (struct queue_pair *)kn->kn_hook;
2755	struct xmm_dev *xmm = qp->xmm;
2756	int func = qp - xmm->qp;
2757	struct wwanc_softc *sc = container_of(xmm, struct wwanc_softc, sc_xmm);
2758	struct tty *tp = sc->sc_tty[func];
2759
2760	tty_lock(tp);
2761	selremove_knote(&qp->selr, kn);
2762	tty_unlock(tp);
2763}
2764
2765static int
2766filt_wwancread(struct knote *kn, long hint)
2767{
2768	struct queue_pair *qp = (struct queue_pair *)kn->kn_hook;
2769
2770	kn->kn_data = 0;
2771
2772	if (!qp->open) {
2773		knote_set_eof(kn, 0);
2774		return (1);
2775	} else {
2776		kn->kn_data = xmm7360_qp_has_data(qp) ? 1 : 0;
2777	}
2778
2779	return (kn->kn_data > 0);
2780}
2781
2782static void
2783filt_wwancwdetach(struct knote *kn)
2784{
2785	struct queue_pair *qp = (struct queue_pair *)kn->kn_hook;
2786	struct xmm_dev *xmm = qp->xmm;
2787	int func = qp - xmm->qp;
2788	struct wwanc_softc *sc = container_of(xmm, struct wwanc_softc, sc_xmm);
2789	struct tty *tp = sc->sc_tty[func];
2790
2791	tty_lock(tp);
2792	selremove_knote(&qp->selw, kn);
2793	tty_unlock(tp);
2794}
2795
2796static int
2797filt_wwancwrite(struct knote *kn, long hint)
2798{
2799	struct queue_pair *qp = (struct queue_pair *)kn->kn_hook;
2800
2801	kn->kn_data = 0;
2802
2803	if (qp->open) {
2804		if (xmm7360_qp_can_write(qp))
2805			kn->kn_data = qp->page_size;
2806	}
2807
2808	return (kn->kn_data > 0);
2809}
2810
2811static const struct filterops wwancread_filtops = {
2812	XMM_KQ_ISFD_INITIALIZER,
2813	.f_attach	= NULL,
2814	.f_detach	= filt_wwancrdetach,
2815	.f_event	= filt_wwancread,
2816};
2817
2818static const struct filterops wwancwrite_filtops = {
2819	XMM_KQ_ISFD_INITIALIZER,
2820	.f_attach	= NULL,
2821	.f_detach	= filt_wwancwdetach,
2822	.f_event	= filt_wwancwrite,
2823};
2824
2825int
2826wwanckqfilter(dev_t dev, struct knote *kn)
2827{
2828	struct wwanc_softc *sc = device_lookup_private(&wwanc_cd, DEVUNIT(dev));
2829	int func = DEVFUNC(dev);
2830	struct queue_pair *qp = &sc->sc_xmm.qp[func];
2831	struct tty *tp = sc->sc_tty[func];
2832	struct selinfo *si;
2833
2834	if (DEV_IS_TTY(func))
2835		return ttkqfilter(dev, kn);
2836
2837	KASSERT(!DEV_IS_TTY(func));
2838
2839	switch (kn->kn_filter) {
2840	case EVFILT_READ:
2841		si = &qp->selr;
2842		kn->kn_fop = &wwancread_filtops;
2843		break;
2844	case EVFILT_WRITE:
2845		si = &qp->selw;
2846		kn->kn_fop = &wwancwrite_filtops;
2847		break;
2848	default:
2849		return (EINVAL);
2850	}
2851
2852	kn->kn_hook = (void *)qp;
2853
2854	tty_lock(tp);
2855	selrecord_knote(si, kn);
2856	tty_unlock(tp);
2857
2858	return (0);
2859}
2860
2861static void *
2862dma_alloc_coherent(struct device *self, size_t sz, dma_addr_t *physp, int flags)
2863{
2864	struct wwanc_softc *sc = device_private(self);
2865	bus_dma_segment_t seg;
2866	int nsegs;
2867	int error;
2868	caddr_t kva;
2869
2870	error = bus_dmamem_alloc(sc->sc_dmat, sz, 0, 0, &seg, 1, &nsegs,
2871	    BUS_DMA_WAITOK);
2872	if (error) {
2873		panic("%s: bus_dmamem_alloc(%lu) failed %d\n",
2874		    device_xname(self), (unsigned long)sz, error);
2875		/* NOTREACHED */
2876	}
2877
2878	KASSERT(nsegs == 1);
2879	KASSERT(seg.ds_len == round_page(sz));
2880
2881	error = bus_dmamem_map(sc->sc_dmat, &seg, nsegs, sz, &kva,
2882	    BUS_DMA_WAITOK | BUS_DMA_COHERENT);
2883	if (error) {
2884		panic("%s: bus_dmamem_alloc(%lu) failed %d\n",
2885		    device_xname(self), (unsigned long)sz, error);
2886		/* NOTREACHED */
2887	}
2888
2889	memset(kva, 0, sz);
2890	*physp = seg.ds_addr;
2891	return (void *)kva;
2892}
2893
2894static void
2895dma_free_coherent(struct device *self, size_t sz, volatile void *vaddr, dma_addr_t phys)
2896{
2897	struct wwanc_softc *sc = device_private(self);
2898	bus_dma_segment_t seg;
2899
2900	sz = round_page(sz);
2901
2902	bus_dmamem_unmap(sc->sc_dmat, __UNVOLATILE(vaddr), sz);
2903
2904	/* this does't need the exact seg returned by bus_dmamem_alloc() */
2905	memset(&seg, 0, sizeof(seg));
2906	seg.ds_addr = phys;
2907	seg.ds_len  = sz;
2908	bus_dmamem_free(sc->sc_dmat, &seg, 1);
2909}
2910
2911struct wwan_softc {
2912#ifdef __OpenBSD__
2913	struct device		sc_devx;	/* gen. device info storage */
2914#endif
2915	struct device		*sc_dev;	/* generic device */
2916	struct wwanc_softc	*sc_parent;	/* parent device */
2917	struct ifnet		sc_ifnet;	/* network-visible interface */
2918	struct xmm_net		sc_xmm_net;
2919};
2920
2921static void xmm7360_os_handle_net_frame(struct xmm_dev *xmm, const u8 *buf, size_t sz)
2922{
2923	struct wwanc_softc *sc = device_private(xmm->dev);
2924	struct wwan_softc *sc_if = device_private(sc->sc_net);
2925	struct ifnet *ifp = &sc_if->sc_ifnet;
2926	struct mbuf *m;
2927
2928	KASSERT(sz <= MCLBYTES);
2929
2930	MGETHDR(m, M_DONTWAIT, MT_DATA);
2931	if (!m)
2932		return;
2933	if (sz > MHLEN) {
2934		MCLGETI(m, M_DONTWAIT, NULL, sz);
2935		if ((m->m_flags & M_EXT) == 0) {
2936			m_freem(m);
2937			return;
2938		}
2939	}
2940	m->m_len = m->m_pkthdr.len = sz;
2941
2942	/*
2943	 * No explicit alignment necessary - there is no ethernet header,
2944	 * so IP address is already aligned.
2945	 */
2946	KASSERT(m->m_pkthdr.len == sz);
2947	m_copyback(m, 0, sz, (const void *)buf, M_NOWAIT);
2948
2949#ifdef __OpenBSD__
2950	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
2951	ml_enqueue(&ml, m);
2952	if_input(ifp, &ml);
2953#endif
2954#ifdef __NetBSD__
2955	if_percpuq_enqueue(ifp->if_percpuq, m);
2956#endif
2957}
2958
2959static void
2960xmm7360_os_handle_net_dequeue(struct xmm_net *xn, struct mux_frame *frame)
2961{
2962	struct wwan_softc *sc_if =
2963		container_of(xn, struct wwan_softc, sc_xmm_net);
2964	struct ifnet *ifp = &sc_if->sc_ifnet;
2965	struct mbuf *m;
2966	int ret;
2967
2968	MUTEX_ASSERT_LOCKED(&xn->lock);
2969
2970	while ((m = ifq_deq_begin(&ifp->if_snd))) {
2971		/*
2972		 * xmm7360_mux_frame_append_packet() requires single linear
2973		 * buffer, so try m_defrag(). Another option would be
2974		 * using m_copydata() into an intermediate buffer.
2975		 */
2976		if (m->m_next) {
2977			if (m_defrag(m, M_DONTWAIT) != 0 || m->m_next) {
2978				/* Can't defrag, drop and continue */
2979				ifq_deq_commit(&ifp->if_snd, m);
2980				m_freem(m);
2981				continue;
2982			}
2983		}
2984
2985		ret = xmm7360_mux_frame_append_packet(frame,
2986		    mtod(m, void *), m->m_pkthdr.len);
2987		if (ret) {
2988			/* No more space in the frame */
2989			ifq_deq_rollback(&ifp->if_snd, m);
2990			break;
2991		}
2992		ifq_deq_commit(&ifp->if_snd, m);
2993
2994		/* Send a copy of the frame to the BPF listener */
2995		BPF_MTAP_OUT(ifp, m);
2996
2997		m_freem(m);
2998	}
2999}
3000
3001static void xmm7360_os_handle_net_txwake(struct xmm_net *xn)
3002{
3003	struct wwan_softc *sc_if =
3004		container_of(xn, struct wwan_softc, sc_xmm_net);
3005	struct ifnet *ifp = &sc_if->sc_ifnet;
3006
3007	MUTEX_ASSERT_LOCKED(&xn->lock);
3008
3009	KASSERT(xmm7360_qp_can_write(xn->qp));
3010	if (ifq_is_oactive(&ifp->if_snd)) {
3011		ifq_clr_oactive(&ifp->if_snd);
3012#ifdef __OpenBSD__
3013		ifq_restart(&ifp->if_snd);
3014#endif
3015#ifdef __NetBSD__
3016		if_schedule_deferred_start(ifp);
3017#endif
3018	}
3019}
3020
3021#ifdef __OpenBSD__
3022/*
3023 * Process received raw IPv4/IPv6 packet. There is no encapsulation.
3024 */
3025static int
3026wwan_if_input(struct ifnet *ifp, struct mbuf *m, void *cookie)
3027{
3028	const uint8_t *data = mtod(m, uint8_t *);
3029	void (*input)(struct ifnet *, struct mbuf *);
3030	u8 ip_version;
3031
3032	ip_version = data[0] >> 4;
3033
3034	switch (ip_version) {
3035	case IPVERSION:
3036		input = ipv4_input;
3037		break;
3038	case (IPV6_VERSION >> 4):
3039		input = ipv6_input;
3040		break;
3041	default:
3042		/* Unknown protocol, just drop packet */
3043		m_freem(m);
3044		return 1;
3045		/* NOTREACHED */
3046	}
3047
3048	/* Needed for tcpdump(1) et.al */
3049	m->m_pkthdr.ph_rtableid = ifp->if_rdomain;
3050	m_adj(m, sizeof(u_int32_t));
3051
3052	(*input)(ifp, m);
3053	return 1;
3054}
3055#endif /* __OpenBSD__ */
3056
3057#ifdef __NetBSD__
3058static bool wwan_pmf_suspend(device_t, const pmf_qual_t *);
3059
3060/*
3061 * Process received raw IPv4/IPv6 packet. There is no encapsulation.
3062 */
3063static void
3064wwan_if_input(struct ifnet *ifp, struct mbuf *m)
3065{
3066	const uint8_t *data = mtod(m, uint8_t *);
3067	pktqueue_t *pktq = NULL;
3068	u8 ip_version;
3069
3070	KASSERT(!cpu_intr_p());
3071	KASSERT((m->m_flags & M_PKTHDR) != 0);
3072
3073	if ((ifp->if_flags & IFF_UP) == 0) {
3074		m_freem(m);
3075		return;
3076	}
3077
3078	if_statadd(ifp, if_ibytes, m->m_pkthdr.len);
3079
3080	/*
3081	 * The interface can't receive packets for other host, so never
3082	 * really IFF_PROMISC even if bpf listener is attached.
3083	 */
3084	if (pfil_run_hooks(ifp->if_pfil, &m, ifp, PFIL_IN) != 0)
3085		return;
3086	if (m == NULL)
3087		return;
3088
3089	ip_version = data[0] >> 4;
3090	switch (ip_version) {
3091#ifdef INET
3092	case IPVERSION:
3093#ifdef GATEWAY
3094		if (ipflow_fastforward(m))
3095			return;
3096#endif
3097		pktq = ip_pktq;
3098		break;
3099#endif /* INET */
3100#ifdef INET6
3101	case (IPV6_VERSION >> 4):
3102		if (__predict_false(!in6_present)) {
3103			m_freem(m);
3104			return;
3105		}
3106#ifdef GATEWAY
3107		if (ip6flow_fastforward(&m))
3108			return;
3109#endif
3110		pktq = ip6_pktq;
3111		break;
3112#endif /* INET6 */
3113	default:
3114		/* Unknown protocol, just drop packet */
3115		m_freem(m);
3116		return;
3117		/* NOTREACHED */
3118	}
3119
3120	KASSERT(pktq != NULL);
3121
3122	/* No errors.  Receive the packet. */
3123	m_set_rcvif(m, ifp);
3124
3125	const uint32_t h = pktq_rps_hash(&xmm7360_pktq_rps_hash_p, m);
3126	if (__predict_false(!pktq_enqueue(pktq, m, h))) {
3127		m_freem(m);
3128	}
3129}
3130#endif
3131
3132/*
3133 * Transmit raw IPv4/IPv6 packet. No encapsulation necessary.
3134 */
3135static int
3136wwan_if_output(struct ifnet *ifp, struct mbuf *m,
3137    IF_OUTPUT_CONST struct sockaddr *dst, IF_OUTPUT_CONST struct rtentry *rt)
3138{
3139	// there is no ethernet frame, this means no bridge(4) handling
3140	return (if_enqueue(ifp, m));
3141}
3142
3143static int
3144wwan_if_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
3145{
3146	struct wwan_softc *sc_if = ifp->if_softc;
3147	int error = 0;
3148	int s;
3149
3150	s = splnet();
3151
3152	switch (cmd) {
3153#ifdef __NetBSD__
3154	case SIOCINITIFADDR:
3155#endif
3156#ifdef __OpenBSD__
3157	case SIOCAIFADDR:
3158	case SIOCAIFADDR_IN6:
3159	case SIOCSIFADDR:
3160#endif
3161		/* Make interface ready to run if address is assigned */
3162		ifp->if_flags |= IFF_UP;
3163		if (!(ifp->if_flags & IFF_RUNNING)) {
3164			ifp->if_flags |= IFF_RUNNING;
3165			xmm7360_mux_control(&sc_if->sc_xmm_net, 1, 0, 0, 0);
3166		}
3167		break;
3168	case SIOCSIFFLAGS:
3169	case SIOCADDMULTI:
3170	case SIOCDELMULTI:
3171		/* nothing special to do */
3172		break;
3173	case SIOCSIFMTU:
3174		error = ENOTTY;
3175		break;
3176	default:
3177#ifdef __NetBSD__
3178		/*
3179		 * Call common code for SIOCG* ioctls. In OpenBSD those ioctls
3180		 * are handled in ifioctl(), and the if_ioctl is not called
3181		 * for them at all.
3182		 */
3183		error = ifioctl_common(ifp, cmd, data);
3184		if (error == ENETRESET)
3185			error = 0;
3186#endif
3187#ifdef __OpenBSD__
3188		error = ENOTTY;
3189#endif
3190		break;
3191	}
3192
3193	splx(s);
3194
3195	return error;
3196}
3197
3198static void
3199wwan_if_start(struct ifnet *ifp)
3200{
3201	struct wwan_softc *sc = ifp->if_softc;
3202
3203	mutex_lock(&sc->sc_xmm_net.lock);
3204	while (!ifq_empty(&ifp->if_snd)) {
3205		if (!xmm7360_qp_can_write(sc->sc_xmm_net.qp)) {
3206			break;
3207		}
3208		xmm7360_net_flush(&sc->sc_xmm_net);
3209	}
3210	mutex_unlock(&sc->sc_xmm_net.lock);
3211}
3212
3213static int
3214wwan_match(struct device *parent, cfdata_t match, void *aux)
3215{
3216	struct wwanc_attach_args *wa = aux;
3217
3218	return (wa->aa_type == WWMC_TYPE_NET);
3219}
3220
3221static void
3222wwan_attach(struct device *parent, struct device *self, void *aux)
3223{
3224	struct wwan_softc *sc_if = device_private(self);
3225	struct ifnet *ifp = &sc_if->sc_ifnet;
3226	struct xmm_dev *xmm;
3227	struct xmm_net *xn;
3228
3229	sc_if->sc_dev = self;
3230	sc_if->sc_parent = device_private(parent);
3231	xmm = sc_if->sc_xmm_net.xmm = &sc_if->sc_parent->sc_xmm;
3232	xn = &sc_if->sc_xmm_net;
3233	mutex_init(&xn->lock);
3234
3235	/* QP already initialized in parent, just set pointers and start */
3236	xn->qp = &xmm->qp[0];
3237	xmm7360_qp_start(xn->qp);
3238	xmm->net = xn;
3239
3240	ifp->if_softc = sc_if;
3241	ifp->if_flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST \
3242		| IFF_SIMPLEX;
3243	ifp->if_ioctl = wwan_if_ioctl;
3244	ifp->if_start = wwan_if_start;
3245	ifp->if_mtu = 1500;
3246	ifp->if_hardmtu = 1500;
3247	ifp->if_type = IFT_OTHER;
3248	IFQ_SET_MAXLEN(&ifp->if_snd, xn->qp->depth);
3249	IFQ_SET_READY(&ifp->if_snd);
3250	CTASSERT(DEVICE_XNAME_SIZE == IFNAMSIZ);
3251	bcopy(device_xname(sc_if->sc_dev), ifp->if_xname, IFNAMSIZ);
3252
3253	/* Call MI attach routines. */
3254	if_attach(ifp);
3255
3256	/* Hook custom input and output processing, and dummy sadl */
3257	ifp->if_output = wwan_if_output;
3258	if_ih_insert(ifp, wwan_if_input, NULL);
3259	if_deferred_start_init(ifp, NULL);
3260	if_alloc_sadl(ifp);
3261#if NBPFILTER > 0
3262#ifdef __OpenBSD__
3263	bpfattach(&ifp->if_bpf, ifp, DLT_LOOP, sizeof(u_int32_t));
3264#endif
3265#ifdef __NetBSD__
3266	bpfattach(&ifp->if_bpf, ifp, DLT_RAW, 0);
3267#endif
3268#endif
3269
3270	printf("\n");
3271
3272#ifdef __NetBSD__
3273	xmm7360_pktq_rps_hash_p = pktq_rps_hash_default;
3274
3275	if (pmf_device_register(self, wwan_pmf_suspend, NULL))
3276		pmf_class_network_register(self, ifp);
3277	else
3278		aprint_error_dev(self, "couldn't establish power handler\n");
3279#endif
3280}
3281
3282static int
3283wwan_detach(struct device *self, int flags)
3284{
3285	struct wwan_softc *sc_if = device_private(self);
3286	struct ifnet *ifp = &sc_if->sc_ifnet;
3287
3288	if (ifp->if_flags & (IFF_UP|IFF_RUNNING))
3289		ifp->if_flags &= ~(IFF_UP|IFF_RUNNING);
3290
3291	pmf_device_deregister(self);
3292
3293	if_ih_remove(ifp, wwan_if_input, NULL);
3294	if_detach(ifp);
3295
3296	xmm7360_qp_stop(sc_if->sc_xmm_net.qp);
3297
3298	sc_if->sc_xmm_net.xmm->net = NULL;
3299
3300	return 0;
3301}
3302
3303static void
3304wwan_suspend(struct device *self)
3305{
3306	struct wwan_softc *sc_if = device_private(self);
3307	struct ifnet *ifp = &sc_if->sc_ifnet;
3308
3309	/*
3310	 * Interface is marked down on suspend, and needs to be reconfigured
3311	 * after resume.
3312	 */
3313	if (ifp->if_flags & (IFF_UP|IFF_RUNNING))
3314		ifp->if_flags &= ~(IFF_UP|IFF_RUNNING);
3315
3316	ifq_purge(&ifp->if_snd);
3317}
3318
3319#ifdef __OpenBSD__
3320static int
3321wwan_activate(struct device *self, int act)
3322{
3323	switch (act) {
3324	case DVACT_QUIESCE:
3325	case DVACT_SUSPEND:
3326		wwan_suspend(self);
3327		break;
3328	case DVACT_RESUME:
3329		/* Nothing to do */
3330		break;
3331	}
3332
3333	return 0;
3334}
3335
3336struct cfattach wwan_ca = {
3337        sizeof(struct wwan_softc), wwan_match, wwan_attach,
3338        wwan_detach, wwan_activate
3339};
3340
3341struct cfdriver wwan_cd = {
3342        NULL, "wwan", DV_IFNET
3343};
3344#endif /* __OpenBSD__ */
3345
3346#ifdef __NetBSD__
3347static bool
3348wwan_pmf_suspend(device_t self, const pmf_qual_t *qual)
3349{
3350	wwan_suspend(self);
3351	return true;
3352}
3353
3354CFATTACH_DECL3_NEW(wwan, sizeof(struct wwan_softc),
3355   wwan_match, wwan_attach, wwan_detach, NULL,
3356   NULL, NULL, DVF_DETACH_SHUTDOWN);
3357#endif /* __NetBSD__ */
3358
3359#endif /* __OpenBSD__ || __NetBSD__ */
3360