1#ifndef _IPATH_KERNEL_H
2#define _IPATH_KERNEL_H
3/*
4 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
5 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
6 *
7 * This software is available to you under a choice of one of two
8 * licenses.  You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * OpenIB.org BSD license below:
12 *
13 *     Redistribution and use in source and binary forms, with or
14 *     without modification, are permitted provided that the following
15 *     conditions are met:
16 *
17 *      - Redistributions of source code must retain the above
18 *        copyright notice, this list of conditions and the following
19 *        disclaimer.
20 *
21 *      - Redistributions in binary form must reproduce the above
22 *        copyright notice, this list of conditions and the following
23 *        disclaimer in the documentation and/or other materials
24 *        provided with the distribution.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 * SOFTWARE.
34 */
35
36/*
37 * This header file is the base header file for infinipath kernel code;
38 * ipath_user.h serves a similar purpose for user code.
39 */
40
41#include <linux/interrupt.h>
42#include <linux/pci.h>
43#include <linux/dma-mapping.h>
44#include <linux/mutex.h>
45#include <linux/list.h>
46#include <linux/scatterlist.h>
47#include <asm/io.h>
48#include <rdma/ib_verbs.h>
49
50#include "ipath_common.h"
51#include "ipath_debug.h"
52#include "ipath_registers.h"
53
54/* only s/w major version of InfiniPath we can handle */
55#define IPATH_CHIP_VERS_MAJ 2U
56
57/* we don't care about this except for printing */
58#define IPATH_CHIP_VERS_MIN 0U
59
60/* temporary, maybe always */
61extern struct infinipath_stats ipath_stats;
62
63#define IPATH_CHIP_SWVERSION IPATH_CHIP_VERS_MAJ
64/*
65 * First-cut criterion for "device is active" is
66 * two thousand dwords combined Tx, Rx traffic per
67 * 5-second interval. SMA packets are 64 dwords,
68 * and occur "a few per second", presumably each way.
69 */
70#define IPATH_TRAFFIC_ACTIVE_THRESHOLD (2000)
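
/*
 * That is, 2000 dwords is 8000 bytes per 5-second interval, or roughly
 * 1.6 KB/s of combined send+receive traffic.
 */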
71/*
72 * Struct used to indicate which errors are logged in each of the
73 * error-counters that are logged to EEPROM. A counter is incremented
74 * _once_ (saturating at 255) for each event with any bits set in
75 * the error or hwerror register masks below.
76 */
77#define IPATH_EEP_LOG_CNT (4)
78struct ipath_eep_log_mask {
79	u64 errs_to_log;
80	u64 hwerrs_to_log;
81};
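
/*
 * Illustrative sketch (an assumption about usage, not code from this
 * driver): when an error or hwerror interrupt is handled, each of the
 * IPATH_EEP_LOG_CNT counters is bumped at most once per event if any of
 * its masked bits were seen, e.g.
 *
 *	int eidx;
 *
 *	for (eidx = 0; eidx < IPATH_EEP_LOG_CNT; eidx++) {
 *		struct ipath_eep_log_mask *m = &dd->ipath_eep_st_masks[eidx];
 *
 *		if ((errs & m->errs_to_log) || (hwerrs & m->hwerrs_to_log))
 *			ipath_inc_eeprom_err(dd, eidx, 1);
 *	}
 *
 * Here dd, errs and hwerrs are hypothetical locals; ipath_eep_st_masks
 * and ipath_inc_eeprom_err() are declared later in this header.
 */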
82
83struct ipath_portdata {
84	void **port_rcvegrbuf;
85	dma_addr_t *port_rcvegrbuf_phys;
86	/* rcvhdrq base, needs mmap before useful */
87	void *port_rcvhdrq;
88	/* kernel virtual address where hdrqtail is updated */
89	void *port_rcvhdrtail_kvaddr;
90	/*
91	 * temp buffer for expected send setup, allocated at open, instead
92	 * of each setup call
93	 */
94	void *port_tid_pg_list;
95	/* when waiting for rcv or pioavail */
96	wait_queue_head_t port_wait;
97	/*
98	 * rcvegr bufs base, physical; must fit in 44 bits so that
99	 * 32-bit programs using mmap64 still work
100	 */
101	dma_addr_t port_rcvegr_phys;
102	/* mmap of hdrq, must fit in 44 bits */
103	dma_addr_t port_rcvhdrq_phys;
104	dma_addr_t port_rcvhdrqtailaddr_phys;
105	/*
106	 * number of opens (including slave subports) on this instance
107	 * (ignoring forks, dup, etc. for now)
108	 */
109	int port_cnt;
110	/*
111	 * how much space to leave at start of eager TID entries for
112	 * protocol use, on each TID
113	 */
114	/* port number, stored here instead of calculating it */
115	unsigned port_port;
116	/* non-zero if port is being shared. */
117	u16 port_subport_cnt;
118	/* non-zero if port is being shared. */
119	u16 port_subport_id;
120	/* number of pio bufs for this port (all procs, if shared) */
121	u32 port_piocnt;
122	/* first pio buffer for this port */
123	u32 port_pio_base;
124	/* chip offset of PIO buffers for this port */
125	u32 port_piobufs;
126	/* how many alloc_pages() chunks in port_rcvegrbuf_pages */
127	u32 port_rcvegrbuf_chunks;
128	/* how many egrbufs per chunk */
129	u32 port_rcvegrbufs_perchunk;
130	/* order for port_rcvegrbuf_pages */
131	size_t port_rcvegrbuf_size;
132	/* rcvhdrq size (for freeing) */
133	size_t port_rcvhdrq_size;
134	/* next expected TID to check when looking for free */
135	u32 port_tidcursor;
136	/* port state flags (IPATH_PORT_* bit numbers below) */
137	unsigned long port_flag;
138	/* what happened */
139	unsigned long int_flag;
140	/* WAIT_RCV that timed out, no interrupt */
141	u32 port_rcvwait_to;
142	/* WAIT_PIO that timed out, no interrupt */
143	u32 port_piowait_to;
144	/* WAIT_RCV already happened, no wait */
145	u32 port_rcvnowait;
146	/* WAIT_PIO already happened, no wait */
147	u32 port_pionowait;
148	/* total number of rcvhdrqfull errors */
149	u32 port_hdrqfull;
150	/*
151	 * Used to suppress multiple instances of same
152	 * port staying stuck at same point.
153	 */
154	u32 port_lastrcvhdrqtail;
155	/* saved total number of rcvhdrqfull errors for poll edge trigger */
156	u32 port_hdrqfull_poll;
157	/* total number of polled urgent packets */
158	u32 port_urgent;
159	/* saved total number of polled urgent packets for poll edge trigger */
160	u32 port_urgent_poll;
161	/* pid of process using this port */
162	struct pid *port_pid;
163	struct pid *port_subpid[INFINIPATH_MAX_SUBPORT];
164	/* same size as task_struct .comm[] */
165	char port_comm[16];
166	/* pkeys set by this use of this port */
167	u16 port_pkeys[4];
168	/* so file ops can get at unit */
169	struct ipath_devdata *port_dd;
170	/* A page of memory for rcvhdrhead, rcvegrhead, rcvegrtail * N */
171	void *subport_uregbase;
172	/* An array of pages for the eager receive buffers * N */
173	void *subport_rcvegrbuf;
174	/* An array of pages for the eager header queue entries * N */
175	void *subport_rcvhdr_base;
176	/* The version of the library which opened this port */
177	u32 userversion;
178	/* Bitmask of active slaves */
179	u32 active_slaves;
180	/* Type of packets or conditions we want to poll for */
181	u16 poll_type;
182	/* port rcvhdrq head offset */
183	u32 port_head;
184	/* receive packet sequence counter */
185	u32 port_seq_cnt;
186};
187
188struct sk_buff;
189struct ipath_sge_state;
190struct ipath_verbs_txreq;
191
192/*
193 * control information for layered drivers
194 */
195struct _ipath_layer {
196	void *l_arg;
197};
198
199struct ipath_skbinfo {
200	struct sk_buff *skb;
201	dma_addr_t phys;
202};
203
204struct ipath_sdma_txreq {
205	int                 flags;
206	int                 sg_count;
207	union {
208		struct scatterlist *sg;
209		void *map_addr;
210	};
211	void              (*callback)(void *, int);
212	void               *callback_cookie;
213	int                 callback_status;
214	u16                 start_idx;  /* sdma private */
215	u16                 next_descq_idx;  /* sdma private */
216	struct list_head    list;       /* sdma private */
217};
218
219struct ipath_sdma_desc {
220	__le64 qw[2];
221};
222
223#define IPATH_SDMA_TXREQ_F_USELARGEBUF  0x1
224#define IPATH_SDMA_TXREQ_F_HEADTOHOST   0x2
225#define IPATH_SDMA_TXREQ_F_INTREQ       0x4
226#define IPATH_SDMA_TXREQ_F_FREEBUF      0x8
227#define IPATH_SDMA_TXREQ_F_FREEDESC     0x10
228#define IPATH_SDMA_TXREQ_F_VL15         0x20
229
230#define IPATH_SDMA_TXREQ_S_OK        0
231#define IPATH_SDMA_TXREQ_S_SENDERROR 1
232#define IPATH_SDMA_TXREQ_S_ABORTED   2
233#define IPATH_SDMA_TXREQ_S_SHUTDOWN  3
234
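/*
 * Illustrative sketch (an assumption about usage, not code from this
 * driver): a sender fills in an ipath_sdma_txreq before queueing it,
 * and its callback later receives one of the IPATH_SDMA_TXREQ_S_*
 * codes above.  tx, nsge, sge_list and cookie are hypothetical.
 *
 *	static void example_sdma_done(void *cookie, int status)
 *	{
 *		if (status != IPATH_SDMA_TXREQ_S_OK)
 *			;	// aborted, errored, or shut down; clean up
 *	}
 *
 *	tx->flags = IPATH_SDMA_TXREQ_F_INTREQ | IPATH_SDMA_TXREQ_F_FREEBUF;
 *	tx->sg_count = nsge;
 *	tx->sg = sge_list;
 *	tx->callback = example_sdma_done;
 *	tx->callback_cookie = cookie;
 */
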
235#define IPATH_SDMA_STATUS_SCORE_BOARD_DRAIN_IN_PROG	(1ull << 63)
236#define IPATH_SDMA_STATUS_ABORT_IN_PROG			(1ull << 62)
237#define IPATH_SDMA_STATUS_INTERNAL_SDMA_ENABLE		(1ull << 61)
238#define IPATH_SDMA_STATUS_SCB_EMPTY			(1ull << 30)
239
240/* max dwords in small buffer packet */
241#define IPATH_SMALLBUF_DWORDS (dd->ipath_piosize2k >> 2)
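/* Note: the macro above expects a local 'dd' (struct ipath_devdata *) in scope. */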
242
243/*
244 * Possible IB config parameters for ipath_f_get/set_ib_cfg()
245 */
246#define IPATH_IB_CFG_LIDLMC 0 /* Get/set LID (LS16b) and Mask (MS16b) */
247#define IPATH_IB_CFG_HRTBT 1 /* Get/set Heartbeat off/enable/auto */
248#define IPATH_IB_HRTBT_ON 3 /* Heartbeat enabled, sent every 100msec */
249#define IPATH_IB_HRTBT_OFF 0 /* Heartbeat off */
250#define IPATH_IB_CFG_LWID_ENB 2 /* Get/set allowed Link-width */
251#define IPATH_IB_CFG_LWID 3 /* Get currently active Link-width */
252#define IPATH_IB_CFG_SPD_ENB 4 /* Get/set allowed Link speeds */
253#define IPATH_IB_CFG_SPD 5 /* Get current Link spd */
254#define IPATH_IB_CFG_RXPOL_ENB 6 /* Get/set Auto-RX-polarity enable */
255#define IPATH_IB_CFG_LREV_ENB 7 /* Get/set Auto-Lane-reversal enable */
256#define IPATH_IB_CFG_LINKLATENCY 8 /* Get Link latency */
257
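/*
 * Illustrative sketch (assumption about usage): these parameters are
 * read and written through the per-chip hooks in struct ipath_devdata
 * below, e.g.
 *
 *	int hrtbt = dd->ipath_f_get_ib_cfg(dd, IPATH_IB_CFG_HRTBT);
 *
 *	dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_HRTBT, IPATH_IB_HRTBT_ON);
 */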
258
259struct ipath_devdata {
260	struct list_head ipath_list;
261
262	struct ipath_kregs const *ipath_kregs;
263	struct ipath_cregs const *ipath_cregs;
264
265	/* mem-mapped pointer to base of chip regs */
266	u64 __iomem *ipath_kregbase;
267	/* end of mem-mapped chip space; range checking */
268	u64 __iomem *ipath_kregend;
269	/* physical address of chip for io_remap, etc. */
270	unsigned long ipath_physaddr;
271	/* base of memory alloced for ipath_kregbase, for free */
272	u64 *ipath_kregalloc;
273	/* ipath_cfgports pointers */
274	struct ipath_portdata **ipath_pd;
275	/* sk_buffs used by port 0 eager receive queue */
276	struct ipath_skbinfo *ipath_port0_skbinfo;
277	/* kvirt address of 1st 2k pio buffer */
278	void __iomem *ipath_pio2kbase;
279	/* kvirt address of 1st 4k pio buffer */
280	void __iomem *ipath_pio4kbase;
281	/*
282	 * points to area where PIOavail registers will be DMA'ed.
283	 * Has to be on a page of its own, because the page will be
284	 * mapped into user program space.  This copy is *ONLY* ever
285	 * written by DMA, not by the driver!  Need a copy per device
286	 * when we get to multiple devices
287	 */
288	volatile __le64 *ipath_pioavailregs_dma;
289	/* physical address where updates occur */
290	dma_addr_t ipath_pioavailregs_phys;
291	struct _ipath_layer ipath_layer;
292	/* setup intr */
293	int (*ipath_f_intrsetup)(struct ipath_devdata *);
294	/* fallback to alternate interrupt type if possible */
295	int (*ipath_f_intr_fallback)(struct ipath_devdata *);
296	/* setup on-chip bus config */
297	int (*ipath_f_bus)(struct ipath_devdata *, struct pci_dev *);
298	/* hard reset chip */
299	int (*ipath_f_reset)(struct ipath_devdata *);
300	int (*ipath_f_get_boardname)(struct ipath_devdata *, char *,
301				     size_t);
302	void (*ipath_f_init_hwerrors)(struct ipath_devdata *);
303	void (*ipath_f_handle_hwerrors)(struct ipath_devdata *, char *,
304					size_t);
305	void (*ipath_f_quiet_serdes)(struct ipath_devdata *);
306	int (*ipath_f_bringup_serdes)(struct ipath_devdata *);
307	int (*ipath_f_early_init)(struct ipath_devdata *);
308	void (*ipath_f_clear_tids)(struct ipath_devdata *, unsigned);
309	void (*ipath_f_put_tid)(struct ipath_devdata *, u64 __iomem*,
310				u32, unsigned long);
311	void (*ipath_f_tidtemplate)(struct ipath_devdata *);
312	void (*ipath_f_cleanup)(struct ipath_devdata *);
313	void (*ipath_f_setextled)(struct ipath_devdata *, u64, u64);
314	/* fill out chip-specific fields */
315	int (*ipath_f_get_base_info)(struct ipath_portdata *, void *);
316	/* free irq */
317	void (*ipath_f_free_irq)(struct ipath_devdata *);
318	struct ipath_message_header *(*ipath_f_get_msgheader)
319					(struct ipath_devdata *, __le32 *);
320	void (*ipath_f_config_ports)(struct ipath_devdata *, ushort);
321	int (*ipath_f_get_ib_cfg)(struct ipath_devdata *, int);
322	int (*ipath_f_set_ib_cfg)(struct ipath_devdata *, int, u32);
323	void (*ipath_f_config_jint)(struct ipath_devdata *, u16 , u16);
324	void (*ipath_f_read_counters)(struct ipath_devdata *,
325					struct infinipath_counters *);
326	void (*ipath_f_xgxs_reset)(struct ipath_devdata *);
327	/* per chip actions needed for IB Link up/down changes */
328	int (*ipath_f_ib_updown)(struct ipath_devdata *, int, u64);
329
330	unsigned ipath_lastegr_idx;
331	struct ipath_ibdev *verbs_dev;
332	struct timer_list verbs_timer;
333	/* total dwords sent (summed from counter) */
334	u64 ipath_sword;
335	/* total dwords rcvd (summed from counter) */
336	u64 ipath_rword;
337	/* total packets sent (summed from counter) */
338	u64 ipath_spkts;
339	/* total packets rcvd (summed from counter) */
340	u64 ipath_rpkts;
341	/* ipath_statusp initially points to this. */
342	u64 _ipath_status;
343	/* GUID for this interface, in network order */
344	__be64 ipath_guid;
345	/*
346	 * aggregate of error bits reported since last cleared, for
347	 * limiting of error reporting
348	 */
349	ipath_err_t ipath_lasterror;
350	/*
351	 * aggregate of error bits reported since last cleared, for
352	 * limiting of hwerror reporting
353	 */
354	ipath_err_t ipath_lasthwerror;
355	/* errors masked because they occur too fast */
356	ipath_err_t ipath_maskederrs;
357	u64 ipath_lastlinkrecov; /* link recoveries at last ACTIVE */
358	/* these 5 fields are used to establish deltas for IB Symbol
359	 * errors and linkrecovery errors. They can be reported on
360	 * some chips during link negotiation prior to INIT, and with
361	 * DDR when faking DDR negotiations with non-IBTA switches.
362	 * The chip counters are adjusted at driver unload if there is
363	 * a non-zero delta.
364	 */
365	u64 ibdeltainprog;
366	u64 ibsymdelta;
367	u64 ibsymsnap;
368	u64 iblnkerrdelta;
369	u64 iblnkerrsnap;
370
371	/* time in jiffies at which to re-enable maskederrs */
372	unsigned long ipath_unmasktime;
373	/* count of egrfull errors, combined for all ports */
374	u64 ipath_last_tidfull;
375	/* for ipath_qcheck() */
376	u64 ipath_lastport0rcv_cnt;
377	/* template for writing TIDs  */
378	u64 ipath_tidtemplate;
379	/* value to write to free TIDs */
380	u64 ipath_tidinvalid;
381	/* IBA6120 rcv interrupt setup */
382	u64 ipath_rhdrhead_intr_off;
383
384	/* size of memory at ipath_kregbase */
385	u32 ipath_kregsize;
386	/* number of registers used for pioavail */
387	u32 ipath_pioavregs;
388	/* IPATH_POLL, etc. */
389	u32 ipath_flags;
390	/* ipath_flags driver is waiting for */
391	u32 ipath_state_wanted;
392	/* last buffer for user use, first buf for kernel use is this
393	 * index. */
394	u32 ipath_lastport_piobuf;
395	/* is a stats timer active */
396	u32 ipath_stats_timer_active;
397	/* number of interrupts for this device -- saturates... */
398	u32 ipath_int_counter;
399	/* dwords sent read from counter */
400	u32 ipath_lastsword;
401	/* dwords received read from counter */
402	u32 ipath_lastrword;
403	/* sent packets read from counter */
404	u32 ipath_lastspkts;
405	/* received packets read from counter */
406	u32 ipath_lastrpkts;
407	/* pio bufs allocated per port */
408	u32 ipath_pbufsport;
409	/* if remainder on bufs/port, ports < extrabuf get 1 extra */
410	u32 ipath_ports_extrabuf;
411	u32 ipath_pioupd_thresh; /* update threshold, some chips */
412	/*
413	 * number of ports configured as max; zero is set to number chip
414	 * supports, less gives more pio bufs/port, etc.
415	 */
416	u32 ipath_cfgports;
417	/* count of port 0 hdrqfull errors */
418	u32 ipath_p0_hdrqfull;
419	/* port 0 number of receive eager buffers */
420	u32 ipath_p0_rcvegrcnt;
421
422	/*
423	 * index of last piobuffer we used.  Speeds up searching, by
424	 * starting at this point.  It doesn't matter if multiple CPUs use and
425	 * update it; the last update is the only write that matters.  Whenever it
426	 * wraps, we update shadow copies.  Need a copy per device when we
427	 * get to multiple devices
428	 */
429	u32 ipath_lastpioindex;
430	u32 ipath_lastpioindexl;
431	/* max length of freezemsg */
432	u32 ipath_freezelen;
433	/*
434	 * consecutive times we wanted a PIO buffer but were unable to
435	 * get one
436	 */
437	u32 ipath_consec_nopiobuf;
438	/*
439	 * hint that we should update ipath_pioavailshadow before
440	 * looking for a PIO buffer
441	 */
442	u32 ipath_upd_pio_shadow;
443	/* so we can rewrite it after a chip reset */
444	u32 ipath_pcibar0;
445	/* so we can rewrite it after a chip reset */
446	u32 ipath_pcibar1;
447	u32 ipath_x1_fix_tries;
448	u32 ipath_autoneg_tries;
449	u32 serdes_first_init_done;
450
451	struct ipath_relock {
452		atomic_t ipath_relock_timer_active;
453		struct timer_list ipath_relock_timer;
454		unsigned int ipath_relock_interval; /* in jiffies */
455	} ipath_relock_singleton;
456
457	/* interrupt number */
458	int ipath_irq;
459	/* HT/PCI Vendor ID (here for NodeInfo) */
460	u16 ipath_vendorid;
461	/* HT/PCI Device ID (here for NodeInfo) */
462	u16 ipath_deviceid;
463	/* offset in HT config space of slave/primary interface block */
464	u8 ipath_ht_slave_off;
465	/* for write combining settings */
466	unsigned long ipath_wc_cookie;
467	unsigned long ipath_wc_base;
468	unsigned long ipath_wc_len;
469	/* ref count for each pkey */
470	atomic_t ipath_pkeyrefs[4];
471	/* shadow copy of struct page *'s for exp tid pages */
472	struct page **ipath_pageshadow;
473	/* shadow copy of dma handles for exp tid pages */
474	dma_addr_t *ipath_physshadow;
475	u64 __iomem *ipath_egrtidbase;
476	spinlock_t ipath_kernel_tid_lock;
477	spinlock_t ipath_user_tid_lock;
478	spinlock_t ipath_sendctrl_lock;
479	/* around ipath_pd and (user ports) port_cnt use (intr vs free) */
480	spinlock_t ipath_uctxt_lock;
481
482	/*
483	 * IPATH_STATUS_*,
484	 * this address is mapped readonly into user processes so they can
485	 * get status cheaply, whenever they want.
486	 */
487	u64 *ipath_statusp;
488	/* freeze msg if hw error put chip in freeze */
489	char *ipath_freezemsg;
490	/* pci access data structure */
491	struct pci_dev *pcidev;
492	struct cdev *user_cdev;
493	struct cdev *diag_cdev;
494	struct device *user_dev;
495	struct device *diag_dev;
496	/* timer used to prevent stats overflow, error throttling, etc. */
497	struct timer_list ipath_stats_timer;
498	/* timer to verify interrupts work, and fallback if possible */
499	struct timer_list ipath_intrchk_timer;
500	void *ipath_dummy_hdrq;	/* used after port close */
501	dma_addr_t ipath_dummy_hdrq_phys;
502
503	/* SendDMA related entries */
504	spinlock_t            ipath_sdma_lock;
505	unsigned long         ipath_sdma_status;
506	unsigned long         ipath_sdma_abort_jiffies;
507	unsigned long         ipath_sdma_abort_intr_timeout;
508	unsigned long         ipath_sdma_buf_jiffies;
509	struct ipath_sdma_desc *ipath_sdma_descq;
510	u64		      ipath_sdma_descq_added;
511	u64		      ipath_sdma_descq_removed;
512	int		      ipath_sdma_desc_nreserved;
513	u16                   ipath_sdma_descq_cnt;
514	u16                   ipath_sdma_descq_tail;
515	u16                   ipath_sdma_descq_head;
516	u16                   ipath_sdma_next_intr;
517	u16                   ipath_sdma_reset_wait;
518	u8                    ipath_sdma_generation;
519	struct tasklet_struct ipath_sdma_abort_task;
520	struct tasklet_struct ipath_sdma_notify_task;
521	struct list_head      ipath_sdma_activelist;
522	struct list_head      ipath_sdma_notifylist;
523	atomic_t              ipath_sdma_vl15_count;
524	struct timer_list     ipath_sdma_vl15_timer;
525
526	dma_addr_t       ipath_sdma_descq_phys;
527	volatile __le64 *ipath_sdma_head_dma;
528	dma_addr_t       ipath_sdma_head_phys;
529
530	unsigned long ipath_ureg_align; /* user register alignment */
531
532	struct delayed_work ipath_autoneg_work;
533	wait_queue_head_t ipath_autoneg_wait;
534
535	/* HoL blocking / user app forward-progress state */
536	unsigned          ipath_hol_state;
537	unsigned          ipath_hol_next;
538	struct timer_list ipath_hol_timer;
539
540	/*
541	 * Shadow copies of registers; size indicates read access size.
542	 * Most of them are readonly, but some are write-only registers,
543	 * where we manipulate the bits in the shadow copy, and then write
544	 * the shadow copy to infinipath.
545	 *
546	 * We deliberately make most of these 32 bits, since they have
547	 * restricted range.  For any that we read, we want to generate 32
548	 * bit accesses, since Opteron will generate 2 separate 32 bit HT
549	 * transactions for a 64 bit read, and we want to avoid unnecessary
550	 * HT transactions.
551	 */
552
553	/* This is the 64 bit group */
554
555	/*
556	 * shadow of pioavail, check to be sure it's large enough at
557	 * init time.
558	 */
559	unsigned long ipath_pioavailshadow[8];
560	/* bitmap of send buffers available for the kernel to use with PIO. */
561	unsigned long ipath_pioavailkernel[8];
562	/* shadow of kr_gpio_out, for rmw ops */
563	u64 ipath_gpio_out;
564	/* shadow the gpio mask register */
565	u64 ipath_gpio_mask;
566	/* shadow the gpio output enable, etc... */
567	u64 ipath_extctrl;
568	/* kr_revision shadow */
569	u64 ipath_revision;
570	/*
571	 * shadow of ibcctrl, for interrupt handling of link changes,
572	 * etc.
573	 */
574	u64 ipath_ibcctrl;
575	/*
576	 * last ibcstatus, to suppress "duplicate" status change messages,
577	 * mostly from 2 to 3
578	 */
579	u64 ipath_lastibcstat;
580	/* hwerrmask shadow */
581	ipath_err_t ipath_hwerrmask;
582	ipath_err_t ipath_errormask; /* errormask shadow */
583	/* interrupt config reg shadow */
584	u64 ipath_intconfig;
585	/* kr_sendpiobufbase value */
586	u64 ipath_piobufbase;
587	/* kr_ibcddrctrl shadow */
588	u64 ipath_ibcddrctrl;
589
590	/* these are the "32 bit" regs */
591
592	/*
593	 * number of GUIDs in the flash for this interface; may need some
594	 * rethinking for setting on other ifaces
595	 */
596	u32 ipath_nguid;
597	/*
598	 * the following two are 32-bit bitmasks, but {test,clear,set}_bit
599	 * all expect bit fields to be "unsigned long"
600	 */
601	/* shadow kr_rcvctrl */
602	unsigned long ipath_rcvctrl;
603	/* shadow kr_sendctrl */
604	unsigned long ipath_sendctrl;
605	/* to not count armlaunch after cancel */
606	unsigned long ipath_lastcancel;
607	/* count cases where special trigger was needed (double write) */
608	unsigned long ipath_spectriggerhit;
609
610	/* value we put in kr_rcvhdrcnt */
611	u32 ipath_rcvhdrcnt;
612	/* value we put in kr_rcvhdrsize */
613	u32 ipath_rcvhdrsize;
614	/* value we put in kr_rcvhdrentsize */
615	u32 ipath_rcvhdrentsize;
616	/* offset of last entry in rcvhdrq */
617	u32 ipath_hdrqlast;
618	/* kr_portcnt value */
619	u32 ipath_portcnt;
620	/* kr_pagealign value */
621	u32 ipath_palign;
622	/* number of "2KB" PIO buffers */
623	u32 ipath_piobcnt2k;
624	/* size in bytes of "2KB" PIO buffers */
625	u32 ipath_piosize2k;
626	/* number of "4KB" PIO buffers */
627	u32 ipath_piobcnt4k;
628	/* size in bytes of "4KB" PIO buffers */
629	u32 ipath_piosize4k;
630	u32 ipath_pioreserved; /* reserved for special in-kernel use */
631	/* kr_rcvegrbase value */
632	u32 ipath_rcvegrbase;
633	/* kr_rcvegrcnt value */
634	u32 ipath_rcvegrcnt;
635	/* kr_rcvtidbase value */
636	u32 ipath_rcvtidbase;
637	/* kr_rcvtidcnt value */
638	u32 ipath_rcvtidcnt;
639	/* kr_sendregbase */
640	u32 ipath_sregbase;
641	/* kr_userregbase */
642	u32 ipath_uregbase;
643	/* kr_counterregbase */
644	u32 ipath_cregbase;
645	/* shadow the control register contents */
646	u32 ipath_control;
647	/* PCI revision register (HTC rev on FPGA) */
648	u32 ipath_pcirev;
649
650	/* chip address space used by 4k pio buffers */
651	u32 ipath_4kalign;
652	/* The MTU programmed for this unit */
653	u32 ipath_ibmtu;
654	/*
655	 * The max size IB packet, including IB headers, that we can send.
656	 * Starts same as ipath_piosize, but is affected when ibmtu is
657	 * changed, or by size of eager buffers
658	 */
659	u32 ipath_ibmaxlen;
660	/*
661	 * ibmaxlen at init time, limited by chip and by receive buffer
662	 * size.  Not changed after init.
663	 */
664	u32 ipath_init_ibmaxlen;
665	/* size of each rcvegrbuffer */
666	u32 ipath_rcvegrbufsize;
667	/* localbus width (1, 2, 4, 8, 16, 32) from config space */
668	u32 ipath_lbus_width;
669	/* localbus speed (HT: 200,400,800,1000; PCIe 2500) */
670	u32 ipath_lbus_speed;
671	/*
672	 * number of sequential ibcstatus changes for polling active/quiet
673	 * (i.e., link not coming up).
674	 */
675	u32 ipath_ibpollcnt;
676	/* low and high portions of MSI capability/vector */
677	u32 ipath_msi_lo;
678	/* saved after PCIe init for restore after reset */
679	u32 ipath_msi_hi;
680	/* MSI data (vector) saved for restore */
681	u16 ipath_msi_data;
682	/* MLID programmed for this instance */
683	u16 ipath_mlid;
684	/* LID programmed for this instance */
685	u16 ipath_lid;
686	/* list of pkeys programmed; 0 if not set */
687	u16 ipath_pkeys[4];
688	/*
689	 * ASCII serial number, from flash, large enough for original
690	 * all-digit strings, and the longer QLogic serial number format
691	 */
692	u8 ipath_serial[16];
693	/* human readable board version */
694	u8 ipath_boardversion[96];
695	u8 ipath_lbus_info[32]; /* human readable localbus info */
696	/* chip major rev, from ipath_revision */
697	u8 ipath_majrev;
698	/* chip minor rev, from ipath_revision */
699	u8 ipath_minrev;
700	/* board rev, from ipath_revision */
701	u8 ipath_boardrev;
702	/* saved for restore after reset */
703	u8 ipath_pci_cacheline;
704	/* LID mask control */
705	u8 ipath_lmc;
706	/* link width supported */
707	u8 ipath_link_width_supported;
708	/* link speed supported */
709	u8 ipath_link_speed_supported;
710	u8 ipath_link_width_enabled;
711	u8 ipath_link_speed_enabled;
712	u8 ipath_link_width_active;
713	u8 ipath_link_speed_active;
714	/* Rx Polarity inversion (compensate for ~tx on partner) */
715	u8 ipath_rx_pol_inv;
716
717	u8 ipath_r_portenable_shift;
718	u8 ipath_r_intravail_shift;
719	u8 ipath_r_tailupd_shift;
720	u8 ipath_r_portcfg_shift;
721
722	/* unit # of this chip, if present */
723	int ipath_unit;
724
725	/* local link integrity counter */
726	u32 ipath_lli_counter;
727	/* local link integrity errors */
728	u32 ipath_lli_errors;
729	/*
730	 * Above counts only cases where _successive_ LocalLinkIntegrity
731	 * errors were seen in the receive headers of kern-packets.
732	 * Below are the three (monotonically increasing) counters
733	 * maintained via GPIO interrupts on iba6120-rev2.
734	 */
735	u32 ipath_rxfc_unsupvl_errs;
736	u32 ipath_overrun_thresh_errs;
737	u32 ipath_lli_errs;
738
739	/*
740	 * Not all devices managed by a driver instance are the same
741	 * type, so these fields must be per-device.
742	 */
743	u64 ipath_i_bitsextant;
744	ipath_err_t ipath_e_bitsextant;
745	ipath_err_t ipath_hwe_bitsextant;
746
747	/*
748	 * Below should be computable from number of ports,
749	 * since they are never modified.
750	 */
751	u64 ipath_i_rcvavail_mask;
752	u64 ipath_i_rcvurg_mask;
753	u16 ipath_i_rcvurg_shift;
754	u16 ipath_i_rcvavail_shift;
755
756	/*
757	 * Register bits for selecting i2c direction and values, used for
758	 * I2C serial flash.
759	 */
760	u8 ipath_gpio_sda_num;
761	u8 ipath_gpio_scl_num;
762	u8 ipath_i2c_chain_type;
763	u64 ipath_gpio_sda;
764	u64 ipath_gpio_scl;
765
766	/* lock for doing RMW of shadows/regs for ExtCtrl and GPIO */
767	spinlock_t ipath_gpio_lock;
768
769	/*
770	 * IB link and linktraining states and masks that vary per chip in
771	 * some way.  Set at init, to avoid recomputing them on each IB status change interrupt.
772	 */
773	u8 ibcs_ls_shift;
774	u8 ibcs_lts_mask;
775	u32 ibcs_mask;
776	u32 ib_init;
777	u32 ib_arm;
778	u32 ib_active;
779
780	u16 ipath_rhf_offset; /* offset of RHF within receive header entry */
781
782	/*
783	 * shift/mask for linkcmd, linkinitcmd, maxpktlen in ibccontrol
784	 * reg. Changes for IBA7220
785	 */
786	u8 ibcc_lic_mask; /* LinkInitCmd */
787	u8 ibcc_lc_shift; /* LinkCmd */
788	u8 ibcc_mpl_shift; /* Maxpktlen */
789
790	u8 delay_mult;
791
792	/* used to override LED behavior */
793	u8 ipath_led_override;  /* Substituted for normal value, if non-zero */
794	u16 ipath_led_override_timeoff; /* delta to next timer event */
795	u8 ipath_led_override_vals[2]; /* Alternates per blink-frame */
796	u8 ipath_led_override_phase; /* Just counts, LSB picks from vals[] */
797	atomic_t ipath_led_override_timer_active;
798	/* Used to flash LEDs in override mode */
799	struct timer_list ipath_led_override_timer;
800
801	/* Support (including locks) for EEPROM logging of errors and time */
802	/* control access to actual counters, timer */
803	spinlock_t ipath_eep_st_lock;
804	/* control high-level access to EEPROM */
805	struct mutex ipath_eep_lock;
806	/* Below inc'd by ipath_snap_cntrs(), locked by ipath_eep_st_lock */
807	uint64_t ipath_traffic_wds;
808	/* active time is kept in seconds, but logged in hours */
809	atomic_t ipath_active_time;
810	/* Below are nominal shadow of EEPROM, new since last EEPROM update */
811	uint8_t ipath_eep_st_errs[IPATH_EEP_LOG_CNT];
812	uint8_t ipath_eep_st_new_errs[IPATH_EEP_LOG_CNT];
813	uint16_t ipath_eep_hrs;
814	/*
815	 * masks for which bits of errs, hwerrs that cause
816	 * each of the counters to increment.
817	 */
818	struct ipath_eep_log_mask ipath_eep_st_masks[IPATH_EEP_LOG_CNT];
819
820	/* interrupt mitigation reload register info */
821	u16 ipath_jint_idle_ticks;	/* idle clock ticks */
822	u16 ipath_jint_max_packets;	/* max packets across all ports */
823
824	/*
825	 * lock for access to SerDes, and flags to sequence preset
826	 * versus steady-state. 7220-only at the moment.
827	 */
828	spinlock_t ipath_sdepb_lock;
829	u8 ipath_presets_needed; /* Set if presets to be restored next DOWN */
830};
831
832/* ipath_hol_state values (stopping/starting user proc, send flushing) */
833#define IPATH_HOL_UP       0
834#define IPATH_HOL_DOWN     1
835/* ipath_hol_next toggle values, used when hol_state is IPATH_HOL_DOWN */
836#define IPATH_HOL_DOWNSTOP 0
837#define IPATH_HOL_DOWNCONT 1
838
839/* bit positions for sdma_status */
840#define IPATH_SDMA_ABORTING  0
841#define IPATH_SDMA_DISARMED  1
842#define IPATH_SDMA_DISABLED  2
843#define IPATH_SDMA_LAYERBUF  3
844#define IPATH_SDMA_RUNNING  30
845#define IPATH_SDMA_SHUTDOWN 31
846
847/* bit combinations that correspond to abort states */
848#define IPATH_SDMA_ABORT_NONE 0
849#define IPATH_SDMA_ABORT_ABORTING (1UL << IPATH_SDMA_ABORTING)
850#define IPATH_SDMA_ABORT_DISARMED ((1UL << IPATH_SDMA_ABORTING) | \
851	(1UL << IPATH_SDMA_DISARMED))
852#define IPATH_SDMA_ABORT_DISABLED ((1UL << IPATH_SDMA_ABORTING) | \
853	(1UL << IPATH_SDMA_DISABLED))
854#define IPATH_SDMA_ABORT_ABORTED ((1UL << IPATH_SDMA_ABORTING) | \
855	(1UL << IPATH_SDMA_DISARMED) | (1UL << IPATH_SDMA_DISABLED))
856#define IPATH_SDMA_ABORT_MASK ((1UL<<IPATH_SDMA_ABORTING) | \
857	(1UL << IPATH_SDMA_DISARMED) | (1UL << IPATH_SDMA_DISABLED))
858
859#define IPATH_SDMA_BUF_NONE 0
860#define IPATH_SDMA_BUF_MASK (1UL<<IPATH_SDMA_LAYERBUF)
861
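/*
 * Illustrative sketch (assumption about usage): ipath_sdma_status is an
 * unsigned long, so the bit positions above are used with the generic
 * bitops; anything that must stay consistent with the descriptor queue
 * is done under ipath_sdma_lock.
 *
 *	if (test_bit(IPATH_SDMA_ABORTING, &dd->ipath_sdma_status))
 *		return -EBUSY;	// engine is being torn down
 */
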
862/* Private data for file operations */
863struct ipath_filedata {
864	struct ipath_portdata *pd;
865	unsigned subport;
866	unsigned tidcursor;
867	struct ipath_user_sdma_queue *pq;
868};
869extern struct list_head ipath_dev_list;
870extern spinlock_t ipath_devs_lock;
871extern struct ipath_devdata *ipath_lookup(int unit);
872
873int ipath_init_chip(struct ipath_devdata *, int);
874int ipath_enable_wc(struct ipath_devdata *dd);
875void ipath_disable_wc(struct ipath_devdata *dd);
876int ipath_count_units(int *npresentp, int *nupp, int *maxportsp);
877void ipath_shutdown_device(struct ipath_devdata *);
878void ipath_clear_freeze(struct ipath_devdata *);
879
880struct file_operations;
881int ipath_cdev_init(int minor, char *name, const struct file_operations *fops,
882		    struct cdev **cdevp, struct device **devp);
883void ipath_cdev_cleanup(struct cdev **cdevp,
884			struct device **devp);
885
886int ipath_diag_add(struct ipath_devdata *);
887void ipath_diag_remove(struct ipath_devdata *);
888
889extern wait_queue_head_t ipath_state_wait;
890
891int ipath_user_add(struct ipath_devdata *dd);
892void ipath_user_remove(struct ipath_devdata *dd);
893
894struct sk_buff *ipath_alloc_skb(struct ipath_devdata *dd, gfp_t);
895
896extern int ipath_diag_inuse;
897
898irqreturn_t ipath_intr(int irq, void *devid);
899int ipath_decode_err(struct ipath_devdata *dd, char *buf, size_t blen,
900		     ipath_err_t err);
901#if __IPATH_INFO || __IPATH_DBG
902extern const char *ipath_ibcstatus_str[];
903#endif
904
905/* clean up any per-chip chip-specific stuff */
906void ipath_chip_cleanup(struct ipath_devdata *);
907/* clean up any chip type-specific stuff */
908void ipath_chip_done(void);
909
910/* check to see if we have to force ordering for write combining */
911int ipath_unordered_wc(void);
912
913void ipath_disarm_piobufs(struct ipath_devdata *, unsigned first,
914			  unsigned cnt);
915void ipath_cancel_sends(struct ipath_devdata *, int);
916
917int ipath_create_rcvhdrq(struct ipath_devdata *, struct ipath_portdata *);
918void ipath_free_pddata(struct ipath_devdata *, struct ipath_portdata *);
919
920int ipath_parse_ushort(const char *str, unsigned short *valp);
921
922void ipath_kreceive(struct ipath_portdata *);
923int ipath_setrcvhdrsize(struct ipath_devdata *, unsigned);
924int ipath_reset_device(int);
925void ipath_get_faststats(unsigned long);
926int ipath_wait_linkstate(struct ipath_devdata *, u32, int);
927int ipath_set_linkstate(struct ipath_devdata *, u8);
928int ipath_set_mtu(struct ipath_devdata *, u16);
929int ipath_set_lid(struct ipath_devdata *, u32, u8);
930int ipath_set_rx_pol_inv(struct ipath_devdata *dd, u8 new_pol_inv);
931void ipath_enable_armlaunch(struct ipath_devdata *);
932void ipath_disable_armlaunch(struct ipath_devdata *);
933void ipath_hol_down(struct ipath_devdata *);
934void ipath_hol_up(struct ipath_devdata *);
935void ipath_hol_event(unsigned long);
936void ipath_toggle_rclkrls(struct ipath_devdata *);
937void ipath_sd7220_clr_ibpar(struct ipath_devdata *);
938void ipath_set_relock_poll(struct ipath_devdata *, int);
939void ipath_shutdown_relock_poll(struct ipath_devdata *);
940
941/* for use in system calls, where we want to know device type, etc. */
942#define port_fp(fp) ((struct ipath_filedata *)(fp)->private_data)->pd
943#define subport_fp(fp) \
944	((struct ipath_filedata *)(fp)->private_data)->subport
945#define tidcursor_fp(fp) \
946	((struct ipath_filedata *)(fp)->private_data)->tidcursor
947#define user_sdma_queue_fp(fp) \
948	((struct ipath_filedata *)(fp)->private_data)->pq
949
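/*
 * Illustrative sketch (hypothetical handler, not part of the driver):
 * inside a file_operations method the per-open state is reached through
 * the accessors above.
 *
 *	static int example_release(struct inode *in, struct file *fp)
 *	{
 *		struct ipath_portdata *pd = port_fp(fp);
 *		struct ipath_devdata *dd = pd->port_dd;
 *		...
 *	}
 */
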
950/*
951 * values for ipath_flags
952 */
953		/* chip can report link latency (IB 1.2) */
954#define IPATH_HAS_LINK_LATENCY 0x1
955		/* The chip is up and initted */
956#define IPATH_INITTED       0x2
957		/* set if any user code has set kr_rcvhdrsize */
958#define IPATH_RCVHDRSZ_SET  0x4
959		/* The chip is present and valid for accesses */
960#define IPATH_PRESENT       0x8
961		/* HT link0 is only 8 bits wide, ignore upper byte crc
962		 * errors, etc. */
963#define IPATH_8BIT_IN_HT0   0x10
964		/* HT link1 is only 8 bits wide, ignore upper byte crc
965		 * errors, etc. */
966#define IPATH_8BIT_IN_HT1   0x20
967		/* The link is down */
968#define IPATH_LINKDOWN      0x40
969		/* The link level is up (0x11) */
970#define IPATH_LINKINIT      0x80
971		/* The link is in the armed (0x21) state */
972#define IPATH_LINKARMED     0x100
973		/* The link is in the active (0x31) state */
974#define IPATH_LINKACTIVE    0x200
975		/* link current state is unknown */
976#define IPATH_LINKUNK       0x400
977		/* Write combining flush needed for PIO */
978#define IPATH_PIO_FLUSH_WC  0x1000
979		/* no DMA'd receive tail pointer; tail is inferred from RHF sequence numbers */
980#define IPATH_NODMA_RTAIL   0x2000
981		/* no IB cable, or no device on IB cable */
982#define IPATH_NOCABLE       0x4000
983		/* Supports port zero per packet receive interrupts via
984		 * GPIO */
985#define IPATH_GPIO_INTR     0x8000
986		/* uses the coded 4byte TID, not 8 byte */
987#define IPATH_4BYTE_TID     0x10000
988		/* packet/word counters are 32 bit, else those 4 counters
989		 * are 64bit */
990#define IPATH_32BITCOUNTERS 0x20000
991		/* Interrupt register is 64 bits */
992#define IPATH_INTREG_64     0x40000
993		/* can miss port0 rx interrupts */
994#define IPATH_DISABLED      0x80000 /* administratively disabled */
995		/* Use GPIO interrupts for new counters */
996#define IPATH_GPIO_ERRINTRS 0x100000
997#define IPATH_SWAP_PIOBUFS  0x200000
998		/* Supports Send DMA */
999#define IPATH_HAS_SEND_DMA  0x400000
1000		/* Supports Send Count (not just word count) in PBC */
1001#define IPATH_HAS_PBC_CNT   0x800000
1002		/* Suppress heartbeat, even if turning off loopback */
1003#define IPATH_NO_HRTBT      0x1000000
1004#define IPATH_HAS_THRESH_UPDATE 0x4000000
1005#define IPATH_HAS_MULT_IB_SPEED 0x8000000
1006#define IPATH_IB_AUTONEG_INPROG 0x10000000
1007#define IPATH_IB_AUTONEG_FAILED 0x20000000
1008		/* Link intentionally disabled (Linkdown-disable); do not attempt to bring it up */
1009#define IPATH_IB_LINK_DISABLED 0x40000000
1010#define IPATH_IB_FORCE_NOTIFY 0x80000000 /* force notify on next ib change */
1011
1012/* Bits in GPIO for the added interrupts */
1013#define IPATH_GPIO_PORT0_BIT 2
1014#define IPATH_GPIO_RXUVL_BIT 3
1015#define IPATH_GPIO_OVRUN_BIT 4
1016#define IPATH_GPIO_LLI_BIT 5
1017#define IPATH_GPIO_ERRINTR_MASK 0x38
1018
1019/* portdata flag bit offsets */
1020		/* waiting for a packet to arrive */
1021#define IPATH_PORT_WAITING_RCV   2
1022		/* master has not finished initializing */
1023#define IPATH_PORT_MASTER_UNINIT 4
1024		/* waiting for an urgent packet to arrive */
1025#define IPATH_PORT_WAITING_URG 5
1026
1027/* free up any allocated data at close */
1028void ipath_free_data(struct ipath_portdata *dd);
1029u32 __iomem *ipath_getpiobuf(struct ipath_devdata *, u32, u32 *);
1030void ipath_chg_pioavailkernel(struct ipath_devdata *dd, unsigned start,
1031				unsigned len, int avail);
1032void ipath_init_iba6110_funcs(struct ipath_devdata *);
1033void ipath_get_eeprom_info(struct ipath_devdata *);
1034int ipath_update_eeprom_log(struct ipath_devdata *dd);
1035void ipath_inc_eeprom_err(struct ipath_devdata *dd, u32 eidx, u32 incr);
1036u64 ipath_snap_cntr(struct ipath_devdata *, ipath_creg);
1037void ipath_disarm_senderrbufs(struct ipath_devdata *);
1038void ipath_force_pio_avail_update(struct ipath_devdata *);
1039void signal_ib_event(struct ipath_devdata *dd, enum ib_event_type ev);
1040
1041/*
1042 * Set LED override, only the two LSBs have "public" meaning, but
1043 * any non-zero value substitutes them for the Link and LinkTrain
1044 * LED states.
1045 */
1046#define IPATH_LED_PHYS 1 /* Physical (linktraining) GREEN LED */
1047#define IPATH_LED_LOG 2  /* Logical (link) YELLOW LED */
1048void ipath_set_led_override(struct ipath_devdata *dd, unsigned int val);
1049
1050/* send dma routines */
1051int setup_sdma(struct ipath_devdata *);
1052void teardown_sdma(struct ipath_devdata *);
1053void ipath_restart_sdma(struct ipath_devdata *);
1054void ipath_sdma_intr(struct ipath_devdata *);
1055int ipath_sdma_verbs_send(struct ipath_devdata *, struct ipath_sge_state *,
1056			  u32, struct ipath_verbs_txreq *);
1057/* ipath_sdma_lock should be locked before calling this. */
1058int ipath_sdma_make_progress(struct ipath_devdata *dd);
1059
1060/* must be called under ipath_sdma_lock */
1061static inline u16 ipath_sdma_descq_freecnt(const struct ipath_devdata *dd)
1062{
1063	return dd->ipath_sdma_descq_cnt -
1064		(dd->ipath_sdma_descq_added - dd->ipath_sdma_descq_removed) -
1065		1 - dd->ipath_sdma_desc_nreserved;
1066}
1067
1068static inline void ipath_sdma_desc_reserve(struct ipath_devdata *dd, u16 cnt)
1069{
1070	dd->ipath_sdma_desc_nreserved += cnt;
1071}
1072
1073static inline void ipath_sdma_desc_unreserve(struct ipath_devdata *dd, u16 cnt)
1074{
1075	dd->ipath_sdma_desc_nreserved -= cnt;
1076}
1077
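/*
 * Illustrative sketch (assumption about usage): a caller about to queue
 * a request reserves its descriptors under ipath_sdma_lock; ndesc and
 * ret are hypothetical.  The "- 1" in ipath_sdma_descq_freecnt() keeps
 * one slot unused, presumably so a full ring is distinguishable from an
 * empty one.
 *
 *	spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
 *	if (ipath_sdma_descq_freecnt(dd) >= ndesc)
 *		ipath_sdma_desc_reserve(dd, ndesc);
 *	else
 *		ret = -EBUSY;
 *	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
 */
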
1078/*
1079 * number of words used for protocol header if not set by ipath_userinit()
1080 */
1081#define IPATH_DFLT_RCVHDRSIZE 9
1082
1083int ipath_get_user_pages(unsigned long, size_t, struct page **);
1084void ipath_release_user_pages(struct page **, size_t);
1085void ipath_release_user_pages_on_close(struct page **, size_t);
1086int ipath_eeprom_read(struct ipath_devdata *, u8, void *, int);
1087int ipath_eeprom_write(struct ipath_devdata *, u8, const void *, int);
1088int ipath_tempsense_read(struct ipath_devdata *, u8 regnum);
1089int ipath_tempsense_write(struct ipath_devdata *, u8 regnum, u8 data);
1090
1091/* these are used for the registers that vary with port */
1092void ipath_write_kreg_port(const struct ipath_devdata *, ipath_kreg,
1093			   unsigned, u64);
1094
1095/*
1096 * We could have a single register get/put routine, that takes a group type,
1097 * but this is somewhat clearer and cleaner.  It also gives us some error
1098 * checking.  64 bit register reads should always work, but are inefficient
1099 * on opteron (the northbridge always generates 2 separate HT 32 bit reads),
1100 * so we use kreg32 wherever possible.  User register and counter register
1101 * reads are always 32 bit reads, so only one form of those routines.
1102 */
1103
1104/*
1105 * At the moment, none of the s-registers are writable, so no
1106 * ipath_write_sreg().
1107 */
1108
1109/**
1110 * ipath_read_ureg32 - read 32-bit virtualized per-port register
1111 * @dd: device
1112 * @regno: register number
1113 * @port: port number
1114 *
1115 * Return the contents of a register that is virtualized to be per port.
1116 * Returns 0 if the chip is not present or its registers are not yet
1117 * mapped (not distinguishable from valid contents at runtime).
1118 */
1119static inline u32 ipath_read_ureg32(const struct ipath_devdata *dd,
1120				    ipath_ureg regno, int port)
1121{
1122	if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT))
1123		return 0;
1124
1125	return readl(regno + (u64 __iomem *)
1126		     (dd->ipath_uregbase +
1127		      (char __iomem *)dd->ipath_kregbase +
1128		      dd->ipath_ureg_align * port));
1129}
1130
1131/**
1132 * ipath_write_ureg - write 64-bit virtualized per-port register
1133 * @dd: device
1134 * @regno: register number
1135 * @value: value
1136 * @port: port
1137 *
1138 * Write the contents of a register that is virtualized to be per port.
1139 */
1140static inline void ipath_write_ureg(const struct ipath_devdata *dd,
1141				    ipath_ureg regno, u64 value, int port)
1142{
1143	u64 __iomem *ubase = (u64 __iomem *)
1144		(dd->ipath_uregbase + (char __iomem *) dd->ipath_kregbase +
1145		 dd->ipath_ureg_align * port);
1146	if (dd->ipath_kregbase)
1147		writeq(value, &ubase[regno]);
1148}
1149
1150static inline u32 ipath_read_kreg32(const struct ipath_devdata *dd,
1151				    ipath_kreg regno)
1152{
1153	if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT))
1154		return -1;
1155	return readl((u32 __iomem *) &dd->ipath_kregbase[regno]);
1156}
1157
1158static inline u64 ipath_read_kreg64(const struct ipath_devdata *dd,
1159				    ipath_kreg regno)
1160{
1161	if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT))
1162		return -1;
1163
1164	return readq(&dd->ipath_kregbase[regno]);
1165}
1166
1167static inline void ipath_write_kreg(const struct ipath_devdata *dd,
1168				    ipath_kreg regno, u64 value)
1169{
1170	if (dd->ipath_kregbase)
1171		writeq(value, &dd->ipath_kregbase[regno]);
1172}
1173
1174static inline u64 ipath_read_creg(const struct ipath_devdata *dd,
1175				  ipath_sreg regno)
1176{
1177	if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT))
1178		return 0;
1179
1180	return readq(regno + (u64 __iomem *)
1181		     (dd->ipath_cregbase +
1182		      (char __iomem *)dd->ipath_kregbase));
1183}
1184
1185static inline u32 ipath_read_creg32(const struct ipath_devdata *dd,
1186					 ipath_sreg regno)
1187{
1188	if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT))
1189		return 0;
1190	return readl(regno + (u64 __iomem *)
1191		     (dd->ipath_cregbase +
1192		      (char __iomem *)dd->ipath_kregbase));
1193}
1194
1195static inline void ipath_write_creg(const struct ipath_devdata *dd,
1196				    ipath_creg regno, u64 value)
1197{
1198	if (dd->ipath_kregbase)
1199		writeq(value, regno + (u64 __iomem *)
1200		       (dd->ipath_cregbase +
1201			(char __iomem *)dd->ipath_kregbase));
1202}
1203
1204static inline void ipath_clear_rcvhdrtail(const struct ipath_portdata *pd)
1205{
1206	*((u64 *) pd->port_rcvhdrtail_kvaddr) = 0ULL;
1207}
1208
1209static inline u32 ipath_get_rcvhdrtail(const struct ipath_portdata *pd)
1210{
1211	return (u32) le64_to_cpu(*((volatile __le64 *)
1212				pd->port_rcvhdrtail_kvaddr));
1213}
1214
1215static inline u32 ipath_get_hdrqtail(const struct ipath_portdata *pd)
1216{
1217	const struct ipath_devdata *dd = pd->port_dd;
1218	u32 hdrqtail;
1219
1220	if (dd->ipath_flags & IPATH_NODMA_RTAIL) {
1221		__le32 *rhf_addr;
1222		u32 seq;
1223
1224		rhf_addr = (__le32 *) pd->port_rcvhdrq +
1225			pd->port_head + dd->ipath_rhf_offset;
1226		seq = ipath_hdrget_seq(rhf_addr);
1227		hdrqtail = pd->port_head;
1228		if (seq == pd->port_seq_cnt)
1229			hdrqtail++;
1230	} else
1231		hdrqtail = ipath_get_rcvhdrtail(pd);
1232
1233	return hdrqtail;
1234}
1235
1236static inline u64 ipath_read_ireg(const struct ipath_devdata *dd, ipath_kreg r)
1237{
1238	return (dd->ipath_flags & IPATH_INTREG_64) ?
1239		ipath_read_kreg64(dd, r) : ipath_read_kreg32(dd, r);
1240}
1241
1242/*
1243 * from contents of IBCStatus (or a saved copy), return linkstate
1244 * Report ACTIVE_DEFER as ACTIVE, because we treat them the same
1245 * everywhere, anyway (and should be, for almost all purposes).
1246 */
1247static inline u32 ipath_ib_linkstate(struct ipath_devdata *dd, u64 ibcs)
1248{
1249	u32 state = (u32)(ibcs >> dd->ibcs_ls_shift) &
1250		INFINIPATH_IBCS_LINKSTATE_MASK;
1251	if (state == INFINIPATH_IBCS_L_STATE_ACT_DEFER)
1252		state = INFINIPATH_IBCS_L_STATE_ACTIVE;
1253	return state;
1254}
1255
1256/* from contents of IBCStatus (or a saved copy), return linktrainingstate */
1257static inline u32 ipath_ib_linktrstate(struct ipath_devdata *dd, u64 ibcs)
1258{
1259	return (u32)(ibcs >> INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) &
1260		dd->ibcs_lts_mask;
1261}
1262
1263/*
1264 * from contents of IBCStatus (or a saved copy), return the logical link
1265 * state: the combination of link state and linktraining state (down,
1266 * active, init, arm, etc.)
1267 */
1268static inline u32 ipath_ib_state(struct ipath_devdata *dd, u64 ibcs)
1269{
1270	u32 ibs;
1271	ibs = (u32)(ibcs >> INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) &
1272		dd->ibcs_lts_mask;
1273	ibs |= (u32)(ibcs &
1274		(INFINIPATH_IBCS_LINKSTATE_MASK << dd->ibcs_ls_shift));
1275	return ibs;
1276}
1277
1278/*
1279 * sysfs interface.
1280 */
1281
1282struct device_driver;
1283
1284extern const char ib_ipath_version[];
1285
1286extern const struct attribute_group *ipath_driver_attr_groups[];
1287
1288int ipath_device_create_group(struct device *, struct ipath_devdata *);
1289void ipath_device_remove_group(struct device *, struct ipath_devdata *);
1290int ipath_expose_reset(struct device *);
1291
1292int ipath_init_ipathfs(void);
1293void ipath_exit_ipathfs(void);
1294int ipathfs_add_device(struct ipath_devdata *);
1295int ipathfs_remove_device(struct ipath_devdata *);
1296
1297/*
1298 * dma_addr wrappers - all 0's invalid for hw
1299 */
1300dma_addr_t ipath_map_page(struct pci_dev *, struct page *, unsigned long,
1301			  size_t, int);
1302dma_addr_t ipath_map_single(struct pci_dev *, void *, size_t, int);
1303const char *ipath_get_unit_name(int unit);
1304
1305/*
1306 * Flush write combining store buffers (if present) and perform a write
1307 * barrier.
1308 */
1309#if defined(CONFIG_X86_64)
1310#define ipath_flush_wc() asm volatile("sfence" ::: "memory")
1311#else
1312#define ipath_flush_wc() wmb()
1313#endif
1314
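/*
 * Minimal sketch (assumption; piobuf, hdr and hdrlen are hypothetical):
 * after copying data into a write-combining mapped PIO buffer, the
 * stores are pushed out with ipath_flush_wc() before telling the chip
 * the buffer is complete.
 *
 *	memcpy_toio(piobuf, hdr, hdrlen);
 *	ipath_flush_wc();
 */
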
1315extern unsigned ipath_debug; /* debugging bit mask */
1316extern unsigned ipath_linkrecovery;
1317extern unsigned ipath_mtu4096;
1318extern struct mutex ipath_mutex;
1319
1320#define IPATH_DRV_NAME		"ib_ipath"
1321#define IPATH_MAJOR		233
1322#define IPATH_USER_MINOR_BASE	0
1323#define IPATH_DIAGPKT_MINOR	127
1324#define IPATH_DIAG_MINOR_BASE	129
1325#define IPATH_NMINORS		255
1326
1327#define ipath_dev_err(dd,fmt,...) \
1328	do { \
1329		const struct ipath_devdata *__dd = (dd); \
1330		if (__dd->pcidev) \
1331			dev_err(&__dd->pcidev->dev, "%s: " fmt, \
1332				ipath_get_unit_name(__dd->ipath_unit), \
1333				##__VA_ARGS__); \
1334		else \
1335			printk(KERN_ERR IPATH_DRV_NAME ": %s: " fmt, \
1336			       ipath_get_unit_name(__dd->ipath_unit), \
1337			       ##__VA_ARGS__); \
1338	} while (0)
1339
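/*
 * Example use (illustrative message, not taken from the driver):
 *
 *	ipath_dev_err(dd, "unable to allocate rcvhdrq for port %u\n",
 *		      pd->port_port);
 */
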
1340#if _IPATH_DEBUGGING
1341
1342# define __IPATH_DBG_WHICH(which,fmt,...) \
1343	do { \
1344		if (unlikely(ipath_debug & (which))) \
1345			printk(KERN_DEBUG IPATH_DRV_NAME ": %s: " fmt, \
1346			       __func__,##__VA_ARGS__); \
1347	} while(0)
1348
1349# define ipath_dbg(fmt,...) \
1350	__IPATH_DBG_WHICH(__IPATH_DBG,fmt,##__VA_ARGS__)
1351# define ipath_cdbg(which,fmt,...) \
1352	__IPATH_DBG_WHICH(__IPATH_##which##DBG,fmt,##__VA_ARGS__)
1353
1354#else /* ! _IPATH_DEBUGGING */
1355
1356# define ipath_dbg(fmt,...)
1357# define ipath_cdbg(which,fmt,...)
1358
1359#endif /* _IPATH_DEBUGGING */
1360
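/*
 * Example use (illustrative): when the corresponding bit is set in
 * ipath_debug, this prints at KERN_DEBUG with the calling function
 * name prefixed.
 *
 *	ipath_dbg("port %u rcvhdrq at %p\n", pd->port_port, pd->port_rcvhdrq);
 */
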
1361/*
1362 * this is used for formatting hw error messages...
1363 */
1364struct ipath_hwerror_msgs {
1365	u64 mask;
1366	const char *msg;
1367};
1368
1369#define INFINIPATH_HWE_MSG(a, b) { .mask = INFINIPATH_HWE_##a, .msg = b }
1370
1371/* in ipath_intr.c... */
1372void ipath_format_hwerrors(u64 hwerrs,
1373			   const struct ipath_hwerror_msgs *hwerrmsgs,
1374			   size_t nhwerrmsgs,
1375			   char *msg, size_t lmsg);
1376
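/*
 * Illustrative sketch (the real tables are per-chip; the bit name here
 * is only an example and may not match ipath_registers.h): a chip file
 * builds a table with INFINIPATH_HWE_MSG() and decodes a hardware error
 * value into a message buffer.
 *
 *	static const struct ipath_hwerror_msgs example_hwerror_msgs[] = {
 *		INFINIPATH_HWE_MSG(MEMBISTFAILED, "Core MemBIST Failed"),
 *	};
 *
 *	ipath_format_hwerrors(hwerrs, example_hwerror_msgs,
 *			      ARRAY_SIZE(example_hwerror_msgs),
 *			      msg, sizeof(msg));
 */
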
1377#endif				/* _IPATH_KERNEL_H */
1378