• Home
  • History
  • Annotate
  • Line#
  • Navigate
  • Raw
  • Download
  • only in /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/drivers/net/bnx2x/
1
2#ifndef BNX2X_CMN_H
3#define BNX2X_CMN_H
4
5#include <linux/types.h>
6#include <linux/netdevice.h>
7
8
9#include "bnx2x.h"
10
11
12/*********************** Interfaces ****************************
13 *  Functions that need to be implemented by each driver version
14 */
15
16/**
17 * Initialize link parameters structure variables.
18 *
19 * @param bp
20 * @param load_mode
21 *
22 * @return u8
23 */
24u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode);
25
26/**
27 * Configure hw according to link parameters structure.
28 *
29 * @param bp
30 */
31void bnx2x_link_set(struct bnx2x *bp);
32
33/**
34 * Query link status
35 *
36 * @param bp
37 *
38 * @return 0 - link is UP
39 */
40u8 bnx2x_link_test(struct bnx2x *bp);
41
42/**
43 * Handles link status change
44 *
45 * @param bp
46 */
47void bnx2x__link_status_update(struct bnx2x *bp);
48
49/**
50 * MSI-X slowpath interrupt handler
51 *
52 * @param irq
53 * @param dev_instance
54 *
55 * @return irqreturn_t
56 */
57irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance);
58
59/**
60 * non MSI-X interrupt handler
61 *
62 * @param irq
63 * @param dev_instance
64 *
65 * @return irqreturn_t
66 */
67irqreturn_t bnx2x_interrupt(int irq, void *dev_instance);
68#ifdef BCM_CNIC
69
70/**
71 * Send command to cnic driver
72 *
73 * @param bp
74 * @param cmd
75 */
76int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
77
78/**
79 * Provides cnic information for proper interrupt handling
80 *
81 * @param bp
82 */
83void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
84#endif
85
86/**
87 * Enable HW interrupts.
88 *
89 * @param bp
90 */
91void bnx2x_int_enable(struct bnx2x *bp);
92
93/**
 * Disable interrupts. This function ensures that no ISRs or
 * SP DPCs (sp_task) are running after it returns.
96 *
97 * @param bp
98 * @param disable_hw if true, disable HW interrupts.
99 */
100void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw);
101
102/**
103 * Init HW blocks according to current initialization stage:
104 * COMMON, PORT or FUNCTION.
105 *
106 * @param bp
107 * @param load_code: COMMON, PORT or FUNCTION
108 *
109 * @return int
110 */
111int bnx2x_init_hw(struct bnx2x *bp, u32 load_code);
112
113/**
114 * Init driver internals:
115 *  - rings
116 *  - status blocks
117 *  - etc.
118 *
119 * @param bp
120 * @param load_code COMMON, PORT or FUNCTION
121 */
122void bnx2x_nic_init(struct bnx2x *bp, u32 load_code);
123
124/**
125 * Allocate driver's memory.
126 *
127 * @param bp
128 *
129 * @return int
130 */
131int bnx2x_alloc_mem(struct bnx2x *bp);
132
133/**
134 * Release driver's memory.
135 *
136 * @param bp
137 */
138void bnx2x_free_mem(struct bnx2x *bp);
139
140/**
141 * Bring up a leading (the first) eth Client.
142 *
143 * @param bp
144 *
145 * @return int
146 */
147int bnx2x_setup_leading(struct bnx2x *bp);
148
149/**
150 * Setup non-leading eth Client.
151 *
152 * @param bp
153 * @param fp
154 *
155 * @return int
156 */
157int bnx2x_setup_multi(struct bnx2x *bp, int index);
158
159/**
 * Set number of queues according to mode and number of available
 * msi-x vectors
162 *
163 * @param bp
164 *
165 */
166void bnx2x_set_num_queues_msix(struct bnx2x *bp);
167
168/**
169 * Cleanup chip internals:
170 * - Cleanup MAC configuration.
171 * - Close clients.
172 * - etc.
173 *
174 * @param bp
175 * @param unload_mode
176 */
177void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode);
178
179/**
180 * Acquire HW lock.
181 *
182 * @param bp
183 * @param resource Resource bit which was locked
184 *
185 * @return int
186 */
187int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource);
188
189/**
190 * Release HW lock.
191 *
192 * @param bp driver handle
193 * @param resource Resource bit which was locked
194 *
195 * @return int
196 */
197int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource);
198
199/**
200 * Configure eth MAC address in the HW according to the value in
201 * netdev->dev_addr for 57711
202 *
203 * @param bp driver handle
204 * @param set
205 */
206void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
207
208/**
209 * Configure eth MAC address in the HW according to the value in
210 * netdev->dev_addr for 57710
211 *
212 * @param bp driver handle
213 * @param set
214 */
215void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set);
216
217#ifdef BCM_CNIC
218/**
 * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
220 * MAC(s). The function will wait until the ramrod completion
221 * returns.
222 *
223 * @param bp driver handle
224 * @param set set or clear the CAM entry
225 *
 * @return 0 if success, -ENODEV if the ramrod doesn't return.
227 */
228int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set);
229#endif
230
231/**
232 * Initialize status block in FW and HW
233 *
234 * @param bp driver handle
235 * @param sb host_status_block
236 * @param dma_addr_t mapping
237 * @param int sb_id
238 */
239void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
240			  dma_addr_t mapping, int sb_id);
241
242/**
243 * Reconfigure FW/HW according to dev->flags rx mode
244 *
245 * @param dev net_device
246 *
247 */
248void bnx2x_set_rx_mode(struct net_device *dev);
249
250/**
251 * Configure MAC filtering rules in a FW.
252 *
253 * @param bp driver handle
254 */
255void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
256
257/* Parity errors related */
258void bnx2x_inc_load_cnt(struct bnx2x *bp);
259u32 bnx2x_dec_load_cnt(struct bnx2x *bp);
260bool bnx2x_chk_parity_attn(struct bnx2x *bp);
261bool bnx2x_reset_is_done(struct bnx2x *bp);
262void bnx2x_disable_close_the_gate(struct bnx2x *bp);
263
264/**
265 * Perform statistics handling according to event
266 *
267 * @param bp driver handle
 * @param event bnx2x_stats_event
269 */
270void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
271
272/**
 * Configures FW with client parameters (like HW VLAN removal)
274 * for each active client.
275 *
276 * @param bp
277 */
278void bnx2x_set_client_config(struct bnx2x *bp);
279
280/**
281 * Handle sp events
282 *
283 * @param fp fastpath handle for the event
284 * @param rr_cqe eth_rx_cqe
285 */
286void bnx2x_sp_event(struct bnx2x_fastpath *fp,  union eth_rx_cqe *rr_cqe);
287
288
289static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
290{
291	struct host_status_block *fpsb = fp->status_blk;
292
293	barrier(); /* status block is written to by the chip */
294	fp->fp_c_idx = fpsb->c_status_block.status_block_index;
295	fp->fp_u_idx = fpsb->u_status_block.status_block_index;
296}
297
298static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
299					struct bnx2x_fastpath *fp,
300					u16 bd_prod, u16 rx_comp_prod,
301					u16 rx_sge_prod)
302{
303	struct ustorm_eth_rx_producers rx_prods = {0};
304	int i;
305
306	/* Update producers */
307	rx_prods.bd_prod = bd_prod;
308	rx_prods.cqe_prod = rx_comp_prod;
309	rx_prods.sge_prod = rx_sge_prod;
310
311	/*
312	 * Make sure that the BD and SGE data is updated before updating the
313	 * producers since FW might read the BD/SGE right after the producer
314	 * is updated.
315	 * This is only applicable for weak-ordered memory model archs such
316	 * as IA-64. The following barrier is also mandatory since FW will
317	 * assumes BDs must have buffers.
318	 */
319	wmb();
320
321	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
322		REG_WR(bp, BAR_USTRORM_INTMEM +
323		       USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
324		       ((u32 *)&rx_prods)[i]);
325
326	mmiowb(); /* keep prod updates ordered */
327
328	DP(NETIF_MSG_RX_STATUS,
329	   "queue[%d]:  wrote  bd_prod %u  cqe_prod %u  sge_prod %u\n",
330	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
331}
332
333
334
335static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
336				u8 storm, u16 index, u8 op, u8 update)
337{
338	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
339		       COMMAND_REG_INT_ACK);
340	struct igu_ack_register igu_ack;
341
342	igu_ack.status_block_index = index;
343	igu_ack.sb_id_and_flags =
344			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
345			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
346			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
347			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
348
349	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
350	   (*(u32 *)&igu_ack), hc_addr);
351	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
352
353	/* Make sure that ACK is written */
354	mmiowb();
355	barrier();
356}
357static inline u16 bnx2x_ack_int(struct bnx2x *bp)
358{
359	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
360		       COMMAND_REG_SIMD_MASK);
361	u32 result = REG_RD(bp, hc_addr);
362
363	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
364	   result, hc_addr);
365
366	return result;
367}
368
369/*
370 * fast path service functions
371 */
372
373static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
374{
375	/* Tell compiler that consumer and producer can change */
376	barrier();
377	return (fp->tx_pkt_prod != fp->tx_pkt_cons);
378}
379
380static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
381{
382	s16 used;
383	u16 prod;
384	u16 cons;
385
386	prod = fp->tx_bd_prod;
387	cons = fp->tx_bd_cons;
388
389	/* NUM_TX_RINGS = number of "next-page" entries
390	   It will be used as a threshold */
391	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
392
393#ifdef BNX2X_STOP_ON_ERROR
394	WARN_ON(used < 0);
395	WARN_ON(used > fp->bp->tx_ring_size);
396	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
397#endif
398
399	return (s16)(fp->bp->tx_ring_size) - used;
400}
401
402static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
403{
404	u16 hw_cons;
405
406	/* Tell compiler that status block fields can change */
407	barrier();
408	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
409	return hw_cons != fp->tx_pkt_cons;
410}
411
412static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
413				     struct bnx2x_fastpath *fp, u16 index)
414{
415	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
416	struct page *page = sw_buf->page;
417	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
418
419	/* Skip "next page" elements */
420	if (!page)
421		return;
422
423	dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
424		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
425	__free_pages(page, PAGES_PER_SGE_SHIFT);
426
427	sw_buf->page = NULL;
428	sge->addr_hi = 0;
429	sge->addr_lo = 0;
430}
431
432static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
433					   struct bnx2x_fastpath *fp, int last)
434{
435	int i;
436
437	for (i = 0; i < last; i++)
438		bnx2x_free_rx_sge(bp, fp, i);
439}
440
441static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
442				     struct bnx2x_fastpath *fp, u16 index)
443{
444	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
445	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
446	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
447	dma_addr_t mapping;
448
449	if (unlikely(page == NULL))
450		return -ENOMEM;
451
452	mapping = dma_map_page(&bp->pdev->dev, page, 0,
453			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
454	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
455		__free_pages(page, PAGES_PER_SGE_SHIFT);
456		return -ENOMEM;
457	}
458
459	sw_buf->page = page;
460	dma_unmap_addr_set(sw_buf, mapping, mapping);
461
462	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
463	sge->addr_lo = cpu_to_le32(U64_LO(mapping));
464
465	return 0;
466}
467static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
468				     struct bnx2x_fastpath *fp, u16 index)
469{
470	struct sk_buff *skb;
471	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
472	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
473	dma_addr_t mapping;
474
475	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
476	if (unlikely(skb == NULL))
477		return -ENOMEM;
478
479	mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_size,
480				 DMA_FROM_DEVICE);
481	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
482		dev_kfree_skb(skb);
483		return -ENOMEM;
484	}
485
486	rx_buf->skb = skb;
487	dma_unmap_addr_set(rx_buf, mapping, mapping);
488
489	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
490	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
491
492	return 0;
493}
494
495/* note that we are not allocating a new skb,
496 * we are just moving one from cons to prod
497 * we are not creating a new mapping,
498 * so there is no need to check for dma_mapping_error().
499 */
500static inline void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
501			       struct sk_buff *skb, u16 cons, u16 prod)
502{
503	struct bnx2x *bp = fp->bp;
504	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
505	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
506	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
507	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
508
509	dma_sync_single_for_device(&bp->pdev->dev,
510				   dma_unmap_addr(cons_rx_buf, mapping),
511				   RX_COPY_THRESH, DMA_FROM_DEVICE);
512
513	prod_rx_buf->skb = cons_rx_buf->skb;
514	dma_unmap_addr_set(prod_rx_buf, mapping,
515			   dma_unmap_addr(cons_rx_buf, mapping));
516	*prod_bd = *cons_bd;
517}
518
519static inline void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
520{
521	int i, j;
522
523	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
524		int idx = RX_SGE_CNT * i - 1;
525
526		for (j = 0; j < 2; j++) {
527			SGE_MASK_CLEAR_BIT(fp, idx);
528			idx--;
529		}
530	}
531}
532
533static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
534{
535	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
536	memset(fp->sge_mask, 0xff,
537	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
538
539	/* Clear the two last indices in the page to 1:
540	   these are the indices that correspond to the "next" element,
541	   hence will never be indicated and should be removed from
542	   the calculations. */
543	bnx2x_clear_sge_mask_next_elems(fp);
544}
545static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
546				       struct bnx2x_fastpath *fp, int last)
547{
548	int i;
549
550	for (i = 0; i < last; i++) {
551		struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
552		struct sk_buff *skb = rx_buf->skb;
553
554		if (skb == NULL) {
555			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
556			continue;
557		}
558
559		if (fp->tpa_state[i] == BNX2X_TPA_START)
560			dma_unmap_single(&bp->pdev->dev,
561					 dma_unmap_addr(rx_buf, mapping),
562					 bp->rx_buf_size, DMA_FROM_DEVICE);
563
564		dev_kfree_skb(skb);
565		rx_buf->skb = NULL;
566	}
567}
568
569
570static inline void bnx2x_init_tx_ring(struct bnx2x *bp)
571{
572	int i, j;
573
574	for_each_queue(bp, j) {
575		struct bnx2x_fastpath *fp = &bp->fp[j];
576
577		for (i = 1; i <= NUM_TX_RINGS; i++) {
578			struct eth_tx_next_bd *tx_next_bd =
579				&fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
580
581			tx_next_bd->addr_hi =
582				cpu_to_le32(U64_HI(fp->tx_desc_mapping +
583					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
584			tx_next_bd->addr_lo =
585				cpu_to_le32(U64_LO(fp->tx_desc_mapping +
586					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
587		}
588
589		fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
590		fp->tx_db.data.zero_fill1 = 0;
591		fp->tx_db.data.prod = 0;
592
593		fp->tx_pkt_prod = 0;
594		fp->tx_pkt_cons = 0;
595		fp->tx_bd_prod = 0;
596		fp->tx_bd_cons = 0;
597		fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
598		fp->tx_pkt = 0;
599	}
600}
601static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
602{
603	u16 rx_cons_sb;
604
605	/* Tell compiler that status block fields can change */
606	barrier();
607	rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
608	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
609		rx_cons_sb++;
610	return (fp->rx_comp_cons != rx_cons_sb);
611}
612
613/* HW Lock for shared dual port PHYs */
614void bnx2x_acquire_phy_lock(struct bnx2x *bp);
615void bnx2x_release_phy_lock(struct bnx2x *bp);
616
617void bnx2x_link_report(struct bnx2x *bp);
618int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget);
619int bnx2x_tx_int(struct bnx2x_fastpath *fp);
620void bnx2x_init_rx_rings(struct bnx2x *bp);
621netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev);
622
623int bnx2x_change_mac_addr(struct net_device *dev, void *p);
624void bnx2x_tx_timeout(struct net_device *dev);
625void bnx2x_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp);
626void bnx2x_netif_start(struct bnx2x *bp);
627void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw);
628void bnx2x_free_irq(struct bnx2x *bp, bool disable_only);
629int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state);
630int bnx2x_resume(struct pci_dev *pdev);
631void bnx2x_free_skbs(struct bnx2x *bp);
632int bnx2x_change_mtu(struct net_device *dev, int new_mtu);
633int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode);
634int bnx2x_nic_load(struct bnx2x *bp, int load_mode);
635int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);
636
637#endif /* BNX2X_CMN_H */
638