1/******************************************************************************
2 * This software may be used and distributed according to the terms of
3 * the GNU General Public License (GPL), incorporated herein by reference.
4 * Drivers based on or derived from this code fall under the GPL and must
5 * retain the authorship, copyright and license notice.  This file is not
6 * a complete program and may only be used when the entire operating
7 * system is licensed under the GPL.
8 * See the file COPYING in this distribution for more information.
9 *
10 * vxge-config.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
11 *                Virtualized Server Adapter.
12 * Copyright(c) 2002-2010 Exar Corp.
13 ******************************************************************************/
14#include <linux/vmalloc.h>
15#include <linux/etherdevice.h>
16#include <linux/pci.h>
17#include <linux/pci_hotplug.h>
18#include <linux/slab.h>
19
20#include "vxge-traffic.h"
21#include "vxge-config.h"
22
23/*
24 * __vxge_hw_channel_allocate - Allocate memory for channel
25 * This function allocates required memory for the channel and various arrays
26 * in the channel
27 */
28struct __vxge_hw_channel*
29__vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph,
30			   enum __vxge_hw_channel_type type,
31	u32 length, u32 per_dtr_space, void *userdata)
32{
33	struct __vxge_hw_channel *channel;
34	struct __vxge_hw_device *hldev;
35	int size = 0;
36	u32 vp_id;
37
38	hldev = vph->vpath->hldev;
39	vp_id = vph->vpath->vp_id;
40
41	switch (type) {
42	case VXGE_HW_CHANNEL_TYPE_FIFO:
43		size = sizeof(struct __vxge_hw_fifo);
44		break;
45	case VXGE_HW_CHANNEL_TYPE_RING:
46		size = sizeof(struct __vxge_hw_ring);
47		break;
48	default:
49		break;
50	}
51
52	channel = kzalloc(size, GFP_KERNEL);
53	if (channel == NULL)
54		goto exit0;
55	INIT_LIST_HEAD(&channel->item);
56
57	channel->common_reg = hldev->common_reg;
58	channel->first_vp_id = hldev->first_vp_id;
59	channel->type = type;
60	channel->devh = hldev;
61	channel->vph = vph;
62	channel->userdata = userdata;
63	channel->per_dtr_space = per_dtr_space;
64	channel->length = length;
65	channel->vp_id = vp_id;
66
67	channel->work_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
68	if (channel->work_arr == NULL)
69		goto exit1;
70
71	channel->free_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
72	if (channel->free_arr == NULL)
73		goto exit1;
74	channel->free_ptr = length;
75
76	channel->reserve_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
77	if (channel->reserve_arr == NULL)
78		goto exit1;
79	channel->reserve_ptr = length;
80	channel->reserve_top = 0;
81
82	channel->orig_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
83	if (channel->orig_arr == NULL)
84		goto exit1;
85
86	return channel;
87exit1:
88	__vxge_hw_channel_free(channel);
89
90exit0:
91	return NULL;
92}
93
94/*
95 * __vxge_hw_channel_free - Free memory allocated for channel
96 * This function deallocates memory from the channel and various arrays
97 * in the channel
98 */
99void __vxge_hw_channel_free(struct __vxge_hw_channel *channel)
100{
101	kfree(channel->work_arr);
102	kfree(channel->free_arr);
103	kfree(channel->reserve_arr);
104	kfree(channel->orig_arr);
105	kfree(channel);
106}
107
108/*
109 * __vxge_hw_channel_initialize - Initialize a channel
110 * This function initializes a channel by properly setting the
111 * various references
112 */
113enum vxge_hw_status
114__vxge_hw_channel_initialize(struct __vxge_hw_channel *channel)
115{
116	u32 i;
117	struct __vxge_hw_virtualpath *vpath;
118
119	vpath = channel->vph->vpath;
120
121	if ((channel->reserve_arr != NULL) && (channel->orig_arr != NULL)) {
122		for (i = 0; i < channel->length; i++)
123			channel->orig_arr[i] = channel->reserve_arr[i];
124	}
125
126	switch (channel->type) {
127	case VXGE_HW_CHANNEL_TYPE_FIFO:
128		vpath->fifoh = (struct __vxge_hw_fifo *)channel;
129		channel->stats = &((struct __vxge_hw_fifo *)
130				channel)->stats->common_stats;
131		break;
132	case VXGE_HW_CHANNEL_TYPE_RING:
133		vpath->ringh = (struct __vxge_hw_ring *)channel;
134		channel->stats = &((struct __vxge_hw_ring *)
135				channel)->stats->common_stats;
136		break;
137	default:
138		break;
139	}
140
141	return VXGE_HW_OK;
142}
143
144/*
145 * __vxge_hw_channel_reset - Resets a channel
146 * This function resets a channel by properly setting the various references
147 */
148enum vxge_hw_status
149__vxge_hw_channel_reset(struct __vxge_hw_channel *channel)
150{
151	u32 i;
152
153	for (i = 0; i < channel->length; i++) {
154		if (channel->reserve_arr != NULL)
155			channel->reserve_arr[i] = channel->orig_arr[i];
156		if (channel->free_arr != NULL)
157			channel->free_arr[i] = NULL;
158		if (channel->work_arr != NULL)
159			channel->work_arr[i] = NULL;
160	}
161	channel->free_ptr = channel->length;
162	channel->reserve_ptr = channel->length;
163	channel->reserve_top = 0;
164	channel->post_index = 0;
165	channel->compl_index = 0;
166
167	return VXGE_HW_OK;
168}
169
170/*
171 * __vxge_hw_device_pci_e_init
172 * Initialize certain PCI/PCI-X configuration registers
173 * with recommended values. Save config space for future hw resets.
174 */
175void
176__vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev)
177{
178	u16 cmd = 0;
179
180	/* Set the PErr response bit and SERR in the PCI command register. */
181	pci_read_config_word(hldev->pdev, PCI_COMMAND, &cmd);
182	cmd |= 0x140;
183	pci_write_config_word(hldev->pdev, PCI_COMMAND, cmd);
184
185	pci_save_state(hldev->pdev);
186}
187
188/*
189 * __vxge_hw_device_register_poll
190 * Polls the given register for up to the specified amount of time,
191 * returning success as soon as the masked bits read back as zero.
192 */
193enum vxge_hw_status
194__vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis)
195{
196	u64 val64;
197	u32 i = 0;
198	enum vxge_hw_status ret = VXGE_HW_FAIL;
199
200	udelay(10);
201
202	do {
203		val64 = readq(reg);
204		if (!(val64 & mask))
205			return VXGE_HW_OK;
206		udelay(100);
207	} while (++i <= 9);
208
209	i = 0;
210	do {
211		val64 = readq(reg);
212		if (!(val64 & mask))
213			return VXGE_HW_OK;
214		mdelay(1);
215	} while (++i <= max_millis);
216
217	return ret;
218}
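
/*
 * Illustrative usage sketch (not part of the driver): callers typically use
 * this helper to wait for a hardware strobe bit to clear, for example:
 *
 *	status = __vxge_hw_device_register_poll(&some_reg->some_strobe,
 *			SOME_STROBE_BIT, VXGE_HW_DEF_DEVICE_POLL_MILLIS);
 *	if (status != VXGE_HW_OK)
 *		return status;
 *
 * some_reg and SOME_STROBE_BIT are placeholders, not real registers; see
 * __vxge_hw_device_vpath_reset_in_prog_check below for an actual caller.
 */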
219
220 /* __vxge_hw_device_vpath_reset_in_prog_check - Check if a vpath reset is
221 * in progress
222 * This routine polls until the vpath reset in progress register reads zero
223 */
224enum vxge_hw_status
225__vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog)
226{
227	enum vxge_hw_status status;
228	status = __vxge_hw_device_register_poll(vpath_rst_in_prog,
229			VXGE_HW_VPATH_RST_IN_PROG_VPATH_RST_IN_PROG(0x1ffff),
230			VXGE_HW_DEF_DEVICE_POLL_MILLIS);
231	return status;
232}
233
234/*
235 * __vxge_hw_device_toc_get
236 * This routine sets the swapper and reads the toc pointer and returns the
237 * memory mapped address of the toc
238 */
239struct vxge_hw_toc_reg __iomem *
240__vxge_hw_device_toc_get(void __iomem *bar0)
241{
242	u64 val64;
243	struct vxge_hw_toc_reg __iomem *toc = NULL;
244	enum vxge_hw_status status;
245
246	struct vxge_hw_legacy_reg __iomem *legacy_reg =
247		(struct vxge_hw_legacy_reg __iomem *)bar0;
248
249	status = __vxge_hw_legacy_swapper_set(legacy_reg);
250	if (status != VXGE_HW_OK)
251		goto exit;
252
253	val64 =	readq(&legacy_reg->toc_first_pointer);
254	toc = (struct vxge_hw_toc_reg __iomem *)(bar0+val64);
255exit:
256	return toc;
257}
258
259/*
260 * __vxge_hw_device_reg_addr_get
261 * This routine sets the swapper and reads the toc pointer and initializes the
262 * register location pointers in the device object. It waits until the vpath
263 * reset-in-progress check reports that register initialization has completed.
264 */
265enum vxge_hw_status
266__vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev)
267{
268	u64 val64;
269	u32 i;
270	enum vxge_hw_status status = VXGE_HW_OK;
271
272	hldev->legacy_reg = (struct vxge_hw_legacy_reg __iomem *)hldev->bar0;
273
274	hldev->toc_reg = __vxge_hw_device_toc_get(hldev->bar0);
275	if (hldev->toc_reg  == NULL) {
276		status = VXGE_HW_FAIL;
277		goto exit;
278	}
279
280	val64 = readq(&hldev->toc_reg->toc_common_pointer);
281	hldev->common_reg =
282	(struct vxge_hw_common_reg __iomem *)(hldev->bar0 + val64);
283
284	val64 = readq(&hldev->toc_reg->toc_mrpcim_pointer);
285	hldev->mrpcim_reg =
286		(struct vxge_hw_mrpcim_reg __iomem *)(hldev->bar0 + val64);
287
288	for (i = 0; i < VXGE_HW_TITAN_SRPCIM_REG_SPACES; i++) {
289		val64 = readq(&hldev->toc_reg->toc_srpcim_pointer[i]);
290		hldev->srpcim_reg[i] =
291			(struct vxge_hw_srpcim_reg __iomem *)
292				(hldev->bar0 + val64);
293	}
294
295	for (i = 0; i < VXGE_HW_TITAN_VPMGMT_REG_SPACES; i++) {
296		val64 = readq(&hldev->toc_reg->toc_vpmgmt_pointer[i]);
297		hldev->vpmgmt_reg[i] =
298		(struct vxge_hw_vpmgmt_reg __iomem *)(hldev->bar0 + val64);
299	}
300
301	for (i = 0; i < VXGE_HW_TITAN_VPATH_REG_SPACES; i++) {
302		val64 = readq(&hldev->toc_reg->toc_vpath_pointer[i]);
303		hldev->vpath_reg[i] =
304			(struct vxge_hw_vpath_reg __iomem *)
305				(hldev->bar0 + val64);
306	}
307
308	val64 = readq(&hldev->toc_reg->toc_kdfc);
309
310	switch (VXGE_HW_TOC_GET_KDFC_INITIAL_BIR(val64)) {
311	case 0:
312		hldev->kdfc = (u8 __iomem *)(hldev->bar0 +
313			VXGE_HW_TOC_GET_KDFC_INITIAL_OFFSET(val64));
314		break;
315	default:
316		break;
317	}
318
319	status = __vxge_hw_device_vpath_reset_in_prog_check(
320			(u64 __iomem *)&hldev->common_reg->vpath_rst_in_prog);
321exit:
322	return status;
323}
324
325/*
326 * __vxge_hw_device_id_get
327 * This routine reads the device id and revision numbers into the device
328 * structure
329 */
330void __vxge_hw_device_id_get(struct __vxge_hw_device *hldev)
331{
332	u64 val64;
333
334	val64 = readq(&hldev->common_reg->titan_asic_id);
335	hldev->device_id =
336		(u16)VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_DEVICE_ID(val64);
337
338	hldev->major_revision =
339		(u8)VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_MAJOR_REVISION(val64);
340
341	hldev->minor_revision =
342		(u8)VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_MINOR_REVISION(val64);
343}
344
345/*
346 * __vxge_hw_device_access_rights_get: Get Access Rights of the driver
347 * This routine returns the Access Rights of the driver
348 */
349static u32
350__vxge_hw_device_access_rights_get(u32 host_type, u32 func_id)
351{
352	u32 access_rights = VXGE_HW_DEVICE_ACCESS_RIGHT_VPATH;
353
354	switch (host_type) {
355	case VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION:
356		if (func_id == 0) {
357			access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
358					VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
359		}
360		break;
361	case VXGE_HW_MR_NO_SR_VH0_BASE_FUNCTION:
362		access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
363				VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
364		break;
365	case VXGE_HW_NO_MR_SR_VH0_FUNCTION0:
366		access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
367				VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
368		break;
369	case VXGE_HW_NO_MR_SR_VH0_VIRTUAL_FUNCTION:
370	case VXGE_HW_SR_VH_VIRTUAL_FUNCTION:
371	case VXGE_HW_MR_SR_VH0_INVALID_CONFIG:
372		break;
373	case VXGE_HW_SR_VH_FUNCTION0:
374	case VXGE_HW_VH_NORMAL_FUNCTION:
375		access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
376		break;
377	}
378
379	return access_rights;
380}
381/*
382 * __vxge_hw_device_is_privilaged
383 * This routine checks whether the device function is privileged
384 */
385
386enum vxge_hw_status
387__vxge_hw_device_is_privilaged(u32 host_type, u32 func_id)
388{
389	if (__vxge_hw_device_access_rights_get(host_type,
390		func_id) &
391		VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)
392		return VXGE_HW_OK;
393	else
394		return VXGE_HW_ERR_PRIVILAGED_OPEARATION;
395}
396
397/*
398 * __vxge_hw_device_host_info_get
399 * This routine returns the host type assignments
400 */
401void __vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev)
402{
403	u64 val64;
404	u32 i;
405
406	val64 = readq(&hldev->common_reg->host_type_assignments);
407
408	hldev->host_type =
409	   (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64);
410
411	hldev->vpath_assignments = readq(&hldev->common_reg->vpath_assignments);
412
413	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
414
415		if (!(hldev->vpath_assignments & vxge_mBIT(i)))
416			continue;
417
418		hldev->func_id =
419			__vxge_hw_vpath_func_id_get(i, hldev->vpmgmt_reg[i]);
420
421		hldev->access_rights = __vxge_hw_device_access_rights_get(
422			hldev->host_type, hldev->func_id);
423
424		hldev->first_vp_id = i;
425		break;
426	}
427}
428
429/*
430 * __vxge_hw_verify_pci_e_info - Validate the pci-e link parameters such as
431 * link width and signalling rate.
432 */
433static enum vxge_hw_status
434__vxge_hw_verify_pci_e_info(struct __vxge_hw_device *hldev)
435{
436	int exp_cap;
437	u16 lnk;
438
439	/* Get the negotiated link width and speed from PCI config space */
440	exp_cap = pci_find_capability(hldev->pdev, PCI_CAP_ID_EXP);
441	pci_read_config_word(hldev->pdev, exp_cap + PCI_EXP_LNKSTA, &lnk);
442
443	if ((lnk & PCI_EXP_LNKSTA_CLS) != 1)
444		return VXGE_HW_ERR_INVALID_PCI_INFO;
445
446	switch ((lnk & PCI_EXP_LNKSTA_NLW) >> 4) {
447	case PCIE_LNK_WIDTH_RESRV:
448	case PCIE_LNK_X1:
449	case PCIE_LNK_X2:
450	case PCIE_LNK_X4:
451	case PCIE_LNK_X8:
452		break;
453	default:
454		return VXGE_HW_ERR_INVALID_PCI_INFO;
455	}
456
457	return VXGE_HW_OK;
458}
459
460/*
461 * __vxge_hw_device_initialize
462 * Initialize Titan-V hardware.
463 */
464enum vxge_hw_status __vxge_hw_device_initialize(struct __vxge_hw_device *hldev)
465{
466	enum vxge_hw_status status = VXGE_HW_OK;
467
468	if (VXGE_HW_OK == __vxge_hw_device_is_privilaged(hldev->host_type,
469				hldev->func_id)) {
470		/* Validate the pci-e link width and speed */
471		status = __vxge_hw_verify_pci_e_info(hldev);
472		if (status != VXGE_HW_OK)
473			goto exit;
474	}
475
476exit:
477	return status;
478}
479
480/**
481 * vxge_hw_device_hw_info_get - Get the hw information
482 * Returns the vpath mask that has the bits set for each vpath allocated
483 * for the driver, FW version information and the first MAC address for
484 * each vpath
485 */
486enum vxge_hw_status __devinit
487vxge_hw_device_hw_info_get(void __iomem *bar0,
488			   struct vxge_hw_device_hw_info *hw_info)
489{
490	u32 i;
491	u64 val64;
492	struct vxge_hw_toc_reg __iomem *toc;
493	struct vxge_hw_mrpcim_reg __iomem *mrpcim_reg;
494	struct vxge_hw_common_reg __iomem *common_reg;
495	struct vxge_hw_vpath_reg __iomem *vpath_reg;
496	struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg;
497	enum vxge_hw_status status;
498
499	memset(hw_info, 0, sizeof(struct vxge_hw_device_hw_info));
500
501	toc = __vxge_hw_device_toc_get(bar0);
502	if (toc == NULL) {
503		status = VXGE_HW_ERR_CRITICAL;
504		goto exit;
505	}
506
507	val64 = readq(&toc->toc_common_pointer);
508	common_reg = (struct vxge_hw_common_reg __iomem *)(bar0 + val64);
509
510	status = __vxge_hw_device_vpath_reset_in_prog_check(
511		(u64 __iomem *)&common_reg->vpath_rst_in_prog);
512	if (status != VXGE_HW_OK)
513		goto exit;
514
515	hw_info->vpath_mask = readq(&common_reg->vpath_assignments);
516
517	val64 = readq(&common_reg->host_type_assignments);
518
519	hw_info->host_type =
520	   (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64);
521
522	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
523
524		if (!((hw_info->vpath_mask) & vxge_mBIT(i)))
525			continue;
526
527		val64 = readq(&toc->toc_vpmgmt_pointer[i]);
528
529		vpmgmt_reg = (struct vxge_hw_vpmgmt_reg __iomem *)
530				(bar0 + val64);
531
532		hw_info->func_id = __vxge_hw_vpath_func_id_get(i, vpmgmt_reg);
533		if (__vxge_hw_device_access_rights_get(hw_info->host_type,
534			hw_info->func_id) &
535			VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM) {
536
537			val64 = readq(&toc->toc_mrpcim_pointer);
538
539			mrpcim_reg = (struct vxge_hw_mrpcim_reg __iomem *)
540					(bar0 + val64);
541
542			writeq(0, &mrpcim_reg->xgmac_gen_fw_memo_mask);
543			wmb();
544		}
545
546		val64 = readq(&toc->toc_vpath_pointer[i]);
547
548		vpath_reg = (struct vxge_hw_vpath_reg __iomem *)(bar0 + val64);
549
550		hw_info->function_mode =
551			__vxge_hw_vpath_pci_func_mode_get(i, vpath_reg);
552
553		status = __vxge_hw_vpath_fw_ver_get(i, vpath_reg, hw_info);
554		if (status != VXGE_HW_OK)
555			goto exit;
556
557		status = __vxge_hw_vpath_card_info_get(i, vpath_reg, hw_info);
558		if (status != VXGE_HW_OK)
559			goto exit;
560
561		break;
562	}
563
564	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
565
566		if (!((hw_info->vpath_mask) & vxge_mBIT(i)))
567			continue;
568
569		val64 = readq(&toc->toc_vpath_pointer[i]);
570		vpath_reg = (struct vxge_hw_vpath_reg __iomem *)(bar0 + val64);
571
572		status =  __vxge_hw_vpath_addr_get(i, vpath_reg,
573				hw_info->mac_addrs[i],
574				hw_info->mac_addr_masks[i]);
575		if (status != VXGE_HW_OK)
576			goto exit;
577	}
578exit:
579	return status;
580}
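
/*
 * Illustrative usage sketch (assumed caller code, not part of this file):
 *
 *	struct vxge_hw_device_hw_info hw_info;
 *
 *	if (vxge_hw_device_hw_info_get(bar0, &hw_info) != VXGE_HW_OK)
 *		goto fail;
 *	now hw_info.vpath_mask, hw_info.mac_addrs[] etc. are valid
 *
 * bar0 is assumed to be the ioremapped BAR0 of the adapter.
 */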
581
582/*
583 * vxge_hw_device_initialize - Initialize Titan device.
584 * Initialize Titan device. The driver cooperates with the OS to find a new
585 * Titan device and to locate its PCI and memory spaces.
586 *
587 * This routine allocates sizeof(struct __vxge_hw_device) bytes for the HW
588 * object, performs the Titan hardware initialization and, on success,
589 * returns the device handle through @devh.
590 */
591enum vxge_hw_status __devinit
592vxge_hw_device_initialize(
593	struct __vxge_hw_device **devh,
594	struct vxge_hw_device_attr *attr,
595	struct vxge_hw_device_config *device_config)
596{
597	u32 i;
598	u32 nblocks = 0;
599	struct __vxge_hw_device *hldev = NULL;
600	enum vxge_hw_status status = VXGE_HW_OK;
601
602	status = __vxge_hw_device_config_check(device_config);
603	if (status != VXGE_HW_OK)
604		goto exit;
605
606	hldev = (struct __vxge_hw_device *)
607			vmalloc(sizeof(struct __vxge_hw_device));
608	if (hldev == NULL) {
609		status = VXGE_HW_ERR_OUT_OF_MEMORY;
610		goto exit;
611	}
612
613	memset(hldev, 0, sizeof(struct __vxge_hw_device));
614	hldev->magic = VXGE_HW_DEVICE_MAGIC;
615
616	vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_ALL);
617
618	/* apply config */
619	memcpy(&hldev->config, device_config,
620		sizeof(struct vxge_hw_device_config));
621
622	hldev->bar0 = attr->bar0;
623	hldev->pdev = attr->pdev;
624
625	hldev->uld_callbacks.link_up = attr->uld_callbacks.link_up;
626	hldev->uld_callbacks.link_down = attr->uld_callbacks.link_down;
627	hldev->uld_callbacks.crit_err = attr->uld_callbacks.crit_err;
628
629	__vxge_hw_device_pci_e_init(hldev);
630
631	status = __vxge_hw_device_reg_addr_get(hldev);
632	if (status != VXGE_HW_OK) {
633		vfree(hldev);
634		goto exit;
635	}
636	__vxge_hw_device_id_get(hldev);
637
638	__vxge_hw_device_host_info_get(hldev);
639
640	/* Incrementing for stats blocks */
641	nblocks++;
642
643	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
644
645		if (!(hldev->vpath_assignments & vxge_mBIT(i)))
646			continue;
647
648		if (device_config->vp_config[i].ring.enable ==
649			VXGE_HW_RING_ENABLE)
650			nblocks += device_config->vp_config[i].ring.ring_blocks;
651
652		if (device_config->vp_config[i].fifo.enable ==
653			VXGE_HW_FIFO_ENABLE)
654			nblocks += device_config->vp_config[i].fifo.fifo_blocks;
655		nblocks++;
656	}
657
658	if (__vxge_hw_blockpool_create(hldev,
659		&hldev->block_pool,
660		device_config->dma_blockpool_initial + nblocks,
661		device_config->dma_blockpool_max + nblocks) != VXGE_HW_OK) {
662
663		vxge_hw_device_terminate(hldev);
664		status = VXGE_HW_ERR_OUT_OF_MEMORY;
665		goto exit;
666	}
667
668	status = __vxge_hw_device_initialize(hldev);
669
670	if (status != VXGE_HW_OK) {
671		vxge_hw_device_terminate(hldev);
672		goto exit;
673	}
674
675	*devh = hldev;
676exit:
677	return status;
678}
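
/*
 * Illustrative usage sketch (assumed caller code; the attr and config fields
 * referenced below are the ones consumed above):
 *
 *	struct __vxge_hw_device *hldev;
 *	struct vxge_hw_device_attr attr = { .bar0 = bar0, .pdev = pdev };
 *	struct vxge_hw_device_config config;
 *
 *	vxge_hw_device_config_default_get(&config);
 *	if (vxge_hw_device_initialize(&hldev, &attr, &config) != VXGE_HW_OK)
 *		goto fail;
 *	...
 *	vxge_hw_device_terminate(hldev);
 *
 * A real caller must also fill attr.uld_callbacks before initializing.
 */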
679
680/*
681 * vxge_hw_device_terminate - Terminate Titan device.
682 * Terminate HW device.
683 */
684void
685vxge_hw_device_terminate(struct __vxge_hw_device *hldev)
686{
687	vxge_assert(hldev->magic == VXGE_HW_DEVICE_MAGIC);
688
689	hldev->magic = VXGE_HW_DEVICE_DEAD;
690	__vxge_hw_blockpool_destroy(&hldev->block_pool);
691	vfree(hldev);
692}
693
694/*
695 * vxge_hw_device_stats_get - Get the device hw statistics.
696 * Returns the vpath h/w stats for the device.
697 */
698enum vxge_hw_status
699vxge_hw_device_stats_get(struct __vxge_hw_device *hldev,
700			struct vxge_hw_device_stats_hw_info *hw_stats)
701{
702	u32 i;
703	enum vxge_hw_status status = VXGE_HW_OK;
704
705	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
706
707		if (!(hldev->vpaths_deployed & vxge_mBIT(i)) ||
708			(hldev->virtual_paths[i].vp_open ==
709				VXGE_HW_VP_NOT_OPEN))
710			continue;
711
712		memcpy(hldev->virtual_paths[i].hw_stats_sav,
713				hldev->virtual_paths[i].hw_stats,
714				sizeof(struct vxge_hw_vpath_stats_hw_info));
715
716		status = __vxge_hw_vpath_stats_get(
717			&hldev->virtual_paths[i],
718			hldev->virtual_paths[i].hw_stats);
719	}
720
721	memcpy(hw_stats, &hldev->stats.hw_dev_info_stats,
722			sizeof(struct vxge_hw_device_stats_hw_info));
723
724	return status;
725}
726
727/*
728 * vxge_hw_driver_stats_get - Get the device sw statistics.
729 * Returns the vpath s/w stats for the device.
730 */
731enum vxge_hw_status vxge_hw_driver_stats_get(
732			struct __vxge_hw_device *hldev,
733			struct vxge_hw_device_stats_sw_info *sw_stats)
734{
735	enum vxge_hw_status status = VXGE_HW_OK;
736
737	memcpy(sw_stats, &hldev->stats.sw_dev_info_stats,
738		sizeof(struct vxge_hw_device_stats_sw_info));
739
740	return status;
741}
742
743/*
744 * vxge_hw_mrpcim_stats_access - Access the statistics from the given location
745 *                           and offset and perform an operation
746 * Get the statistics from the given location and offset.
747 */
748enum vxge_hw_status
749vxge_hw_mrpcim_stats_access(struct __vxge_hw_device *hldev,
750			    u32 operation, u32 location, u32 offset, u64 *stat)
751{
752	u64 val64;
753	enum vxge_hw_status status = VXGE_HW_OK;
754
755	status = __vxge_hw_device_is_privilaged(hldev->host_type,
756			hldev->func_id);
757	if (status != VXGE_HW_OK)
758		goto exit;
759
760	val64 = VXGE_HW_XMAC_STATS_SYS_CMD_OP(operation) |
761		VXGE_HW_XMAC_STATS_SYS_CMD_STROBE |
762		VXGE_HW_XMAC_STATS_SYS_CMD_LOC_SEL(location) |
763		VXGE_HW_XMAC_STATS_SYS_CMD_OFFSET_SEL(offset);
764
765	status = __vxge_hw_pio_mem_write64(val64,
766				&hldev->mrpcim_reg->xmac_stats_sys_cmd,
767				VXGE_HW_XMAC_STATS_SYS_CMD_STROBE,
768				hldev->config.device_poll_millis);
769
770	if ((status == VXGE_HW_OK) && (operation == VXGE_HW_STATS_OP_READ))
771		*stat = readq(&hldev->mrpcim_reg->xmac_stats_sys_data);
772	else
773		*stat = 0;
774exit:
775	return status;
776}
777
778/*
779 * vxge_hw_device_xmac_aggr_stats_get - Get the Statistics on aggregate port
780 * Get the Statistics on aggregate port
781 */
782enum vxge_hw_status
783vxge_hw_device_xmac_aggr_stats_get(struct __vxge_hw_device *hldev, u32 port,
784				   struct vxge_hw_xmac_aggr_stats *aggr_stats)
785{
786	u64 *val64;
787	int i;
788	u32 offset = VXGE_HW_STATS_AGGRn_OFFSET;
789	enum vxge_hw_status status = VXGE_HW_OK;
790
791	val64 = (u64 *)aggr_stats;
792
793	status = __vxge_hw_device_is_privilaged(hldev->host_type,
794			hldev->func_id);
795	if (status != VXGE_HW_OK)
796		goto exit;
797
798	for (i = 0; i < sizeof(struct vxge_hw_xmac_aggr_stats) / 8; i++) {
799		status = vxge_hw_mrpcim_stats_access(hldev,
800					VXGE_HW_STATS_OP_READ,
801					VXGE_HW_STATS_LOC_AGGR,
802					((offset + (104 * port)) >> 3), val64);
803		if (status != VXGE_HW_OK)
804			goto exit;
805
806		offset += 8;
807		val64++;
808	}
809exit:
810	return status;
811}
812
813/*
814 * vxge_hw_device_xmac_port_stats_get - Get the Statistics on a port
815 * Get the Statistics on port
816 */
817enum vxge_hw_status
818vxge_hw_device_xmac_port_stats_get(struct __vxge_hw_device *hldev, u32 port,
819				   struct vxge_hw_xmac_port_stats *port_stats)
820{
821	u64 *val64;
822	enum vxge_hw_status status = VXGE_HW_OK;
823	int i;
824	u32 offset = 0x0;
825	val64 = (u64 *) port_stats;
826
827	status = __vxge_hw_device_is_privilaged(hldev->host_type,
828			hldev->func_id);
829	if (status != VXGE_HW_OK)
830		goto exit;
831
832	for (i = 0; i < sizeof(struct vxge_hw_xmac_port_stats) / 8; i++) {
833		status = vxge_hw_mrpcim_stats_access(hldev,
834					VXGE_HW_STATS_OP_READ,
835					VXGE_HW_STATS_LOC_AGGR,
836					((offset + (608 * port)) >> 3), val64);
837		if (status != VXGE_HW_OK)
838			goto exit;
839
840		offset += 8;
841		val64++;
842	}
843
844exit:
845	return status;
846}
847
848/*
849 * vxge_hw_device_xmac_stats_get - Get the XMAC Statistics
850 * Get the XMAC Statistics
851 */
852enum vxge_hw_status
853vxge_hw_device_xmac_stats_get(struct __vxge_hw_device *hldev,
854			      struct vxge_hw_xmac_stats *xmac_stats)
855{
856	enum vxge_hw_status status = VXGE_HW_OK;
857	u32 i;
858
859	status = vxge_hw_device_xmac_aggr_stats_get(hldev,
860					0, &xmac_stats->aggr_stats[0]);
861
862	if (status != VXGE_HW_OK)
863		goto exit;
864
865	status = vxge_hw_device_xmac_aggr_stats_get(hldev,
866				1, &xmac_stats->aggr_stats[1]);
867	if (status != VXGE_HW_OK)
868		goto exit;
869
870	for (i = 0; i <= VXGE_HW_MAC_MAX_MAC_PORT_ID; i++) {
871
872		status = vxge_hw_device_xmac_port_stats_get(hldev,
873					i, &xmac_stats->port_stats[i]);
874		if (status != VXGE_HW_OK)
875			goto exit;
876	}
877
878	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
879
880		if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
881			continue;
882
883		status = __vxge_hw_vpath_xmac_tx_stats_get(
884					&hldev->virtual_paths[i],
885					&xmac_stats->vpath_tx_stats[i]);
886		if (status != VXGE_HW_OK)
887			goto exit;
888
889		status = __vxge_hw_vpath_xmac_rx_stats_get(
890					&hldev->virtual_paths[i],
891					&xmac_stats->vpath_rx_stats[i]);
892		if (status != VXGE_HW_OK)
893			goto exit;
894	}
895exit:
896	return status;
897}
898
899/*
900 * vxge_hw_device_debug_set - Set the debug module, level and timestamp
901 * This routine is used to dynamically change the debug output
902 */
903void vxge_hw_device_debug_set(struct __vxge_hw_device *hldev,
904			      enum vxge_debug_level level, u32 mask)
905{
906	if (hldev == NULL)
907		return;
908
909#if defined(VXGE_DEBUG_TRACE_MASK) || defined(VXGE_DEBUG_ERR_MASK)
910	hldev->debug_module_mask = mask;
911	hldev->debug_level = level;
912#endif
913
914#if defined(VXGE_DEBUG_ERR_MASK)
915	hldev->level_err = level & VXGE_ERR;
916#endif
917
918#if defined(VXGE_DEBUG_TRACE_MASK)
919	hldev->level_trace = level & VXGE_TRACE;
920#endif
921}
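
/*
 * Illustrative usage sketch: the debug output can be changed at run time,
 * mirroring the call made during vxge_hw_device_initialize() above, e.g.
 *
 *	vxge_hw_device_debug_set(hldev, VXGE_TRACE, VXGE_COMPONENT_ALL);
 *
 * (VXGE_TRACE and VXGE_COMPONENT_ALL are both referenced elsewhere in this
 * file; the effect depends on the VXGE_DEBUG_* masks compiled in.)
 */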
922
923/*
924 * vxge_hw_device_error_level_get - Get the error level
925 * This routine returns the current error level set
926 */
927u32 vxge_hw_device_error_level_get(struct __vxge_hw_device *hldev)
928{
929#if defined(VXGE_DEBUG_ERR_MASK)
930	if (hldev == NULL)
931		return VXGE_ERR;
932	else
933		return hldev->level_err;
934#else
935	return 0;
936#endif
937}
938
939/*
940 * vxge_hw_device_trace_level_get - Get the trace level
941 * This routine returns the current trace level set
942 */
943u32 vxge_hw_device_trace_level_get(struct __vxge_hw_device *hldev)
944{
945#if defined(VXGE_DEBUG_TRACE_MASK)
946	if (hldev == NULL)
947		return VXGE_TRACE;
948	else
949		return hldev->level_trace;
950#else
951	return 0;
952#endif
953}
954/*
955 * vxge_hw_device_debug_mask_get - Get the debug mask
956 * This routine returns the current debug mask set
957 */
958u32 vxge_hw_device_debug_mask_get(struct __vxge_hw_device *hldev)
959{
960#if defined(VXGE_DEBUG_TRACE_MASK) || defined(VXGE_DEBUG_ERR_MASK)
961	if (hldev == NULL)
962		return 0;
963	return hldev->debug_module_mask;
964#else
965	return 0;
966#endif
967}
968
969/*
970 * vxge_hw_device_getpause_data - Pause frame generation and reception.
971 * Returns the Pause frame generation and reception capability of the NIC.
972 */
973enum vxge_hw_status vxge_hw_device_getpause_data(struct __vxge_hw_device *hldev,
974						 u32 port, u32 *tx, u32 *rx)
975{
976	u64 val64;
977	enum vxge_hw_status status = VXGE_HW_OK;
978
979	if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
980		status = VXGE_HW_ERR_INVALID_DEVICE;
981		goto exit;
982	}
983
984	if (port > VXGE_HW_MAC_MAX_MAC_PORT_ID) {
985		status = VXGE_HW_ERR_INVALID_PORT;
986		goto exit;
987	}
988
989	if (!(hldev->access_rights & VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
990		status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
991		goto exit;
992	}
993
994	val64 = readq(&hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
995	if (val64 & VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN)
996		*tx = 1;
997	if (val64 & VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN)
998		*rx = 1;
999exit:
1000	return status;
1001}
1002
1003/*
1004 * vxge_hw_device_setpause_data -  set/reset pause frame generation.
1005 * It can be used to set or reset Pause frame generation or reception
1006 * support of the NIC.
1007 */
1008
1009enum vxge_hw_status vxge_hw_device_setpause_data(struct __vxge_hw_device *hldev,
1010						 u32 port, u32 tx, u32 rx)
1011{
1012	u64 val64;
1013	enum vxge_hw_status status = VXGE_HW_OK;
1014
1015	if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
1016		status = VXGE_HW_ERR_INVALID_DEVICE;
1017		goto exit;
1018	}
1019
1020	if (port > VXGE_HW_MAC_MAX_MAC_PORT_ID) {
1021		status = VXGE_HW_ERR_INVALID_PORT;
1022		goto exit;
1023	}
1024
1025	status = __vxge_hw_device_is_privilaged(hldev->host_type,
1026			hldev->func_id);
1027	if (status != VXGE_HW_OK)
1028		goto exit;
1029
1030	val64 = readq(&hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
1031	if (tx)
1032		val64 |= VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN;
1033	else
1034		val64 &= ~VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN;
1035	if (rx)
1036		val64 |= VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN;
1037	else
1038		val64 &= ~VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN;
1039
1040	writeq(val64, &hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
1041exit:
1042	return status;
1043}
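
/*
 * Illustrative usage sketch (assumed caller code): enable both generation and
 * reception of pause frames on port 0, then read the setting back:
 *
 *	u32 tx = 0, rx = 0;
 *
 *	vxge_hw_device_setpause_data(hldev, 0, 1, 1);
 *	vxge_hw_device_getpause_data(hldev, 0, &tx, &rx);
 */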
1044
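/*
 * vxge_hw_device_link_width_get - Get the negotiated PCIe link width
 * Reads the PCI Express link status word from config space and returns the
 * negotiated link width field.
 */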
1045u16 vxge_hw_device_link_width_get(struct __vxge_hw_device *hldev)
1046{
1047	int link_width, exp_cap;
1048	u16 lnk;
1049
1050	exp_cap = pci_find_capability(hldev->pdev, PCI_CAP_ID_EXP);
1051	pci_read_config_word(hldev->pdev, exp_cap + PCI_EXP_LNKSTA, &lnk);
1052	link_width = (lnk & VXGE_HW_PCI_EXP_LNKCAP_LNK_WIDTH) >> 4;
1053	return link_width;
1054}
1055
1056/*
1057 * __vxge_hw_ring_block_memblock_idx - Return the memblock index
1058 * This function returns the index of memory block
1059 */
1060static inline u32
1061__vxge_hw_ring_block_memblock_idx(u8 *block)
1062{
1063	return (u32)*((u64 *)(block + VXGE_HW_RING_MEMBLOCK_IDX_OFFSET));
1064}
1065
1066/*
1067 * __vxge_hw_ring_block_memblock_idx_set - Sets the memblock index
1068 * This function sets index to a memory block
1069 */
1070static inline void
1071__vxge_hw_ring_block_memblock_idx_set(u8 *block, u32 memblock_idx)
1072{
1073	*((u64 *)(block + VXGE_HW_RING_MEMBLOCK_IDX_OFFSET)) = memblock_idx;
1074}
1075
1076/*
1077 * __vxge_hw_ring_block_next_pointer_set - Sets the next block pointer
1078 * in RxD block
1079 * Sets the next block pointer in RxD block
1080 */
1081static inline void
1082__vxge_hw_ring_block_next_pointer_set(u8 *block, dma_addr_t dma_next)
1083{
1084	*((u64 *)(block + VXGE_HW_RING_NEXT_BLOCK_POINTER_OFFSET)) = dma_next;
1085}
1086
1087/*
1088 * __vxge_hw_ring_first_block_address_get - Returns the dma address of the
1089 *             first block
1090 * Returns the dma address of the first RxD block
1091 */
1092u64 __vxge_hw_ring_first_block_address_get(struct __vxge_hw_ring *ring)
1093{
1094	struct vxge_hw_mempool_dma *dma_object;
1095
1096	dma_object = ring->mempool->memblocks_dma_arr;
1097	vxge_assert(dma_object != NULL);
1098
1099	return dma_object->addr;
1100}
1101
1102/*
1103 * __vxge_hw_ring_item_dma_addr - Return the dma address of an item
1104 * This function returns the dma address of a given item
1105 */
1106static dma_addr_t __vxge_hw_ring_item_dma_addr(struct vxge_hw_mempool *mempoolh,
1107					       void *item)
1108{
1109	u32 memblock_idx;
1110	void *memblock;
1111	struct vxge_hw_mempool_dma *memblock_dma_object;
1112	ptrdiff_t dma_item_offset;
1113
1114	/* get owner memblock index */
1115	memblock_idx = __vxge_hw_ring_block_memblock_idx(item);
1116
1117	/* get owner memblock by memblock index */
1118	memblock = mempoolh->memblocks_arr[memblock_idx];
1119
1120	/* get memblock DMA object by memblock index */
1121	memblock_dma_object = mempoolh->memblocks_dma_arr + memblock_idx;
1122
1123	/* calculate offset in the memblock of this item */
1124	dma_item_offset = (u8 *)item - (u8 *)memblock;
1125
1126	return memblock_dma_object->addr + dma_item_offset;
1127}
1128
1129/*
1130 * __vxge_hw_ring_rxdblock_link - Link the RxD blocks
1131 * This function sets the next-block pointer of RxD block @from to point at @to
1132 */
1133static void __vxge_hw_ring_rxdblock_link(struct vxge_hw_mempool *mempoolh,
1134					 struct __vxge_hw_ring *ring, u32 from,
1135					 u32 to)
1136{
1137	u8 *to_item , *from_item;
1138	dma_addr_t to_dma;
1139
1140	/* get "from" RxD block */
1141	from_item = mempoolh->items_arr[from];
1142	vxge_assert(from_item);
1143
1144	/* get "to" RxD block */
1145	to_item = mempoolh->items_arr[to];
1146	vxge_assert(to_item);
1147
1148	/* return address of the beginning of previous RxD block */
1149	to_dma = __vxge_hw_ring_item_dma_addr(mempoolh, to_item);
1150
1151	/* set next pointer for this RxD block to point on
1152	 * previous item's DMA start address */
1153	__vxge_hw_ring_block_next_pointer_set(from_item, to_dma);
1154}
1155
1156/*
1157 * __vxge_hw_ring_mempool_item_alloc - Allocate and format RxD blocks
1158 * (memory pool item callback)
1159 * This function is the callback passed to __vxge_hw_mempool_create; it formats
1160 * each RxD block of the ring's memory pool
1161 */
1162static void
1163__vxge_hw_ring_mempool_item_alloc(struct vxge_hw_mempool *mempoolh,
1164				  u32 memblock_index,
1165				  struct vxge_hw_mempool_dma *dma_object,
1166				  u32 index, u32 is_last)
1167{
1168	u32 i;
1169	void *item = mempoolh->items_arr[index];
1170	struct __vxge_hw_ring *ring =
1171		(struct __vxge_hw_ring *)mempoolh->userdata;
1172
1173	/* format rxds array */
1174	for (i = 0; i < ring->rxds_per_block; i++) {
1175		void *rxdblock_priv;
1176		void *uld_priv;
1177		struct vxge_hw_ring_rxd_1 *rxdp;
1178
1179		u32 reserve_index = ring->channel.reserve_ptr -
1180				(index * ring->rxds_per_block + i + 1);
1181		u32 memblock_item_idx;
1182
1183		ring->channel.reserve_arr[reserve_index] = ((u8 *)item) +
1184						i * ring->rxd_size;
1185
1186		/* Note: memblock_item_idx is index of the item within
1187		 *       the memblock. For instance, in case of three RxD-blocks
1188		 *       per memblock this value can be 0, 1 or 2. */
1189		rxdblock_priv = __vxge_hw_mempool_item_priv(mempoolh,
1190					memblock_index, item,
1191					&memblock_item_idx);
1192
1193		rxdp = (struct vxge_hw_ring_rxd_1 *)
1194				ring->channel.reserve_arr[reserve_index];
1195
1196		uld_priv = ((u8 *)rxdblock_priv + ring->rxd_priv_size * i);
1197
1198		/* pre-format Host_Control */
1199		rxdp->host_control = (u64)(size_t)uld_priv;
1200	}
1201
1202	__vxge_hw_ring_block_memblock_idx_set(item, memblock_index);
1203
1204	if (is_last) {
1205		/* link last one with first one */
1206		__vxge_hw_ring_rxdblock_link(mempoolh, ring, index, 0);
1207	}
1208
1209	if (index > 0) {
1210		/* link this RxD block with previous one */
1211		__vxge_hw_ring_rxdblock_link(mempoolh, ring, index - 1, index);
1212	}
1213}
1214
1215/*
1216 * vxge_hw_ring_replenish - Initial replenish of RxDs
1217 * This function replenishes the RxDs from the reserve array to the work array
1218 */
1219enum vxge_hw_status
1220vxge_hw_ring_replenish(struct __vxge_hw_ring *ring)
1221{
1222	void *rxd;
1223	struct __vxge_hw_channel *channel;
1224	enum vxge_hw_status status = VXGE_HW_OK;
1225
1226	channel = &ring->channel;
1227
1228	while (vxge_hw_channel_dtr_count(channel) > 0) {
1229
1230		status = vxge_hw_ring_rxd_reserve(ring, &rxd);
1231
1232		vxge_assert(status == VXGE_HW_OK);
1233
1234		if (ring->rxd_init) {
1235			status = ring->rxd_init(rxd, channel->userdata);
1236			if (status != VXGE_HW_OK) {
1237				vxge_hw_ring_rxd_free(ring, rxd);
1238				goto exit;
1239			}
1240		}
1241
1242		vxge_hw_ring_rxd_post(ring, rxd);
1243	}
1244	status = VXGE_HW_OK;
1245exit:
1246	return status;
1247}
1248
1249/*
1250 * __vxge_hw_ring_create - Create a Ring
1251 * This function creates Ring and initializes it.
1252 *
1253 */
1254enum vxge_hw_status
1255__vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp,
1256		      struct vxge_hw_ring_attr *attr)
1257{
1258	enum vxge_hw_status status = VXGE_HW_OK;
1259	struct __vxge_hw_ring *ring;
1260	u32 ring_length;
1261	struct vxge_hw_ring_config *config;
1262	struct __vxge_hw_device *hldev;
1263	u32 vp_id;
1264	struct vxge_hw_mempool_cbs ring_mp_callback;
1265
1266	if ((vp == NULL) || (attr == NULL)) {
1267		status = VXGE_HW_FAIL;
1268		goto exit;
1269	}
1270
1271	hldev = vp->vpath->hldev;
1272	vp_id = vp->vpath->vp_id;
1273
1274	config = &hldev->config.vp_config[vp_id].ring;
1275
1276	ring_length = config->ring_blocks *
1277			vxge_hw_ring_rxds_per_block_get(config->buffer_mode);
1278
1279	ring = (struct __vxge_hw_ring *)__vxge_hw_channel_allocate(vp,
1280						VXGE_HW_CHANNEL_TYPE_RING,
1281						ring_length,
1282						attr->per_rxd_space,
1283						attr->userdata);
1284
1285	if (ring == NULL) {
1286		status = VXGE_HW_ERR_OUT_OF_MEMORY;
1287		goto exit;
1288	}
1289
1290	vp->vpath->ringh = ring;
1291	ring->vp_id = vp_id;
1292	ring->vp_reg = vp->vpath->vp_reg;
1293	ring->common_reg = hldev->common_reg;
1294	ring->stats = &vp->vpath->sw_stats->ring_stats;
1295	ring->config = config;
1296	ring->callback = attr->callback;
1297	ring->rxd_init = attr->rxd_init;
1298	ring->rxd_term = attr->rxd_term;
1299	ring->buffer_mode = config->buffer_mode;
1300	ring->rxds_limit = config->rxds_limit;
1301
1302	ring->rxd_size = vxge_hw_ring_rxd_size_get(config->buffer_mode);
1303	ring->rxd_priv_size =
1304		sizeof(struct __vxge_hw_ring_rxd_priv) + attr->per_rxd_space;
1305	ring->per_rxd_space = attr->per_rxd_space;
1306
1307	ring->rxd_priv_size =
1308		((ring->rxd_priv_size + VXGE_CACHE_LINE_SIZE - 1) /
1309		VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE;
1310
1311	/* how many RxDs can fit into one block. Depends on configured
1312	 * buffer_mode. */
1313	ring->rxds_per_block =
1314		vxge_hw_ring_rxds_per_block_get(config->buffer_mode);
1315
1316	/* calculate actual RxD block private size */
1317	ring->rxdblock_priv_size = ring->rxd_priv_size * ring->rxds_per_block;
1318	ring_mp_callback.item_func_alloc = __vxge_hw_ring_mempool_item_alloc;
1319	ring->mempool = __vxge_hw_mempool_create(hldev,
1320				VXGE_HW_BLOCK_SIZE,
1321				VXGE_HW_BLOCK_SIZE,
1322				ring->rxdblock_priv_size,
1323				ring->config->ring_blocks,
1324				ring->config->ring_blocks,
1325				&ring_mp_callback,
1326				ring);
1327
1328	if (ring->mempool == NULL) {
1329		__vxge_hw_ring_delete(vp);
1330		return VXGE_HW_ERR_OUT_OF_MEMORY;
1331	}
1332
1333	status = __vxge_hw_channel_initialize(&ring->channel);
1334	if (status != VXGE_HW_OK) {
1335		__vxge_hw_ring_delete(vp);
1336		goto exit;
1337	}
1338
1339	/* Note:
1340	 * Specifying rxd_init callback means two things:
1341	 * 1) rxds need to be initialized by driver at channel-open time;
1342	 * 2) rxds need to be posted at channel-open time
1343	 *    (that's what the initial_replenish() below does)
1344	 * Currently we don't have a case when the 1) is done without the 2).
1345	 */
1346	if (ring->rxd_init) {
1347		status = vxge_hw_ring_replenish(ring);
1348		if (status != VXGE_HW_OK) {
1349			__vxge_hw_ring_delete(vp);
1350			goto exit;
1351		}
1352	}
1353
1354	/* initial replenish will increment the counter in its post() routine,
1355	 * we have to reset it */
1356	ring->stats->common_stats.usage_cnt = 0;
1357exit:
1358	return status;
1359}
1360
1361/*
1362 * __vxge_hw_ring_abort - Return the outstanding RxDs
1363 * This function terminates all outstanding RxDs of the ring
1364 */
1365enum vxge_hw_status __vxge_hw_ring_abort(struct __vxge_hw_ring *ring)
1366{
1367	void *rxdh;
1368	struct __vxge_hw_channel *channel;
1369
1370	channel = &ring->channel;
1371
1372	for (;;) {
1373		vxge_hw_channel_dtr_try_complete(channel, &rxdh);
1374
1375		if (rxdh == NULL)
1376			break;
1377
1378		vxge_hw_channel_dtr_complete(channel);
1379
1380		if (ring->rxd_term)
1381			ring->rxd_term(rxdh, VXGE_HW_RXD_STATE_POSTED,
1382				channel->userdata);
1383
1384		vxge_hw_channel_dtr_free(channel, rxdh);
1385	}
1386
1387	return VXGE_HW_OK;
1388}
1389
1390/*
1391 * __vxge_hw_ring_reset - Resets the ring
1392 * This function resets the ring during vpath reset operation
1393 */
1394enum vxge_hw_status __vxge_hw_ring_reset(struct __vxge_hw_ring *ring)
1395{
1396	enum vxge_hw_status status = VXGE_HW_OK;
1397	struct __vxge_hw_channel *channel;
1398
1399	channel = &ring->channel;
1400
1401	__vxge_hw_ring_abort(ring);
1402
1403	status = __vxge_hw_channel_reset(channel);
1404
1405	if (status != VXGE_HW_OK)
1406		goto exit;
1407
1408	if (ring->rxd_init) {
1409		status = vxge_hw_ring_replenish(ring);
1410		if (status != VXGE_HW_OK)
1411			goto exit;
1412	}
1413exit:
1414	return status;
1415}
1416
1417/*
1418 * __vxge_hw_ring_delete - Removes the ring
1419 * This function frees up the memory pool and removes the ring
1420 */
1421enum vxge_hw_status __vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp)
1422{
1423	struct __vxge_hw_ring *ring = vp->vpath->ringh;
1424
1425	__vxge_hw_ring_abort(ring);
1426
1427	if (ring->mempool)
1428		__vxge_hw_mempool_destroy(ring->mempool);
1429
1430	vp->vpath->ringh = NULL;
1431	__vxge_hw_channel_free(&ring->channel);
1432
1433	return VXGE_HW_OK;
1434}
1435
1436/*
1437 * __vxge_hw_mempool_grow
1438 * Will resize mempool up to %num_allocate value.
1439 */
1440enum vxge_hw_status
1441__vxge_hw_mempool_grow(struct vxge_hw_mempool *mempool, u32 num_allocate,
1442		       u32 *num_allocated)
1443{
1444	u32 i, first_time = mempool->memblocks_allocated == 0 ? 1 : 0;
1445	u32 n_items = mempool->items_per_memblock;
1446	u32 start_block_idx = mempool->memblocks_allocated;
1447	u32 end_block_idx = mempool->memblocks_allocated + num_allocate;
1448	enum vxge_hw_status status = VXGE_HW_OK;
1449
1450	*num_allocated = 0;
1451
1452	if (end_block_idx > mempool->memblocks_max) {
1453		status = VXGE_HW_ERR_OUT_OF_MEMORY;
1454		goto exit;
1455	}
1456
1457	for (i = start_block_idx; i < end_block_idx; i++) {
1458		u32 j;
1459		u32 is_last = ((end_block_idx - 1) == i);
1460		struct vxge_hw_mempool_dma *dma_object =
1461			mempool->memblocks_dma_arr + i;
1462		void *the_memblock;
1463
1464		/* allocate memblock's private part. Each DMA memblock
1465		 * has a space allocated for item's private usage upon
1466		 * mempool's user request. Each time mempool grows, it will
1467		 * allocate new memblock and its private part at once.
1468		 * This helps to minimize memory usage a lot. */
1469		mempool->memblocks_priv_arr[i] =
1470				vmalloc(mempool->items_priv_size * n_items);
1471		if (mempool->memblocks_priv_arr[i] == NULL) {
1472			status = VXGE_HW_ERR_OUT_OF_MEMORY;
1473			goto exit;
1474		}
1475
1476		memset(mempool->memblocks_priv_arr[i], 0,
1477			     mempool->items_priv_size * n_items);
1478
1479		/* allocate DMA-capable memblock */
1480		mempool->memblocks_arr[i] =
1481			__vxge_hw_blockpool_malloc(mempool->devh,
1482				mempool->memblock_size, dma_object);
1483		if (mempool->memblocks_arr[i] == NULL) {
1484			vfree(mempool->memblocks_priv_arr[i]);
1485			status = VXGE_HW_ERR_OUT_OF_MEMORY;
1486			goto exit;
1487		}
1488
1489		(*num_allocated)++;
1490		mempool->memblocks_allocated++;
1491
1492		memset(mempool->memblocks_arr[i], 0, mempool->memblock_size);
1493
1494		the_memblock = mempool->memblocks_arr[i];
1495
1496		/* fill the items hash array */
1497		for (j = 0; j < n_items; j++) {
1498			u32 index = i * n_items + j;
1499
1500			if (first_time && index >= mempool->items_initial)
1501				break;
1502
1503			mempool->items_arr[index] =
1504				((char *)the_memblock + j*mempool->item_size);
1505
1506			/* let caller to do more job on each item */
1507			if (mempool->item_func_alloc != NULL)
1508				mempool->item_func_alloc(mempool, i,
1509					dma_object, index, is_last);
1510
1511			mempool->items_current = index + 1;
1512		}
1513
1514		if (first_time && mempool->items_current ==
1515					mempool->items_initial)
1516			break;
1517	}
1518exit:
1519	return status;
1520}
1521
1522/*
1523 * __vxge_hw_mempool_create
1524 * This function will create a memory pool object. Pool may grow but will
1525 * never shrink. Pool consists of number of dynamically allocated blocks
1526 * with size enough to hold %items_initial number of items. Memory is
1527 * DMA-able but client must map/unmap before interoperating with the device.
1528 */
1529struct vxge_hw_mempool*
1530__vxge_hw_mempool_create(
1531	struct __vxge_hw_device *devh,
1532	u32 memblock_size,
1533	u32 item_size,
1534	u32 items_priv_size,
1535	u32 items_initial,
1536	u32 items_max,
1537	struct vxge_hw_mempool_cbs *mp_callback,
1538	void *userdata)
1539{
1540	enum vxge_hw_status status = VXGE_HW_OK;
1541	u32 memblocks_to_allocate;
1542	struct vxge_hw_mempool *mempool = NULL;
1543	u32 allocated;
1544
1545	if (memblock_size < item_size) {
1546		status = VXGE_HW_FAIL;
1547		goto exit;
1548	}
1549
1550	mempool = (struct vxge_hw_mempool *)
1551			vmalloc(sizeof(struct vxge_hw_mempool));
1552	if (mempool == NULL) {
1553		status = VXGE_HW_ERR_OUT_OF_MEMORY;
1554		goto exit;
1555	}
1556	memset(mempool, 0, sizeof(struct vxge_hw_mempool));
1557
1558	mempool->devh			= devh;
1559	mempool->memblock_size		= memblock_size;
1560	mempool->items_max		= items_max;
1561	mempool->items_initial		= items_initial;
1562	mempool->item_size		= item_size;
1563	mempool->items_priv_size	= items_priv_size;
1564	mempool->item_func_alloc	= mp_callback->item_func_alloc;
1565	mempool->userdata		= userdata;
1566
1567	mempool->memblocks_allocated = 0;
1568
1569	mempool->items_per_memblock = memblock_size / item_size;
1570
1571	mempool->memblocks_max = (items_max + mempool->items_per_memblock - 1) /
1572					mempool->items_per_memblock;
1573
1574	/* allocate array of memblocks */
1575	mempool->memblocks_arr =
1576		(void **) vmalloc(sizeof(void *) * mempool->memblocks_max);
1577	if (mempool->memblocks_arr == NULL) {
1578		__vxge_hw_mempool_destroy(mempool);
1579		status = VXGE_HW_ERR_OUT_OF_MEMORY;
1580		mempool = NULL;
1581		goto exit;
1582	}
1583	memset(mempool->memblocks_arr, 0,
1584		sizeof(void *) * mempool->memblocks_max);
1585
1586	/* allocate array of private parts of items per memblocks */
1587	mempool->memblocks_priv_arr =
1588		(void **) vmalloc(sizeof(void *) * mempool->memblocks_max);
1589	if (mempool->memblocks_priv_arr == NULL) {
1590		__vxge_hw_mempool_destroy(mempool);
1591		status = VXGE_HW_ERR_OUT_OF_MEMORY;
1592		mempool = NULL;
1593		goto exit;
1594	}
1595	memset(mempool->memblocks_priv_arr, 0,
1596		    sizeof(void *) * mempool->memblocks_max);
1597
1598	/* allocate array of memblocks DMA objects */
1599	mempool->memblocks_dma_arr = (struct vxge_hw_mempool_dma *)
1600		vmalloc(sizeof(struct vxge_hw_mempool_dma) *
1601			mempool->memblocks_max);
1602
1603	if (mempool->memblocks_dma_arr == NULL) {
1604		__vxge_hw_mempool_destroy(mempool);
1605		status = VXGE_HW_ERR_OUT_OF_MEMORY;
1606		mempool = NULL;
1607		goto exit;
1608	}
1609	memset(mempool->memblocks_dma_arr, 0,
1610			sizeof(struct vxge_hw_mempool_dma) *
1611			mempool->memblocks_max);
1612
1613	/* allocate hash array of items */
1614	mempool->items_arr =
1615		(void **) vmalloc(sizeof(void *) * mempool->items_max);
1616	if (mempool->items_arr == NULL) {
1617		__vxge_hw_mempool_destroy(mempool);
1618		status = VXGE_HW_ERR_OUT_OF_MEMORY;
1619		mempool = NULL;
1620		goto exit;
1621	}
1622	memset(mempool->items_arr, 0, sizeof(void *) * mempool->items_max);
1623
1624	/* calculate initial number of memblocks */
1625	memblocks_to_allocate = (mempool->items_initial +
1626				 mempool->items_per_memblock - 1) /
1627						mempool->items_per_memblock;
1628
1629	/* pre-allocate the mempool */
1630	status = __vxge_hw_mempool_grow(mempool, memblocks_to_allocate,
1631					&allocated);
1632	if (status != VXGE_HW_OK) {
1633		__vxge_hw_mempool_destroy(mempool);
1634		status = VXGE_HW_ERR_OUT_OF_MEMORY;
1635		mempool = NULL;
1636		goto exit;
1637	}
1638
1639exit:
1640	return mempool;
1641}
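
/*
 * Illustrative usage sketch: this mirrors how the ring code above builds its
 * RxD block pool (names other than the vxge_hw_* symbols are placeholders):
 *
 *	struct vxge_hw_mempool_cbs cbs;
 *	struct vxge_hw_mempool *pool;
 *
 *	cbs.item_func_alloc = my_item_alloc_callback;
 *	pool = __vxge_hw_mempool_create(devh, VXGE_HW_BLOCK_SIZE, item_size,
 *			item_priv_size, items_initial, items_max, &cbs, userdata);
 *	if (pool == NULL)
 *		return VXGE_HW_ERR_OUT_OF_MEMORY;
 *	...
 *	__vxge_hw_mempool_destroy(pool);
 */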
1642
1643/*
1644 * __vxge_hw_mempool_destroy
1645 */
1646void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool)
1647{
1648	u32 i, j;
1649	struct __vxge_hw_device *devh = mempool->devh;
1650
1651	for (i = 0; i < mempool->memblocks_allocated; i++) {
1652		struct vxge_hw_mempool_dma *dma_object;
1653
1654		vxge_assert(mempool->memblocks_arr[i]);
1655		vxge_assert(mempool->memblocks_dma_arr + i);
1656
1657		dma_object = mempool->memblocks_dma_arr + i;
1658
1659		for (j = 0; j < mempool->items_per_memblock; j++) {
1660			u32 index = i * mempool->items_per_memblock + j;
1661
1662			/* to skip last partially filled(if any) memblock */
1663			if (index >= mempool->items_current)
1664				break;
1665		}
1666
1667		vfree(mempool->memblocks_priv_arr[i]);
1668
1669		__vxge_hw_blockpool_free(devh, mempool->memblocks_arr[i],
1670				mempool->memblock_size, dma_object);
1671	}
1672
1673	vfree(mempool->items_arr);
1674
1675	vfree(mempool->memblocks_dma_arr);
1676
1677	vfree(mempool->memblocks_priv_arr);
1678
1679	vfree(mempool->memblocks_arr);
1680
1681	vfree(mempool);
1682}
1683
1684/*
1685 * __vxge_hw_device_fifo_config_check - Check fifo configuration.
1686 * Check the fifo configuration
1687 */
1688enum vxge_hw_status
1689__vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config)
1690{
1691	if ((fifo_config->fifo_blocks < VXGE_HW_MIN_FIFO_BLOCKS) ||
1692	     (fifo_config->fifo_blocks > VXGE_HW_MAX_FIFO_BLOCKS))
1693		return VXGE_HW_BADCFG_FIFO_BLOCKS;
1694
1695	return VXGE_HW_OK;
1696}
1697
1698/*
1699 * __vxge_hw_device_vpath_config_check - Check vpath configuration.
1700 * Check the vpath configuration
1701 */
1702enum vxge_hw_status
1703__vxge_hw_device_vpath_config_check(struct vxge_hw_vp_config *vp_config)
1704{
1705	enum vxge_hw_status status;
1706
1707	if ((vp_config->min_bandwidth < VXGE_HW_VPATH_BANDWIDTH_MIN) ||
1708		(vp_config->min_bandwidth >
1709					VXGE_HW_VPATH_BANDWIDTH_MAX))
1710		return VXGE_HW_BADCFG_VPATH_MIN_BANDWIDTH;
1711
1712	status = __vxge_hw_device_fifo_config_check(&vp_config->fifo);
1713	if (status != VXGE_HW_OK)
1714		return status;
1715
1716	if ((vp_config->mtu != VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU) &&
1717		((vp_config->mtu < VXGE_HW_VPATH_MIN_INITIAL_MTU) ||
1718		(vp_config->mtu > VXGE_HW_VPATH_MAX_INITIAL_MTU)))
1719		return VXGE_HW_BADCFG_VPATH_MTU;
1720
1721	if ((vp_config->rpa_strip_vlan_tag !=
1722		VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT) &&
1723		(vp_config->rpa_strip_vlan_tag !=
1724		VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE) &&
1725		(vp_config->rpa_strip_vlan_tag !=
1726		VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_DISABLE))
1727		return VXGE_HW_BADCFG_VPATH_RPA_STRIP_VLAN_TAG;
1728
1729	return VXGE_HW_OK;
1730}
1731
1732/*
1733 * __vxge_hw_device_config_check - Check device configuration.
1734 * Check the device configuration
1735 */
1736enum vxge_hw_status
1737__vxge_hw_device_config_check(struct vxge_hw_device_config *new_config)
1738{
1739	u32 i;
1740	enum vxge_hw_status status;
1741
1742	if ((new_config->intr_mode != VXGE_HW_INTR_MODE_IRQLINE) &&
1743	   (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX) &&
1744	   (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) &&
1745	   (new_config->intr_mode != VXGE_HW_INTR_MODE_DEF))
1746		return VXGE_HW_BADCFG_INTR_MODE;
1747
1748	if ((new_config->rts_mac_en != VXGE_HW_RTS_MAC_DISABLE) &&
1749	   (new_config->rts_mac_en != VXGE_HW_RTS_MAC_ENABLE))
1750		return VXGE_HW_BADCFG_RTS_MAC_EN;
1751
1752	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
1753		status = __vxge_hw_device_vpath_config_check(
1754				&new_config->vp_config[i]);
1755		if (status != VXGE_HW_OK)
1756			return status;
1757	}
1758
1759	return VXGE_HW_OK;
1760}
1761
1762/*
1763 * vxge_hw_device_config_default_get - Initialize device config with defaults.
1764 * Initialize Titan device config with default values.
1765 */
1766enum vxge_hw_status __devinit
1767vxge_hw_device_config_default_get(struct vxge_hw_device_config *device_config)
1768{
1769	u32 i;
1770
1771	device_config->dma_blockpool_initial =
1772					VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE;
1773	device_config->dma_blockpool_max = VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE;
1774	device_config->intr_mode = VXGE_HW_INTR_MODE_DEF;
1775	device_config->rth_en = VXGE_HW_RTH_DEFAULT;
1776	device_config->rth_it_type = VXGE_HW_RTH_IT_TYPE_DEFAULT;
1777	device_config->device_poll_millis =  VXGE_HW_DEF_DEVICE_POLL_MILLIS;
1778	device_config->rts_mac_en =  VXGE_HW_RTS_MAC_DEFAULT;
1779
1780	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
1781
1782		device_config->vp_config[i].vp_id = i;
1783
1784		device_config->vp_config[i].min_bandwidth =
1785				VXGE_HW_VPATH_BANDWIDTH_DEFAULT;
1786
1787		device_config->vp_config[i].ring.enable = VXGE_HW_RING_DEFAULT;
1788
1789		device_config->vp_config[i].ring.ring_blocks =
1790				VXGE_HW_DEF_RING_BLOCKS;
1791
1792		device_config->vp_config[i].ring.buffer_mode =
1793				VXGE_HW_RING_RXD_BUFFER_MODE_DEFAULT;
1794
1795		device_config->vp_config[i].ring.scatter_mode =
1796				VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT;
1797
1798		device_config->vp_config[i].ring.rxds_limit =
1799				VXGE_HW_DEF_RING_RXDS_LIMIT;
1800
1801		device_config->vp_config[i].fifo.enable = VXGE_HW_FIFO_ENABLE;
1802
1803		device_config->vp_config[i].fifo.fifo_blocks =
1804				VXGE_HW_MIN_FIFO_BLOCKS;
1805
1806		device_config->vp_config[i].fifo.max_frags =
1807				VXGE_HW_MAX_FIFO_FRAGS;
1808
1809		device_config->vp_config[i].fifo.memblock_size =
1810				VXGE_HW_DEF_FIFO_MEMBLOCK_SIZE;
1811
1812		device_config->vp_config[i].fifo.alignment_size =
1813				VXGE_HW_DEF_FIFO_ALIGNMENT_SIZE;
1814
1815		device_config->vp_config[i].fifo.intr =
1816				VXGE_HW_FIFO_QUEUE_INTR_DEFAULT;
1817
1818		device_config->vp_config[i].fifo.no_snoop_bits =
1819				VXGE_HW_FIFO_NO_SNOOP_DEFAULT;
1820		device_config->vp_config[i].tti.intr_enable =
1821				VXGE_HW_TIM_INTR_DEFAULT;
1822
1823		device_config->vp_config[i].tti.btimer_val =
1824				VXGE_HW_USE_FLASH_DEFAULT;
1825
1826		device_config->vp_config[i].tti.timer_ac_en =
1827				VXGE_HW_USE_FLASH_DEFAULT;
1828
1829		device_config->vp_config[i].tti.timer_ci_en =
1830				VXGE_HW_USE_FLASH_DEFAULT;
1831
1832		device_config->vp_config[i].tti.timer_ri_en =
1833				VXGE_HW_USE_FLASH_DEFAULT;
1834
1835		device_config->vp_config[i].tti.rtimer_val =
1836				VXGE_HW_USE_FLASH_DEFAULT;
1837
1838		device_config->vp_config[i].tti.util_sel =
1839				VXGE_HW_USE_FLASH_DEFAULT;
1840
1841		device_config->vp_config[i].tti.ltimer_val =
1842				VXGE_HW_USE_FLASH_DEFAULT;
1843
1844		device_config->vp_config[i].tti.urange_a =
1845				VXGE_HW_USE_FLASH_DEFAULT;
1846
1847		device_config->vp_config[i].tti.uec_a =
1848				VXGE_HW_USE_FLASH_DEFAULT;
1849
1850		device_config->vp_config[i].tti.urange_b =
1851				VXGE_HW_USE_FLASH_DEFAULT;
1852
1853		device_config->vp_config[i].tti.uec_b =
1854				VXGE_HW_USE_FLASH_DEFAULT;
1855
1856		device_config->vp_config[i].tti.urange_c =
1857				VXGE_HW_USE_FLASH_DEFAULT;
1858
1859		device_config->vp_config[i].tti.uec_c =
1860				VXGE_HW_USE_FLASH_DEFAULT;
1861
1862		device_config->vp_config[i].tti.uec_d =
1863				VXGE_HW_USE_FLASH_DEFAULT;
1864
1865		device_config->vp_config[i].rti.intr_enable =
1866				VXGE_HW_TIM_INTR_DEFAULT;
1867
1868		device_config->vp_config[i].rti.btimer_val =
1869				VXGE_HW_USE_FLASH_DEFAULT;
1870
1871		device_config->vp_config[i].rti.timer_ac_en =
1872				VXGE_HW_USE_FLASH_DEFAULT;
1873
1874		device_config->vp_config[i].rti.timer_ci_en =
1875				VXGE_HW_USE_FLASH_DEFAULT;
1876
1877		device_config->vp_config[i].rti.timer_ri_en =
1878				VXGE_HW_USE_FLASH_DEFAULT;
1879
1880		device_config->vp_config[i].rti.rtimer_val =
1881				VXGE_HW_USE_FLASH_DEFAULT;
1882
1883		device_config->vp_config[i].rti.util_sel =
1884				VXGE_HW_USE_FLASH_DEFAULT;
1885
1886		device_config->vp_config[i].rti.ltimer_val =
1887				VXGE_HW_USE_FLASH_DEFAULT;
1888
1889		device_config->vp_config[i].rti.urange_a =
1890				VXGE_HW_USE_FLASH_DEFAULT;
1891
1892		device_config->vp_config[i].rti.uec_a =
1893				VXGE_HW_USE_FLASH_DEFAULT;
1894
1895		device_config->vp_config[i].rti.urange_b =
1896				VXGE_HW_USE_FLASH_DEFAULT;
1897
1898		device_config->vp_config[i].rti.uec_b =
1899				VXGE_HW_USE_FLASH_DEFAULT;
1900
1901		device_config->vp_config[i].rti.urange_c =
1902				VXGE_HW_USE_FLASH_DEFAULT;
1903
1904		device_config->vp_config[i].rti.uec_c =
1905				VXGE_HW_USE_FLASH_DEFAULT;
1906
1907		device_config->vp_config[i].rti.uec_d =
1908				VXGE_HW_USE_FLASH_DEFAULT;
1909
1910		device_config->vp_config[i].mtu =
1911				VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU;
1912
1913		device_config->vp_config[i].rpa_strip_vlan_tag =
1914			VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT;
1915	}
1916
1917	return VXGE_HW_OK;
1918}
1919
1920/*
1921 * __vxge_hw_legacy_swapper_set - Set the swapper bits for the legacy section.
1922 * Set the swapper bits appropriately for the legacy section.
1923 */
1924enum vxge_hw_status
1925__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg)
1926{
1927	u64 val64;
1928	enum vxge_hw_status status = VXGE_HW_OK;
1929
1930	val64 = readq(&legacy_reg->toc_swapper_fb);
1931
1932	wmb();
1933
1934	switch (val64) {
1935
1936	case VXGE_HW_SWAPPER_INITIAL_VALUE:
1937		return status;
1938
1939	case VXGE_HW_SWAPPER_BYTE_SWAPPED_BIT_FLIPPED:
1940		writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
1941			&legacy_reg->pifm_rd_swap_en);
1942		writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
1943			&legacy_reg->pifm_rd_flip_en);
1944		writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
1945			&legacy_reg->pifm_wr_swap_en);
1946		writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
1947			&legacy_reg->pifm_wr_flip_en);
1948		break;
1949
1950	case VXGE_HW_SWAPPER_BYTE_SWAPPED:
1951		writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
1952			&legacy_reg->pifm_rd_swap_en);
1953		writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
1954			&legacy_reg->pifm_wr_swap_en);
1955		break;
1956
1957	case VXGE_HW_SWAPPER_BIT_FLIPPED:
1958		writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
1959			&legacy_reg->pifm_rd_flip_en);
1960		writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
1961			&legacy_reg->pifm_wr_flip_en);
1962		break;
1963	}
1964
1965	wmb();
1966
1967	val64 = readq(&legacy_reg->toc_swapper_fb);
1968
1969	if (val64 != VXGE_HW_SWAPPER_INITIAL_VALUE)
1970		status = VXGE_HW_ERR_SWAPPER_CTRL;
1971
1972	return status;
1973}
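
/*
 * Note on the swapper programming above: toc_swapper_fb holds a fixed
 * 64-bit signature.  Reading it through the current mapping and comparing
 * the result with the expected value, its byte-swapped form and its
 * bit-flipped form tells the driver which combination of PIF read/write
 * byte-swap and bit-flip enables to program so that later register reads
 * return host-order data; if the signature still does not match afterwards,
 * the function reports VXGE_HW_ERR_SWAPPER_CTRL.
 */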
1974
1975/*
1976 * __vxge_hw_vpath_swapper_set - Set the swapper bits for the vpath.
1977 * Set the swapper bits appropriately for the vpath.
1978 */
1979enum vxge_hw_status
1980__vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg)
1981{
1982#ifndef __BIG_ENDIAN
1983	u64 val64;
1984
1985	val64 = readq(&vpath_reg->vpath_general_cfg1);
1986	wmb();
1987	val64 |= VXGE_HW_VPATH_GENERAL_CFG1_CTL_BYTE_SWAPEN;
1988	writeq(val64, &vpath_reg->vpath_general_cfg1);
1989	wmb();
1990#endif
1991	return VXGE_HW_OK;
1992}
1993
1994/*
1995 * __vxge_hw_kdfc_swapper_set - Set the swapper bits for the kdfc.
1996 * Set the swapper bits appropriately for the vpath.
1997 */
1998enum vxge_hw_status
1999__vxge_hw_kdfc_swapper_set(
2000	struct vxge_hw_legacy_reg __iomem *legacy_reg,
2001	struct vxge_hw_vpath_reg __iomem *vpath_reg)
2002{
2003	u64 val64;
2004
2005	val64 = readq(&legacy_reg->pifm_wr_swap_en);
2006
2007	if (val64 == VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE) {
2008		val64 = readq(&vpath_reg->kdfcctl_cfg0);
2009		wmb();
2010
2011		val64 |= VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO0	|
2012			VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO1	|
2013			VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO2;
2014
2015		writeq(val64, &vpath_reg->kdfcctl_cfg0);
2016		wmb();
2017	}
2018
2019	return VXGE_HW_OK;
2020}
2021
2022/*
2023 * vxge_hw_mgmt_device_config - Retrieve device configuration.
2024 * Get device configuration. Permits retrieval, at run time, of the
2025 * configuration values that were used to initialize and configure the device.
2026 */
2027enum vxge_hw_status
2028vxge_hw_mgmt_device_config(struct __vxge_hw_device *hldev,
2029			   struct vxge_hw_device_config *dev_config, int size)
2030{
2031
2032	if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC))
2033		return VXGE_HW_ERR_INVALID_DEVICE;
2034
2035	if (size != sizeof(struct vxge_hw_device_config))
2036		return VXGE_HW_ERR_VERSION_CONFLICT;
2037
2038	memcpy(dev_config, &hldev->config,
2039		sizeof(struct vxge_hw_device_config));
2040
2041	return VXGE_HW_OK;
2042}
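
/*
 * Illustrative use only ("hldev" is assumed to be an initialized device
 * handle):
 *
 *	struct vxge_hw_device_config cfg;
 *	enum vxge_hw_status status;
 *
 *	status = vxge_hw_mgmt_device_config(hldev, &cfg, sizeof(cfg));
 *
 * Passing sizeof(cfg) lets the function reject a caller that was built
 * against a different struct vxge_hw_device_config layout with
 * VXGE_HW_ERR_VERSION_CONFLICT.
 */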
2043
2044/*
2045 * vxge_hw_mgmt_reg_read - Read Titan register.
2046 */
2047enum vxge_hw_status
2048vxge_hw_mgmt_reg_read(struct __vxge_hw_device *hldev,
2049		      enum vxge_hw_mgmt_reg_type type,
2050		      u32 index, u32 offset, u64 *value)
2051{
2052	enum vxge_hw_status status = VXGE_HW_OK;
2053
2054	if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
2055		status = VXGE_HW_ERR_INVALID_DEVICE;
2056		goto exit;
2057	}
2058
2059	switch (type) {
2060	case vxge_hw_mgmt_reg_type_legacy:
2061		if (offset > sizeof(struct vxge_hw_legacy_reg) - 8) {
2062			status = VXGE_HW_ERR_INVALID_OFFSET;
2063			break;
2064		}
2065		*value = readq((void __iomem *)hldev->legacy_reg + offset);
2066		break;
2067	case vxge_hw_mgmt_reg_type_toc:
2068		if (offset > sizeof(struct vxge_hw_toc_reg) - 8) {
2069			status = VXGE_HW_ERR_INVALID_OFFSET;
2070			break;
2071		}
2072		*value = readq((void __iomem *)hldev->toc_reg + offset);
2073		break;
2074	case vxge_hw_mgmt_reg_type_common:
2075		if (offset > sizeof(struct vxge_hw_common_reg) - 8) {
2076			status = VXGE_HW_ERR_INVALID_OFFSET;
2077			break;
2078		}
2079		*value = readq((void __iomem *)hldev->common_reg + offset);
2080		break;
2081	case vxge_hw_mgmt_reg_type_mrpcim:
2082		if (!(hldev->access_rights &
2083			VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
2084			status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
2085			break;
2086		}
2087		if (offset > sizeof(struct vxge_hw_mrpcim_reg) - 8) {
2088			status = VXGE_HW_ERR_INVALID_OFFSET;
2089			break;
2090		}
2091		*value = readq((void __iomem *)hldev->mrpcim_reg + offset);
2092		break;
2093	case vxge_hw_mgmt_reg_type_srpcim:
2094		if (!(hldev->access_rights &
2095			VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM)) {
2096			status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
2097			break;
2098		}
2099		if (index > VXGE_HW_TITAN_SRPCIM_REG_SPACES - 1) {
2100			status = VXGE_HW_ERR_INVALID_INDEX;
2101			break;
2102		}
2103		if (offset > sizeof(struct vxge_hw_srpcim_reg) - 8) {
2104			status = VXGE_HW_ERR_INVALID_OFFSET;
2105			break;
2106		}
2107		*value = readq((void __iomem *)hldev->srpcim_reg[index] +
2108				offset);
2109		break;
2110	case vxge_hw_mgmt_reg_type_vpmgmt:
2111		if ((index > VXGE_HW_TITAN_VPMGMT_REG_SPACES - 1) ||
2112			(!(hldev->vpath_assignments & vxge_mBIT(index)))) {
2113			status = VXGE_HW_ERR_INVALID_INDEX;
2114			break;
2115		}
2116		if (offset > sizeof(struct vxge_hw_vpmgmt_reg) - 8) {
2117			status = VXGE_HW_ERR_INVALID_OFFSET;
2118			break;
2119		}
2120		*value = readq((void __iomem *)hldev->vpmgmt_reg[index] +
2121				offset);
2122		break;
2123	case vxge_hw_mgmt_reg_type_vpath:
2124		if ((index > VXGE_HW_TITAN_VPATH_REG_SPACES - 1) ||
2125			(!(hldev->vpath_assignments & vxge_mBIT(index)))) {
2126			status = VXGE_HW_ERR_INVALID_INDEX;
2127			break;
2128		}
2133		if (offset > sizeof(struct vxge_hw_vpath_reg) - 8) {
2134			status = VXGE_HW_ERR_INVALID_OFFSET;
2135			break;
2136		}
2137		*value = readq((void __iomem *)hldev->vpath_reg[index] +
2138				offset);
2139		break;
2140	default:
2141		status = VXGE_HW_ERR_INVALID_TYPE;
2142		break;
2143	}
2144
2145exit:
2146	return status;
2147}
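
/*
 * Illustrative use only ("hldev" is assumed to be a valid device handle):
 * reading the first quadword of vpath 0's register space through the
 * management interface.  The offset is bounds-checked against the size of
 * the selected register block and, where applicable, the index against the
 * vpaths (or register spaces) actually available to this function.
 *
 *	u64 val;
 *	enum vxge_hw_status status;
 *
 *	status = vxge_hw_mgmt_reg_read(hldev, vxge_hw_mgmt_reg_type_vpath,
 *				       0, 0, &val);
 */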
2148
2149/*
2150 * vxge_hw_vpath_strip_fcs_check - Check for FCS strip.
2151 */
2152enum vxge_hw_status
2153vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask)
2154{
2155	struct vxge_hw_vpmgmt_reg       __iomem *vpmgmt_reg;
2156	enum vxge_hw_status status = VXGE_HW_OK;
2157	int i = 0, j = 0;
2158
2159	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
2160		if (!((vpath_mask) & vxge_mBIT(i)))
2161			continue;
2162		vpmgmt_reg = hldev->vpmgmt_reg[i];
2163		for (j = 0; j < VXGE_HW_MAC_MAX_MAC_PORT_ID; j++) {
2164			if (readq(&vpmgmt_reg->rxmac_cfg0_port_vpmgmt_clone[j])
2165			& VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_STRIP_FCS)
2166				return VXGE_HW_FAIL;
2167		}
2168	}
2169	return status;
2170}

2171/*
2172 * vxge_hw_mgmt_reg_write - Write Titan register.
2173 */
2174enum vxge_hw_status
2175vxge_hw_mgmt_reg_write(struct __vxge_hw_device *hldev,
2176		      enum vxge_hw_mgmt_reg_type type,
2177		      u32 index, u32 offset, u64 value)
2178{
2179	enum vxge_hw_status status = VXGE_HW_OK;
2180
2181	if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
2182		status = VXGE_HW_ERR_INVALID_DEVICE;
2183		goto exit;
2184	}
2185
2186	switch (type) {
2187	case vxge_hw_mgmt_reg_type_legacy:
2188		if (offset > sizeof(struct vxge_hw_legacy_reg) - 8) {
2189			status = VXGE_HW_ERR_INVALID_OFFSET;
2190			break;
2191		}
2192		writeq(value, (void __iomem *)hldev->legacy_reg + offset);
2193		break;
2194	case vxge_hw_mgmt_reg_type_toc:
2195		if (offset > sizeof(struct vxge_hw_toc_reg) - 8) {
2196			status = VXGE_HW_ERR_INVALID_OFFSET;
2197			break;
2198		}
2199		writeq(value, (void __iomem *)hldev->toc_reg + offset);
2200		break;
2201	case vxge_hw_mgmt_reg_type_common:
2202		if (offset > sizeof(struct vxge_hw_common_reg) - 8) {
2203			status = VXGE_HW_ERR_INVALID_OFFSET;
2204			break;
2205		}
2206		writeq(value, (void __iomem *)hldev->common_reg + offset);
2207		break;
2208	case vxge_hw_mgmt_reg_type_mrpcim:
2209		if (!(hldev->access_rights &
2210			VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
2211			status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
2212			break;
2213		}
2214		if (offset > sizeof(struct vxge_hw_mrpcim_reg) - 8) {
2215			status = VXGE_HW_ERR_INVALID_OFFSET;
2216			break;
2217		}
2218		writeq(value, (void __iomem *)hldev->mrpcim_reg + offset);
2219		break;
2220	case vxge_hw_mgmt_reg_type_srpcim:
2221		if (!(hldev->access_rights &
2222			VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM)) {
2223			status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
2224			break;
2225		}
2226		if (index > VXGE_HW_TITAN_SRPCIM_REG_SPACES - 1) {
2227			status = VXGE_HW_ERR_INVALID_INDEX;
2228			break;
2229		}
2230		if (offset > sizeof(struct vxge_hw_srpcim_reg) - 8) {
2231			status = VXGE_HW_ERR_INVALID_OFFSET;
2232			break;
2233		}
2234		writeq(value, (void __iomem *)hldev->srpcim_reg[index] +
2235			offset);
2236
2237		break;
2238	case vxge_hw_mgmt_reg_type_vpmgmt:
2239		if ((index > VXGE_HW_TITAN_VPMGMT_REG_SPACES - 1) ||
2240			(!(hldev->vpath_assignments & vxge_mBIT(index)))) {
2241			status = VXGE_HW_ERR_INVALID_INDEX;
2242			break;
2243		}
2244		if (offset > sizeof(struct vxge_hw_vpmgmt_reg) - 8) {
2245			status = VXGE_HW_ERR_INVALID_OFFSET;
2246			break;
2247		}
2248		writeq(value, (void __iomem *)hldev->vpmgmt_reg[index] +
2249			offset);
2250		break;
2251	case vxge_hw_mgmt_reg_type_vpath:
2252		if ((index > VXGE_HW_TITAN_VPATH_REG_SPACES-1) ||
2253			(!(hldev->vpath_assignments & vxge_mBIT(index)))) {
2254			status = VXGE_HW_ERR_INVALID_INDEX;
2255			break;
2256		}
2257		if (offset > sizeof(struct vxge_hw_vpath_reg) - 8) {
2258			status = VXGE_HW_ERR_INVALID_OFFSET;
2259			break;
2260		}
2261		writeq(value, (void __iomem *)hldev->vpath_reg[index] +
2262			offset);
2263		break;
2264	default:
2265		status = VXGE_HW_ERR_INVALID_TYPE;
2266		break;
2267	}
2268exit:
2269	return status;
2270}
2271
2272/*
2273 * __vxge_hw_fifo_mempool_item_alloc - Allocate List blocks for TxD
2274 * list callback
2275 * This function is the callback passed to __vxge_hw_mempool_create to create
2276 * the memory pool for the TxD list
2277 */
2278static void
2279__vxge_hw_fifo_mempool_item_alloc(
2280	struct vxge_hw_mempool *mempoolh,
2281	u32 memblock_index, struct vxge_hw_mempool_dma *dma_object,
2282	u32 index, u32 is_last)
2283{
2284	u32 memblock_item_idx;
2285	struct __vxge_hw_fifo_txdl_priv *txdl_priv;
2286	struct vxge_hw_fifo_txd *txdp =
2287		(struct vxge_hw_fifo_txd *)mempoolh->items_arr[index];
2288	struct __vxge_hw_fifo *fifo =
2289			(struct __vxge_hw_fifo *)mempoolh->userdata;
2290	void *memblock = mempoolh->memblocks_arr[memblock_index];
2291
2292	vxge_assert(txdp);
2293
2294	txdp->host_control = (u64) (size_t)
2295	__vxge_hw_mempool_item_priv(mempoolh, memblock_index, txdp,
2296					&memblock_item_idx);
2297
2298	txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdp);
2299
2300	vxge_assert(txdl_priv);
2301
2302	fifo->channel.reserve_arr[fifo->channel.reserve_ptr - 1 - index] = txdp;
2303
2304	/* pre-format HW's TxDL's private */
2305	txdl_priv->dma_offset = (char *)txdp - (char *)memblock;
2306	txdl_priv->dma_addr = dma_object->addr + txdl_priv->dma_offset;
2307	txdl_priv->dma_handle = dma_object->handle;
2308	txdl_priv->memblock   = memblock;
2309	txdl_priv->first_txdp = txdp;
2310	txdl_priv->next_txdl_priv = NULL;
2311	txdl_priv->alloc_frags = 0;
2312}
2313
2314/*
2315 * __vxge_hw_fifo_create - Create a FIFO
2316 * This function creates FIFO and initializes it.
2317 */
2318enum vxge_hw_status
2319__vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
2320		      struct vxge_hw_fifo_attr *attr)
2321{
2322	enum vxge_hw_status status = VXGE_HW_OK;
2323	struct __vxge_hw_fifo *fifo;
2324	struct vxge_hw_fifo_config *config;
2325	u32 txdl_size, txdl_per_memblock;
2326	struct vxge_hw_mempool_cbs fifo_mp_callback;
2327	struct __vxge_hw_virtualpath *vpath;
2328
2329	if ((vp == NULL) || (attr == NULL)) {
2330		status = VXGE_HW_ERR_INVALID_HANDLE;
2331		goto exit;
2332	}
2333	vpath = vp->vpath;
2334	config = &vpath->hldev->config.vp_config[vpath->vp_id].fifo;
2335
2336	txdl_size = config->max_frags * sizeof(struct vxge_hw_fifo_txd);
2337
2338	txdl_per_memblock = config->memblock_size / txdl_size;
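
	/*
	 * Illustrative note: each memory block holds
	 * memblock_size / txdl_size complete TxDLs (any remainder in a
	 * block is simply unused), and the channel length requested
	 * below is fifo_blocks * txdl_per_memblock.
	 */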
2339
2340	fifo = (struct __vxge_hw_fifo *)__vxge_hw_channel_allocate(vp,
2341					VXGE_HW_CHANNEL_TYPE_FIFO,
2342					config->fifo_blocks * txdl_per_memblock,
2343					attr->per_txdl_space, attr->userdata);
2344
2345	if (fifo == NULL) {
2346		status = VXGE_HW_ERR_OUT_OF_MEMORY;
2347		goto exit;
2348	}
2349
2350	vpath->fifoh = fifo;
2351	fifo->nofl_db = vpath->nofl_db;
2352
2353	fifo->vp_id = vpath->vp_id;
2354	fifo->vp_reg = vpath->vp_reg;
2355	fifo->stats = &vpath->sw_stats->fifo_stats;
2356
2357	fifo->config = config;
2358
2359	/* apply "interrupts per txdl" attribute */
2360	fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_UTILZ;
2361
2362	if (fifo->config->intr)
2363		fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST;
2364
2365	fifo->no_snoop_bits = config->no_snoop_bits;
2366
2367	/*
2368	 * FIFO memory management strategy:
2369	 *
2370	 * TxDL split into three independent parts:
2371	 *	- set of TxD's
2372	 *	- TxD HW private part
2373	 *	- driver private part
2374	 *
2375	 * Adaptive memory allocation is used, i.e. memory is allocated on
2376	 * demand with a size that fits into one memory block.
2377	 * One memory block may contain more than one TxDL.
2378	 *
2379	 * During "reserve" operations more memory can be allocated on demand
2380	 * for example due to FIFO full condition.
2381	 *
2382	 * The pool of memblocks never shrinks except in the __vxge_hw_fifo_delete
2383	 * routine, which will essentially stop the channel and free its resources.
2384	 */
2385
2386	/* TxDL common private size == TxDL private  +  driver private */
2387	fifo->priv_size =
2388		sizeof(struct __vxge_hw_fifo_txdl_priv) + attr->per_txdl_space;
2389	fifo->priv_size = ((fifo->priv_size  +  VXGE_CACHE_LINE_SIZE - 1) /
2390			VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE;
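
	/*
	 * The expression above rounds priv_size up to the next multiple of
	 * VXGE_CACHE_LINE_SIZE; e.g. (illustrative numbers only) a 100 byte
	 * private area with a 64 byte cache line becomes 128 bytes.
	 */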
2391
2392	fifo->per_txdl_space = attr->per_txdl_space;
2393
2394	/* recompute txdl size to be cacheline aligned */
2395	fifo->txdl_size = txdl_size;
2396	fifo->txdl_per_memblock = txdl_per_memblock;
2397
2398	fifo->txdl_term = attr->txdl_term;
2399	fifo->callback = attr->callback;
2400
2401	if (fifo->txdl_per_memblock == 0) {
2402		__vxge_hw_fifo_delete(vp);
2403		status = VXGE_HW_ERR_INVALID_BLOCK_SIZE;
2404		goto exit;
2405	}
2406
2407	fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc;
2408
2409	fifo->mempool =
2410		__vxge_hw_mempool_create(vpath->hldev,
2411			fifo->config->memblock_size,
2412			fifo->txdl_size,
2413			fifo->priv_size,
2414			(fifo->config->fifo_blocks * fifo->txdl_per_memblock),
2415			(fifo->config->fifo_blocks * fifo->txdl_per_memblock),
2416			&fifo_mp_callback,
2417			fifo);
2418
2419	if (fifo->mempool == NULL) {
2420		__vxge_hw_fifo_delete(vp);
2421		status = VXGE_HW_ERR_OUT_OF_MEMORY;
2422		goto exit;
2423	}
2424
2425	status = __vxge_hw_channel_initialize(&fifo->channel);
2426	if (status != VXGE_HW_OK) {
2427		__vxge_hw_fifo_delete(vp);
2428		goto exit;
2429	}
2430
2431	vxge_assert(fifo->channel.reserve_ptr);
2432exit:
2433	return status;
2434}
2435
2436/*
2437 * __vxge_hw_fifo_abort - Returns the TxDs
2438 * This function terminates the TxDs of the fifo
2439 */
2440enum vxge_hw_status __vxge_hw_fifo_abort(struct __vxge_hw_fifo *fifo)
2441{
2442	void *txdlh;
2443
2444	for (;;) {
2445		vxge_hw_channel_dtr_try_complete(&fifo->channel, &txdlh);
2446
2447		if (txdlh == NULL)
2448			break;
2449
2450		vxge_hw_channel_dtr_complete(&fifo->channel);
2451
2452		if (fifo->txdl_term) {
2453			fifo->txdl_term(txdlh,
2454			VXGE_HW_TXDL_STATE_POSTED,
2455			fifo->channel.userdata);
2456		}
2457
2458		vxge_hw_channel_dtr_free(&fifo->channel, txdlh);
2459	}
2460
2461	return VXGE_HW_OK;
2462}
2463
2464/*
2465 * __vxge_hw_fifo_reset - Resets the fifo
2466 * This function resets the fifo during vpath reset operation
2467 */
2468enum vxge_hw_status __vxge_hw_fifo_reset(struct __vxge_hw_fifo *fifo)
2469{
2470	enum vxge_hw_status status = VXGE_HW_OK;
2471
2472	__vxge_hw_fifo_abort(fifo);
2473	status = __vxge_hw_channel_reset(&fifo->channel);
2474
2475	return status;
2476}
2477
2478/*
2479 * __vxge_hw_fifo_delete - Removes the FIFO
2480 * This function frees up the memory pool and removes the FIFO
2481 */
2482enum vxge_hw_status __vxge_hw_fifo_delete(struct __vxge_hw_vpath_handle *vp)
2483{
2484	struct __vxge_hw_fifo *fifo = vp->vpath->fifoh;
2485
2486	__vxge_hw_fifo_abort(fifo);
2487
2488	if (fifo->mempool)
2489		__vxge_hw_mempool_destroy(fifo->mempool);
2490
2491	vp->vpath->fifoh = NULL;
2492
2493	__vxge_hw_channel_free(&fifo->channel);
2494
2495	return VXGE_HW_OK;
2496}
2497
2498/*
2499 * __vxge_hw_vpath_pci_read - Read the content of the given address
2500 *                          in pci config space.
2501 * Read from the vpath pci config space.
2502 */
2503enum vxge_hw_status
2504__vxge_hw_vpath_pci_read(struct __vxge_hw_virtualpath *vpath,
2505			 u32 phy_func_0, u32 offset, u32 *val)
2506{
2507	u64 val64;
2508	enum vxge_hw_status status = VXGE_HW_OK;
2509	struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
2510
2511	val64 =	VXGE_HW_PCI_CONFIG_ACCESS_CFG1_ADDRESS(offset);
2512
2513	if (phy_func_0)
2514		val64 |= VXGE_HW_PCI_CONFIG_ACCESS_CFG1_SEL_FUNC0;
2515
2516	writeq(val64, &vp_reg->pci_config_access_cfg1);
2517	wmb();
2518	writeq(VXGE_HW_PCI_CONFIG_ACCESS_CFG2_REQ,
2519			&vp_reg->pci_config_access_cfg2);
2520	wmb();
2521
2522	status = __vxge_hw_device_register_poll(
2523			&vp_reg->pci_config_access_cfg2,
2524			VXGE_HW_INTR_MASK_ALL, VXGE_HW_DEF_DEVICE_POLL_MILLIS);
2525
2526	if (status != VXGE_HW_OK)
2527		goto exit;
2528
2529	val64 = readq(&vp_reg->pci_config_access_status);
2530
2531	if (val64 & VXGE_HW_PCI_CONFIG_ACCESS_STATUS_ACCESS_ERR) {
2532		status = VXGE_HW_FAIL;
2533		*val = 0;
2534	} else
2535		*val = (u32)vxge_bVALn(val64, 32, 32);
2536exit:
2537	return status;
2538}
2539
2540/*
2541 * __vxge_hw_vpath_func_id_get - Get the function id of the vpath.
2542 * Returns the function number of the vpath.
2543 */
2544u32
2545__vxge_hw_vpath_func_id_get(u32 vp_id,
2546	struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg)
2547{
2548	u64 val64;
2549
2550	val64 = readq(&vpmgmt_reg->vpath_to_func_map_cfg1);
2551
2552	return
2553	 (u32)VXGE_HW_VPATH_TO_FUNC_MAP_CFG1_GET_VPATH_TO_FUNC_MAP_CFG1(val64);
2554}
2555
2556/*
2557 * __vxge_hw_read_rts_ds - Program RTS steering criteria
2558 */
2559static inline void
2560__vxge_hw_read_rts_ds(struct vxge_hw_vpath_reg __iomem *vpath_reg,
2561		      u64 dta_struct_sel)
2562{
2563	writeq(0, &vpath_reg->rts_access_steer_ctrl);
2564	wmb();
2565	writeq(dta_struct_sel, &vpath_reg->rts_access_steer_data0);
2566	writeq(0, &vpath_reg->rts_access_steer_data1);
2567	wmb();
2568}
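
/*
 * Note on the RTS (receive traffic steering) helpers that follow: they all
 * use the same access sequence.  __vxge_hw_read_rts_ds() (optionally)
 * selects a firmware data structure, then an action/table/offset word with
 * the STROBE bit set is written to rts_access_steer_ctrl via
 * __vxge_hw_pio_mem_write64(), which writes the value and polls the register
 * for completion (the strobe mask and a millisecond timeout are passed in).
 * On success RMACJ_STATUS is set and the results are read back from
 * rts_access_steer_data0/data1.
 */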
2569
2570
2571/*
2572 * __vxge_hw_vpath_card_info_get - Get the serial number,
2573 * part number and product description.
2574 */
2575enum vxge_hw_status
2576__vxge_hw_vpath_card_info_get(
2577	u32 vp_id,
2578	struct vxge_hw_vpath_reg __iomem *vpath_reg,
2579	struct vxge_hw_device_hw_info *hw_info)
2580{
2581	u32 i, j;
2582	u64 val64;
2583	u64 data1 = 0ULL;
2584	u64 data2 = 0ULL;
2585	enum vxge_hw_status status = VXGE_HW_OK;
2586	u8 *serial_number = hw_info->serial_number;
2587	u8 *part_number = hw_info->part_number;
2588	u8 *product_desc = hw_info->product_desc;
2589
2590	__vxge_hw_read_rts_ds(vpath_reg,
2591		VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_SERIAL_NUMBER);
2592
2593	val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
2594			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
2595		VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
2596			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
2597		VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
2598		VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
2599
2600	status = __vxge_hw_pio_mem_write64(val64,
2601				&vpath_reg->rts_access_steer_ctrl,
2602				VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
2603				VXGE_HW_DEF_DEVICE_POLL_MILLIS);
2604
2605	if (status != VXGE_HW_OK)
2606		return status;
2607
2608	val64 = readq(&vpath_reg->rts_access_steer_ctrl);
2609
2610	if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
2611		data1 = readq(&vpath_reg->rts_access_steer_data0);
2612		((u64 *)serial_number)[0] = be64_to_cpu(data1);
2613
2614		data2 = readq(&vpath_reg->rts_access_steer_data1);
2615		((u64 *)serial_number)[1] = be64_to_cpu(data2);
2616		status = VXGE_HW_OK;
2617	} else
2618		*serial_number = 0;
2619
2620	__vxge_hw_read_rts_ds(vpath_reg,
2621			VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PART_NUMBER);
2622
2623	val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
2624			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
2625		VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
2626			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
2627		VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
2628		VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
2629
2630	status = __vxge_hw_pio_mem_write64(val64,
2631				&vpath_reg->rts_access_steer_ctrl,
2632				VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
2633				VXGE_HW_DEF_DEVICE_POLL_MILLIS);
2634
2635	if (status != VXGE_HW_OK)
2636		return status;
2637
2638	val64 = readq(&vpath_reg->rts_access_steer_ctrl);
2639
2640	if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
2641
2642		data1 = readq(&vpath_reg->rts_access_steer_data0);
2643		((u64 *)part_number)[0] = be64_to_cpu(data1);
2644
2645		data2 = readq(&vpath_reg->rts_access_steer_data1);
2646		((u64 *)part_number)[1] = be64_to_cpu(data2);
2647
2648		status = VXGE_HW_OK;
2649
2650	} else
2651		*part_number = 0;
2652
2653	j = 0;
2654
2655	for (i = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_0;
2656	     i <= VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_3; i++) {
2657
2658		__vxge_hw_read_rts_ds(vpath_reg, i);
2659
2660		val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
2661			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
2662			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
2663			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
2664			VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
2665			VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
2666
2667		status = __vxge_hw_pio_mem_write64(val64,
2668				&vpath_reg->rts_access_steer_ctrl,
2669				VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
2670				VXGE_HW_DEF_DEVICE_POLL_MILLIS);
2671
2672		if (status != VXGE_HW_OK)
2673			return status;
2674
2675		val64 = readq(&vpath_reg->rts_access_steer_ctrl);
2676
2677		if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
2678
2679			data1 = readq(&vpath_reg->rts_access_steer_data0);
2680			((u64 *)product_desc)[j++] = be64_to_cpu(data1);
2681
2682			data2 = readq(&vpath_reg->rts_access_steer_data1);
2683			((u64 *)product_desc)[j++] = be64_to_cpu(data2);
2684
2685			status = VXGE_HW_OK;
2686		} else
2687			*product_desc = 0;
2688	}
2689
2690	return status;
2691}
2692
2693/*
2694 * __vxge_hw_vpath_fw_ver_get - Get the fw version
2695 * Returns FW Version
2696 */
2697enum vxge_hw_status
2698__vxge_hw_vpath_fw_ver_get(
2699	u32 vp_id,
2700	struct vxge_hw_vpath_reg __iomem *vpath_reg,
2701	struct vxge_hw_device_hw_info *hw_info)
2702{
2703	u64 val64;
2704	u64 data1 = 0ULL;
2705	u64 data2 = 0ULL;
2706	struct vxge_hw_device_version *fw_version = &hw_info->fw_version;
2707	struct vxge_hw_device_date *fw_date = &hw_info->fw_date;
2708	struct vxge_hw_device_version *flash_version = &hw_info->flash_version;
2709	struct vxge_hw_device_date *flash_date = &hw_info->flash_date;
2710	enum vxge_hw_status status = VXGE_HW_OK;
2711
2712	val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
2713		VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY) |
2714		VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
2715		VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
2716		VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
2717		VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
2718
2719	status = __vxge_hw_pio_mem_write64(val64,
2720				&vpath_reg->rts_access_steer_ctrl,
2721				VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
2722				VXGE_HW_DEF_DEVICE_POLL_MILLIS);
2723
2724	if (status != VXGE_HW_OK)
2725		goto exit;
2726
2727	val64 = readq(&vpath_reg->rts_access_steer_ctrl);
2728
2729	if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
2730
2731		data1 = readq(&vpath_reg->rts_access_steer_data0);
2732		data2 = readq(&vpath_reg->rts_access_steer_data1);
2733
2734		fw_date->day =
2735			(u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_DAY(
2736						data1);
2737		fw_date->month =
2738			(u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MONTH(
2739						data1);
2740		fw_date->year =
2741			(u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_YEAR(
2742						data1);
2743
2744		snprintf(fw_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d",
2745			fw_date->month, fw_date->day, fw_date->year);
2746
2747		fw_version->major =
2748		    (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data1);
2749		fw_version->minor =
2750		    (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data1);
2751		fw_version->build =
2752		    (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data1);
2753
2754		snprintf(fw_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
2755		    fw_version->major, fw_version->minor, fw_version->build);
2756
2757		flash_date->day =
2758		  (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_DAY(data2);
2759		flash_date->month =
2760		 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MONTH(data2);
2761		flash_date->year =
2762		 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_YEAR(data2);
2763
2764		snprintf(flash_date->date, VXGE_HW_FW_STRLEN,
2765			"%2.2d/%2.2d/%4.4d",
2766			flash_date->month, flash_date->day, flash_date->year);
2767
2768		flash_version->major =
2769		 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MAJOR(data2);
2770		flash_version->minor =
2771		 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MINOR(data2);
2772		flash_version->build =
2773		 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_BUILD(data2);
2774
2775		snprintf(flash_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
2776			flash_version->major, flash_version->minor,
2777			flash_version->build);
2778
2779		status = VXGE_HW_OK;
2780
2781	} else
2782		status = VXGE_HW_FAIL;
2783exit:
2784	return status;
2785}
2786
2787/*
2788 * __vxge_hw_vpath_pci_func_mode_get - Get the pci mode
2789 * Returns pci function mode
2790 */
2791u64
2792__vxge_hw_vpath_pci_func_mode_get(
2793	u32  vp_id,
2794	struct vxge_hw_vpath_reg __iomem *vpath_reg)
2795{
2796	u64 val64;
2797	u64 data1 = 0ULL;
2798	enum vxge_hw_status status = VXGE_HW_OK;
2799
2800	__vxge_hw_read_rts_ds(vpath_reg,
2801		VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PCI_MODE);
2802
2803	val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
2804			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
2805		VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
2806			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
2807		VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
2808		VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
2809
2810	status = __vxge_hw_pio_mem_write64(val64,
2811				&vpath_reg->rts_access_steer_ctrl,
2812				VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
2813				VXGE_HW_DEF_DEVICE_POLL_MILLIS);
2814
2815	if (status != VXGE_HW_OK)
2816		goto exit;
2817
2818	val64 = readq(&vpath_reg->rts_access_steer_ctrl);
2819
2820	if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
2821		data1 = readq(&vpath_reg->rts_access_steer_data0);
2822		status = VXGE_HW_OK;
2823	} else {
2824		data1 = 0;
2825		status = VXGE_HW_FAIL;
2826	}
2827exit:
2828	return data1;
2829}
2830
2831/**
2832 * vxge_hw_device_flick_link_led - Flick (blink) link LED.
2833 * @hldev: HW device.
2834 * @on_off: TRUE to turn flickering on, FALSE to turn it off
2835 *
2836 * Flicker the link LED.
2837 */
2838enum vxge_hw_status
2839vxge_hw_device_flick_link_led(struct __vxge_hw_device *hldev,
2840			       u64 on_off)
2841{
2842	u64 val64;
2843	enum vxge_hw_status status = VXGE_HW_OK;
2844	struct vxge_hw_vpath_reg __iomem *vp_reg;
2845
2846	if (hldev == NULL) {
2847		status = VXGE_HW_ERR_INVALID_DEVICE;
2848		goto exit;
2849	}
2850
2851	vp_reg = hldev->vpath_reg[hldev->first_vp_id];
2852
2853	writeq(0, &vp_reg->rts_access_steer_ctrl);
2854	wmb();
2855	writeq(on_off, &vp_reg->rts_access_steer_data0);
2856	writeq(0, &vp_reg->rts_access_steer_data1);
2857	wmb();
2858
2859	val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
2860			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LED_CONTROL) |
2861		VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
2862			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
2863		VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
2864		VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
2865
2866	status = __vxge_hw_pio_mem_write64(val64,
2867				&vp_reg->rts_access_steer_ctrl,
2868				VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
2869				VXGE_HW_DEF_DEVICE_POLL_MILLIS);
2870exit:
2871	return status;
2872}
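
/*
 * Illustrative use only (the 1/0 on_off encodings below are an assumption
 * of this sketch; the value is forwarded unchanged to firmware as the
 * argument of the LED_CONTROL action):
 *
 *	vxge_hw_device_flick_link_led(hldev, 1);	(start blinking)
 *	vxge_hw_device_flick_link_led(hldev, 0);	(stop blinking)
 */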
2873
2874/*
2875 * __vxge_hw_vpath_rts_table_get - Get the entries from RTS access tables
2876 */
2877enum vxge_hw_status
2878__vxge_hw_vpath_rts_table_get(
2879	struct __vxge_hw_vpath_handle *vp,
2880	u32 action, u32 rts_table, u32 offset, u64 *data1, u64 *data2)
2881{
2882	u64 val64;
2883	struct __vxge_hw_virtualpath *vpath;
2884	struct vxge_hw_vpath_reg __iomem *vp_reg;
2885
2886	enum vxge_hw_status status = VXGE_HW_OK;
2887
2888	if (vp == NULL) {
2889		status = VXGE_HW_ERR_INVALID_HANDLE;
2890		goto exit;
2891	}
2892
2893	vpath = vp->vpath;
2894	vp_reg = vpath->vp_reg;
2895
2896	val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) |
2897		VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(rts_table) |
2898		VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
2899		VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset);
2900
2901	if ((rts_table ==
2902		VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT) ||
2903	    (rts_table ==
2904		VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT) ||
2905	    (rts_table ==
2906		VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK) ||
2907	    (rts_table ==
2908		VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY)) {
2909		val64 = val64 |	VXGE_HW_RTS_ACCESS_STEER_CTRL_TABLE_SEL;
2910	}
2911
2912	status = __vxge_hw_pio_mem_write64(val64,
2913				&vp_reg->rts_access_steer_ctrl,
2914				VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
2915				vpath->hldev->config.device_poll_millis);
2916
2917	if (status != VXGE_HW_OK)
2918		goto exit;
2919
2920	val64 = readq(&vp_reg->rts_access_steer_ctrl);
2921
2922	if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
2923
2924		*data1 = readq(&vp_reg->rts_access_steer_data0);
2925
2926		if ((rts_table ==
2927		VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) ||
2928		(rts_table ==
2929		VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT)) {
2930			*data2 = readq(&vp_reg->rts_access_steer_data1);
2931		}
2932		status = VXGE_HW_OK;
2933	} else
2934		status = VXGE_HW_FAIL;
2935exit:
2936	return status;
2937}
2938
2939/*
2940 * __vxge_hw_vpath_rts_table_set - Set the entries of RTS access tables
2941 */
2942enum vxge_hw_status
2943__vxge_hw_vpath_rts_table_set(
2944	struct __vxge_hw_vpath_handle *vp, u32 action, u32 rts_table,
2945	u32 offset, u64 data1, u64 data2)
2946{
2947	u64 val64;
2948	struct __vxge_hw_virtualpath *vpath;
2949	enum vxge_hw_status status = VXGE_HW_OK;
2950	struct vxge_hw_vpath_reg __iomem *vp_reg;
2951
2952	if (vp == NULL) {
2953		status = VXGE_HW_ERR_INVALID_HANDLE;
2954		goto exit;
2955	}
2956
2957	vpath = vp->vpath;
2958	vp_reg = vpath->vp_reg;
2959
2960	writeq(data1, &vp_reg->rts_access_steer_data0);
2961	wmb();
2962
2963	if ((rts_table == VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) ||
2964	    (rts_table ==
2965		VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT)) {
2966		writeq(data2, &vp_reg->rts_access_steer_data1);
2967		wmb();
2968	}
2969
2970	val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) |
2971		VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(rts_table) |
2972		VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
2973		VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset);
2974
2975	status = __vxge_hw_pio_mem_write64(val64,
2976				&vp_reg->rts_access_steer_ctrl,
2977				VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
2978				vpath->hldev->config.device_poll_millis);
2979
2980	if (status != VXGE_HW_OK)
2981		goto exit;
2982
2983	val64 = readq(&vp_reg->rts_access_steer_ctrl);
2984
2985	if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS)
2986		status = VXGE_HW_OK;
2987	else
2988		status = VXGE_HW_FAIL;
2989exit:
2990	return status;
2991}
2992
2993/*
2994 * __vxge_hw_vpath_addr_get - Get the hw address entry for this vpath
2995 *               from MAC address table.
2996 */
2997enum vxge_hw_status
2998__vxge_hw_vpath_addr_get(
2999	u32 vp_id, struct vxge_hw_vpath_reg __iomem *vpath_reg,
3000	u8 (macaddr)[ETH_ALEN], u8 (macaddr_mask)[ETH_ALEN])
3001{
3002	u32 i;
3003	u64 val64;
3004	u64 data1 = 0ULL;
3005	u64 data2 = 0ULL;
3006	enum vxge_hw_status status = VXGE_HW_OK;
3007
3008	val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
3009		VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY) |
3010		VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
3011		VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) |
3012		VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
3013		VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
3014
3015	status = __vxge_hw_pio_mem_write64(val64,
3016				&vpath_reg->rts_access_steer_ctrl,
3017				VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
3018				VXGE_HW_DEF_DEVICE_POLL_MILLIS);
3019
3020	if (status != VXGE_HW_OK)
3021		goto exit;
3022
3023	val64 = readq(&vpath_reg->rts_access_steer_ctrl);
3024
3025	if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
3026
3027		data1 = readq(&vpath_reg->rts_access_steer_data0);
3028		data2 = readq(&vpath_reg->rts_access_steer_data1);
3029
3030		data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
3031		data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(
3032							data2);
3033
3034		for (i = ETH_ALEN; i > 0; i--) {
3035			macaddr[i-1] = (u8)(data1 & 0xFF);
3036			data1 >>= 8;
3037
3038			macaddr_mask[i-1] = (u8)(data2 & 0xFF);
3039			data2 >>= 8;
3040		}
3041		status = VXGE_HW_OK;
3042	} else
3043		status = VXGE_HW_FAIL;
3044exit:
3045	return status;
3046}
3047
3048/*
3049 * vxge_hw_vpath_rts_rth_set - Set/configure RTS hashing.
3050 */
3051enum vxge_hw_status vxge_hw_vpath_rts_rth_set(
3052			struct __vxge_hw_vpath_handle *vp,
3053			enum vxge_hw_rth_algoritms algorithm,
3054			struct vxge_hw_rth_hash_types *hash_type,
3055			u16 bucket_size)
3056{
3057	u64 data0, data1;
3058	enum vxge_hw_status status = VXGE_HW_OK;
3059
3060	if (vp == NULL) {
3061		status = VXGE_HW_ERR_INVALID_HANDLE;
3062		goto exit;
3063	}
3064
3065	status = __vxge_hw_vpath_rts_table_get(vp,
3066		     VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY,
3067		     VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG,
3068			0, &data0, &data1);
3069
3070	data0 &= ~(VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(0xf) |
3071			VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(0x3));
3072
3073	data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_EN |
3074	VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(bucket_size) |
3075	VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(algorithm);
3076
3077	if (hash_type->hash_type_tcpipv4_en)
3078		data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV4_EN;
3079
3080	if (hash_type->hash_type_ipv4_en)
3081		data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV4_EN;
3082
3083	if (hash_type->hash_type_tcpipv6_en)
3084		data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV6_EN;
3085
3086	if (hash_type->hash_type_ipv6_en)
3087		data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV6_EN;
3088
3089	if (hash_type->hash_type_tcpipv6ex_en)
3090		data0 |=
3091		VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV6_EX_EN;
3092
3093	if (hash_type->hash_type_ipv6ex_en)
3094		data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV6_EX_EN;
3095
3096	if (VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_ACTIVE_TABLE(data0))
3097		data0 &= ~VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ACTIVE_TABLE;
3098	else
3099		data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ACTIVE_TABLE;
3100
3101	status = __vxge_hw_vpath_rts_table_set(vp,
3102		VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY,
3103		VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG,
3104		0, data0, 0);
3105exit:
3106	return status;
3107}
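
/*
 * Illustrative use only ("vp" is an assumed, already-opened vpath handle
 * and RTH_ALG_JENKINS is the Jenkins-hash enumerator assumed to be defined
 * in vxge-config.h): enabling RTH for TCP/IPv4 traffic might look like:
 *
 *	struct vxge_hw_rth_hash_types ht = {0};
 *	enum vxge_hw_status status;
 *
 *	ht.hash_type_tcpipv4_en = 1;
 *	status = vxge_hw_vpath_rts_rth_set(vp, RTH_ALG_JENKINS, &ht, 4);
 *
 * The same size value is normally passed as itable_size to
 * vxge_hw_vpath_rts_rth_itable_set() below, which interprets it as a
 * power-of-two exponent (max_entries = 1 << itable_size).
 */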
3108
3109static void
3110vxge_hw_rts_rth_data0_data1_get(u32 j, u64 *data0, u64 *data1,
3111				u16 flag, u8 *itable)
3112{
3113	switch (flag) {
3114	case 1:
3115		*data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_NUM(j)|
3116			VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_ENTRY_EN |
3117			VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_DATA(
3118			itable[j]);
		return;
3119	case 2:
3120		*data0 |=
3121			VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_NUM(j)|
3122			VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_ENTRY_EN |
3123			VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_DATA(
3124			itable[j]);
		return;
3125	case 3:
3126		*data1 = VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_NUM(j)|
3127			VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_ENTRY_EN |
3128			VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_DATA(
3129			itable[j]);
		return;
3130	case 4:
3131		*data1 |=
3132			VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_NUM(j)|
3133			VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_ENTRY_EN |
3134			VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_DATA(
3135			itable[j]);
		return;
3136	default:
3137		return;
3138	}
3139}
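
/*
 * Packing note for the helper above: in the multi-IT case a single RTS
 * write can program up to four indirection-table entries.  data0 carries
 * items 0 and 1, data1 carries items 2 and 3, and the flag argument (1..4)
 * selects which of those slots receives the bucket number and bucket data
 * for entry j.  The caller below gathers the next four entries that map to
 * the same vpath before issuing one table write.
 */
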
3140/*
3141 * vxge_hw_vpath_rts_rth_itable_set - Set/configure indirection table (IT).
3142 */
3143enum vxge_hw_status vxge_hw_vpath_rts_rth_itable_set(
3144			struct __vxge_hw_vpath_handle **vpath_handles,
3145			u32 vpath_count,
3146			u8 *mtable,
3147			u8 *itable,
3148			u32 itable_size)
3149{
3150	u32 i, j, action, rts_table;
3151	u64 data0;
3152	u64 data1;
3153	u32 max_entries;
3154	enum vxge_hw_status status = VXGE_HW_OK;
3155	struct __vxge_hw_vpath_handle *vp = vpath_handles[0];
3156
3157	if (vp == NULL) {
3158		status = VXGE_HW_ERR_INVALID_HANDLE;
3159		goto exit;
3160	}
3161
3162	max_entries = (((u32)1) << itable_size);
3163
3164	if (vp->vpath->hldev->config.rth_it_type
3165				== VXGE_HW_RTH_IT_TYPE_SOLO_IT) {
3166		action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY;
3167		rts_table =
3168			VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT;
3169
3170		for (j = 0; j < max_entries; j++) {
3171
3172			data1 = 0;
3173
3174			data0 =
3175			VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_BUCKET_DATA(
3176				itable[j]);
3177
3178			status = __vxge_hw_vpath_rts_table_set(vpath_handles[0],
3179				action, rts_table, j, data0, data1);
3180
3181			if (status != VXGE_HW_OK)
3182				goto exit;
3183		}
3184
3185		for (j = 0; j < max_entries; j++) {
3186
3187			data1 = 0;
3188
3189			data0 =
3190			VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_ENTRY_EN |
3191			VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_BUCKET_DATA(
3192				itable[j]);
3193
3194			status = __vxge_hw_vpath_rts_table_set(
3195				vpath_handles[mtable[itable[j]]], action,
3196				rts_table, j, data0, data1);
3197
3198			if (status != VXGE_HW_OK)
3199				goto exit;
3200		}
3201	} else {
3202		action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY;
3203		rts_table =
3204			VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT;
3205		for (i = 0; i < vpath_count; i++) {
3206
3207			for (j = 0; j < max_entries;) {
3208
3209				data0 = 0;
3210				data1 = 0;
3211
3212				while (j < max_entries) {
3213					if (mtable[itable[j]] != i) {
3214						j++;
3215						continue;
3216					}
3217					vxge_hw_rts_rth_data0_data1_get(j,
3218						&data0, &data1, 1, itable);
3219					j++;
3220					break;
3221				}
3222
3223				while (j < max_entries) {
3224					if (mtable[itable[j]] != i) {
3225						j++;
3226						continue;
3227					}
3228					vxge_hw_rts_rth_data0_data1_get(j,
3229						&data0, &data1, 2, itable);
3230					j++;
3231					break;
3232				}
3233
3234				while (j < max_entries) {
3235					if (mtable[itable[j]] != i) {
3236						j++;
3237						continue;
3238					}
3239					vxge_hw_rts_rth_data0_data1_get(j,
3240						&data0, &data1, 3, itable);
3241					j++;
3242					break;
3243				}
3244
3245				while (j < max_entries) {
3246					if (mtable[itable[j]] != i) {
3247						j++;
3248						continue;
3249					}
3250					vxge_hw_rts_rth_data0_data1_get(j,
3251						&data0, &data1, 4, itable);
3252					j++;
3253					break;
3254				}
3255
3256				if (data0 != 0) {
3257					status = __vxge_hw_vpath_rts_table_set(
3258							vpath_handles[i],
3259							action, rts_table,
3260							0, data0, data1);
3261
3262					if (status != VXGE_HW_OK)
3263						goto exit;
3264				}
3265			}
3266		}
3267	}
3268exit:
3269	return status;
3270}
3271
3272/**
3273 * vxge_hw_vpath_check_leak - Check for memory leak
3274 * @ring: Handle to the ring object used for receive
3275 *
3276 * If PRC_RXD_DOORBELL_VPn.NEW_QW_CNT is larger than or equal to
3277 * PRC_CFG6_VPn.RXD_SPAT then a leak has occurred.
3278 * Returns: VXGE_HW_FAIL, if leak has occurred.
3279 *
3280 */
3281enum vxge_hw_status
3282vxge_hw_vpath_check_leak(struct __vxge_hw_ring *ring)
3283{
3284	enum vxge_hw_status status = VXGE_HW_OK;
3285	u64 rxd_new_count, rxd_spat;
3286
3287	if (ring == NULL)
3288		return status;
3289
3290	rxd_new_count = readl(&ring->vp_reg->prc_rxd_doorbell);
3291	rxd_spat = readq(&ring->vp_reg->prc_cfg6);
3292	rxd_spat = VXGE_HW_PRC_CFG6_RXD_SPAT(rxd_spat);
3293
3294	if (rxd_new_count >= rxd_spat)
3295		status = VXGE_HW_FAIL;
3296
3297	return status;
3298}
3299
3300/*
3301 * __vxge_hw_vpath_mgmt_read
3302 * This routine reads the vpath_mgmt registers
3303 */
3304static enum vxge_hw_status
3305__vxge_hw_vpath_mgmt_read(
3306	struct __vxge_hw_device *hldev,
3307	struct __vxge_hw_virtualpath *vpath)
3308{
3309	u32 i, mtu = 0, max_pyld = 0;
3310	u64 val64;
3311	enum vxge_hw_status status = VXGE_HW_OK;
3312
3313	for (i = 0; i < VXGE_HW_MAC_MAX_MAC_PORT_ID; i++) {
3314
3315		val64 = readq(&vpath->vpmgmt_reg->
3316				rxmac_cfg0_port_vpmgmt_clone[i]);
3317		max_pyld =
3318			(u32)
3319			VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_GET_MAX_PYLD_LEN
3320			(val64);
3321		if (mtu < max_pyld)
3322			mtu = max_pyld;
3323	}
3324
3325	vpath->max_mtu = mtu + VXGE_HW_MAC_HEADER_MAX_SIZE;
3326
3327	val64 = readq(&vpath->vpmgmt_reg->xmac_vsport_choices_vp);
3328
3329	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
3330		if (val64 & vxge_mBIT(i))
3331			vpath->vsport_number = i;
3332	}
3333
3334	val64 = readq(&vpath->vpmgmt_reg->xgmac_gen_status_vpmgmt_clone);
3335
3336	if (val64 & VXGE_HW_XGMAC_GEN_STATUS_VPMGMT_CLONE_XMACJ_NTWK_OK)
3337		VXGE_HW_DEVICE_LINK_STATE_SET(vpath->hldev, VXGE_HW_LINK_UP);
3338	else
3339		VXGE_HW_DEVICE_LINK_STATE_SET(vpath->hldev, VXGE_HW_LINK_DOWN);
3340
3341	return status;
3342}
3343
3344/*
3345 * __vxge_hw_vpath_reset_check - Check if resetting the vpath completed
3346 * This routine checks the vpath_rst_in_prog register to see if the
3347 * adapter has completed the reset process for the vpath
3348 */
3349enum vxge_hw_status
3350__vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath)
3351{
3352	enum vxge_hw_status status;
3353
3354	status = __vxge_hw_device_register_poll(
3355			&vpath->hldev->common_reg->vpath_rst_in_prog,
3356			VXGE_HW_VPATH_RST_IN_PROG_VPATH_RST_IN_PROG(
3357				1 << (16 - vpath->vp_id)),
3358			vpath->hldev->config.device_poll_millis);
3359
3360	return status;
3361}
3362
3363/*
3364 * __vxge_hw_vpath_reset
3365 * This routine resets the vpath on the device
3366 */
3367enum vxge_hw_status
3368__vxge_hw_vpath_reset(struct __vxge_hw_device *hldev, u32 vp_id)
3369{
3370	u64 val64;
3371	enum vxge_hw_status status = VXGE_HW_OK;
3372
3373	val64 = VXGE_HW_CMN_RSTHDLR_CFG0_SW_RESET_VPATH(1 << (16 - vp_id));
3374
3375	__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
3376				&hldev->common_reg->cmn_rsthdlr_cfg0);
3377
3378	return status;
3379}
3380
3381/*
3382 * __vxge_hw_vpath_sw_reset
3383 * This routine resets the vpath structures
3384 */
3385enum vxge_hw_status
3386__vxge_hw_vpath_sw_reset(struct __vxge_hw_device *hldev, u32 vp_id)
3387{
3388	enum vxge_hw_status status = VXGE_HW_OK;
3389	struct __vxge_hw_virtualpath *vpath;
3390
3391	vpath = (struct __vxge_hw_virtualpath *)&hldev->virtual_paths[vp_id];
3392
3393	if (vpath->ringh) {
3394		status = __vxge_hw_ring_reset(vpath->ringh);
3395		if (status != VXGE_HW_OK)
3396			goto exit;
3397	}
3398
3399	if (vpath->fifoh)
3400		status = __vxge_hw_fifo_reset(vpath->fifoh);
3401exit:
3402	return status;
3403}
3404
3405/*
3406 * __vxge_hw_vpath_prc_configure
3407 * This routine configures the prc registers of the virtual path using the config
3408 * passed
3409 */
3410void
3411__vxge_hw_vpath_prc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
3412{
3413	u64 val64;
3414	struct __vxge_hw_virtualpath *vpath;
3415	struct vxge_hw_vp_config *vp_config;
3416	struct vxge_hw_vpath_reg __iomem *vp_reg;
3417
3418	vpath = &hldev->virtual_paths[vp_id];
3419	vp_reg = vpath->vp_reg;
3420	vp_config = vpath->vp_config;
3421
3422	if (vp_config->ring.enable == VXGE_HW_RING_DISABLE)
3423		return;
3424
3425	val64 = readq(&vp_reg->prc_cfg1);
3426	val64 |= VXGE_HW_PRC_CFG1_RTI_TINT_DISABLE;
3427	writeq(val64, &vp_reg->prc_cfg1);
3428
3429	val64 = readq(&vpath->vp_reg->prc_cfg6);
3430	val64 |= VXGE_HW_PRC_CFG6_DOORBELL_MODE_EN;
3431	writeq(val64, &vpath->vp_reg->prc_cfg6);
3432
3433	val64 = readq(&vp_reg->prc_cfg7);
3434
3435	if (vpath->vp_config->ring.scatter_mode !=
3436		VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT) {
3437
3438		val64 &= ~VXGE_HW_PRC_CFG7_SCATTER_MODE(0x3);
3439
3440		switch (vpath->vp_config->ring.scatter_mode) {
3441		case VXGE_HW_RING_SCATTER_MODE_A:
3442			val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE(
3443					VXGE_HW_PRC_CFG7_SCATTER_MODE_A);
3444			break;
3445		case VXGE_HW_RING_SCATTER_MODE_B:
3446			val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE(
3447					VXGE_HW_PRC_CFG7_SCATTER_MODE_B);
3448			break;
3449		case VXGE_HW_RING_SCATTER_MODE_C:
3450			val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE(
3451					VXGE_HW_PRC_CFG7_SCATTER_MODE_C);
3452			break;
3453		}
3454	}
3455
3456	writeq(val64, &vp_reg->prc_cfg7);
3457
3458	writeq(VXGE_HW_PRC_CFG5_RXD0_ADD(
3459				__vxge_hw_ring_first_block_address_get(
3460					vpath->ringh) >> 3), &vp_reg->prc_cfg5);
3461
3462	val64 = readq(&vp_reg->prc_cfg4);
3463	val64 |= VXGE_HW_PRC_CFG4_IN_SVC;
3464	val64 &= ~VXGE_HW_PRC_CFG4_RING_MODE(0x3);
3465
3466	val64 |= VXGE_HW_PRC_CFG4_RING_MODE(
3467			VXGE_HW_PRC_CFG4_RING_MODE_ONE_BUFFER);
3468
3469	if (hldev->config.rth_en == VXGE_HW_RTH_DISABLE)
3470		val64 |= VXGE_HW_PRC_CFG4_RTH_DISABLE;
3471	else
3472		val64 &= ~VXGE_HW_PRC_CFG4_RTH_DISABLE;
3473
3474	writeq(val64, &vp_reg->prc_cfg4);
3475}
3476
3477/*
3478 * __vxge_hw_vpath_kdfc_configure
3479 * This routine configures the kdfc registers of the virtual path using the
3480 * config passed
3481 */
3482enum vxge_hw_status
3483__vxge_hw_vpath_kdfc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
3484{
3485	u64 val64;
3486	u64 vpath_stride;
3487	enum vxge_hw_status status = VXGE_HW_OK;
3488	struct __vxge_hw_virtualpath *vpath;
3489	struct vxge_hw_vpath_reg __iomem *vp_reg;
3490
3491	vpath = &hldev->virtual_paths[vp_id];
3492	vp_reg = vpath->vp_reg;
3493	status = __vxge_hw_kdfc_swapper_set(hldev->legacy_reg, vp_reg);
3494
3495	if (status != VXGE_HW_OK)
3496		goto exit;
3497
3498	val64 = readq(&vp_reg->kdfc_drbl_triplet_total);
3499
3500	vpath->max_kdfc_db =
3501		(u32)VXGE_HW_KDFC_DRBL_TRIPLET_TOTAL_GET_KDFC_MAX_SIZE(
3502			val64+1)/2;
3503
3504	if (vpath->vp_config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
3505
3506		vpath->max_nofl_db = vpath->max_kdfc_db;
3507
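		/*
		 * Sanity check: the TxDL count implied by the fifo
		 * configuration (TxDLs per memblock times fifo_blocks) must
		 * fit within the non-offload doorbell budget derived from
		 * kdfc_drbl_triplet_total above, otherwise the fifo block
		 * count is rejected as a bad configuration.
		 */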
3508		if (vpath->max_nofl_db <
3509			((vpath->vp_config->fifo.memblock_size /
3510			(vpath->vp_config->fifo.max_frags *
3511			sizeof(struct vxge_hw_fifo_txd))) *
3512			vpath->vp_config->fifo.fifo_blocks)) {
3513
3514			return VXGE_HW_BADCFG_FIFO_BLOCKS;
3515		}
3516		val64 = VXGE_HW_KDFC_FIFO_TRPL_PARTITION_LENGTH_0(
3517				(vpath->max_nofl_db*2)-1);
3518	}
3519
3520	writeq(val64, &vp_reg->kdfc_fifo_trpl_partition);
3521
3522	writeq(VXGE_HW_KDFC_FIFO_TRPL_CTRL_TRIPLET_ENABLE,
3523		&vp_reg->kdfc_fifo_trpl_ctrl);
3524
3525	val64 = readq(&vp_reg->kdfc_trpl_fifo_0_ctrl);
3526
3527	val64 &= ~(VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE(0x3) |
3528		   VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SELECT(0xFF));
3529
3530	val64 |= VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE(
3531		 VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE_NON_OFFLOAD_ONLY) |
3532#ifndef __BIG_ENDIAN
3533		 VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SWAP_EN |
3534#endif
3535		 VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SELECT(0);
3536
3537	writeq(val64, &vp_reg->kdfc_trpl_fifo_0_ctrl);
3538	writeq((u64)0, &vp_reg->kdfc_trpl_fifo_0_wb_address);
3539	wmb();
3540	vpath_stride = readq(&hldev->toc_reg->toc_kdfc_vpath_stride);
3541
3542	vpath->nofl_db =
3543		(struct __vxge_hw_non_offload_db_wrapper __iomem *)
3544		(hldev->kdfc + (vp_id *
3545		VXGE_HW_TOC_KDFC_VPATH_STRIDE_GET_TOC_KDFC_VPATH_STRIDE(
3546					vpath_stride)));
3547exit:
3548	return status;
3549}
3550
3551/*
3552 * __vxge_hw_vpath_mac_configure
3553 * This routine configures the mac of the virtual path using the config passed
3554 */
3555enum vxge_hw_status
3556__vxge_hw_vpath_mac_configure(struct __vxge_hw_device *hldev, u32 vp_id)
3557{
3558	u64 val64;
3559	enum vxge_hw_status status = VXGE_HW_OK;
3560	struct __vxge_hw_virtualpath *vpath;
3561	struct vxge_hw_vp_config *vp_config;
3562	struct vxge_hw_vpath_reg __iomem *vp_reg;
3563
3564	vpath = &hldev->virtual_paths[vp_id];
3565	vp_reg = vpath->vp_reg;
3566	vp_config = vpath->vp_config;
3567
3568	writeq(VXGE_HW_XMAC_VSPORT_CHOICE_VSPORT_NUMBER(
3569			vpath->vsport_number), &vp_reg->xmac_vsport_choice);
3570
3571	if (vp_config->ring.enable == VXGE_HW_RING_ENABLE) {
3572
3573		val64 = readq(&vp_reg->xmac_rpa_vcfg);
3574
3575		if (vp_config->rpa_strip_vlan_tag !=
3576			VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT) {
3577			if (vp_config->rpa_strip_vlan_tag)
3578				val64 |= VXGE_HW_XMAC_RPA_VCFG_STRIP_VLAN_TAG;
3579			else
3580				val64 &= ~VXGE_HW_XMAC_RPA_VCFG_STRIP_VLAN_TAG;
3581		}
3582
3583		writeq(val64, &vp_reg->xmac_rpa_vcfg);
3584		val64 = readq(&vp_reg->rxmac_vcfg0);
3585
3586		if (vp_config->mtu !=
3587				VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU) {
3588			val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
3589			if ((vp_config->mtu  +
3590				VXGE_HW_MAC_HEADER_MAX_SIZE) < vpath->max_mtu)
3591				val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(
3592					vp_config->mtu  +
3593					VXGE_HW_MAC_HEADER_MAX_SIZE);
3594			else
3595				val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(
3596					vpath->max_mtu);
3597		}
3598
3599		writeq(val64, &vp_reg->rxmac_vcfg0);
3600
3601		val64 = readq(&vp_reg->rxmac_vcfg1);
3602
3603		val64 &= ~(VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_BD_MODE(0x3) |
3604			VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_EN_MODE);
3605
3606		if (hldev->config.rth_it_type ==
3607				VXGE_HW_RTH_IT_TYPE_MULTI_IT) {
3608			val64 |= VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_BD_MODE(
3609				0x2) |
3610				VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_EN_MODE;
3611		}
3612
3613		writeq(val64, &vp_reg->rxmac_vcfg1);
3614	}
3615	return status;
3616}
3617
3618/*
3619 * __vxge_hw_vpath_tim_configure
3620 * This routine configures the tim registers of the virtual path using the config
3621 * passed
3622 */
3623enum vxge_hw_status
3624__vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
3625{
3626	u64 val64;
3627	enum vxge_hw_status status = VXGE_HW_OK;
3628	struct __vxge_hw_virtualpath *vpath;
3629	struct vxge_hw_vpath_reg __iomem *vp_reg;
3630	struct vxge_hw_vp_config *config;
3631
3632	vpath = &hldev->virtual_paths[vp_id];
3633	vp_reg = vpath->vp_reg;
3634	config = vpath->vp_config;
3635
3636	writeq((u64)0, &vp_reg->tim_dest_addr);
3637	writeq((u64)0, &vp_reg->tim_vpath_map);
3638	writeq((u64)0, &vp_reg->tim_bitmap);
3639	writeq((u64)0, &vp_reg->tim_remap);
3640
3641	if (config->ring.enable == VXGE_HW_RING_ENABLE)
3642		writeq(VXGE_HW_TIM_RING_ASSN_INT_NUM(
3643			(vp_id * VXGE_HW_MAX_INTR_PER_VP) +
3644			VXGE_HW_VPATH_INTR_RX), &vp_reg->tim_ring_assn);
3645
3646	val64 = readq(&vp_reg->tim_pci_cfg);
3647	val64 |= VXGE_HW_TIM_PCI_CFG_ADD_PAD;
3648	writeq(val64, &vp_reg->tim_pci_cfg);
3649
3650	if (config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
3651
3652		val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
3653
3654		if (config->tti.btimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
3655			val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
3656				0x3ffffff);
3657			val64 |= VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
3658					config->tti.btimer_val);
3659		}
3660
3661		val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BITMP_EN;
3662
3663		if (config->tti.timer_ac_en != VXGE_HW_USE_FLASH_DEFAULT) {
3664			if (config->tti.timer_ac_en)
3665				val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
3666			else
3667				val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
3668		}
3669
3670		if (config->tti.timer_ci_en != VXGE_HW_USE_FLASH_DEFAULT) {
3671			if (config->tti.timer_ci_en)
3672				val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
3673			else
3674				val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
3675		}
3676
3677		if (config->tti.urange_a != VXGE_HW_USE_FLASH_DEFAULT) {
3678			val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(0x3f);
3679			val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(
3680					config->tti.urange_a);
3681		}
3682
3683		if (config->tti.urange_b != VXGE_HW_USE_FLASH_DEFAULT) {
3684			val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(0x3f);
3685			val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(
3686					config->tti.urange_b);
3687		}
3688
3689		if (config->tti.urange_c != VXGE_HW_USE_FLASH_DEFAULT) {
3690			val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(0x3f);
3691			val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(
3692					config->tti.urange_c);
3693		}
3694
3695		writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
3696		val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]);
3697
3698		if (config->tti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) {
3699			val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(0xffff);
3700			val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(
3701						config->tti.uec_a);
3702		}
3703
3704		if (config->tti.uec_b != VXGE_HW_USE_FLASH_DEFAULT) {
3705			val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(0xffff);
3706			val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(
3707						config->tti.uec_b);
3708		}
3709
3710		if (config->tti.uec_c != VXGE_HW_USE_FLASH_DEFAULT) {
3711			val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(0xffff);
3712			val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(
3713						config->tti.uec_c);
3714		}
3715
3716		if (config->tti.uec_d != VXGE_HW_USE_FLASH_DEFAULT) {
3717			val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(0xffff);
3718			val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(
3719						config->tti.uec_d);
3720		}
3721
3722		writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]);
3723		val64 = readq(&vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
3724
3725		if (config->tti.timer_ri_en != VXGE_HW_USE_FLASH_DEFAULT) {
3726			if (config->tti.timer_ri_en)
3727				val64 |= VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
3728			else
3729				val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
3730		}
3731
3732		if (config->tti.rtimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
3733			val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
3734					0x3ffffff);
3735			val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
3736					config->tti.rtimer_val);
3737		}
3738
3739		if (config->tti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) {
3740			val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f);
3741			val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(
3742					config->tti.util_sel);
3743		}
3744
3745		if (config->tti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
3746			val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
3747					0x3ffffff);
3748			val64 |= VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
3749					config->tti.ltimer_val);
3750		}
3751
3752		writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
3753	}
3754
3755	if (config->ring.enable == VXGE_HW_RING_ENABLE) {
3756
3757		val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
3758
3759		if (config->rti.btimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
3760			val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
3761					0x3ffffff);
3762			val64 |= VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
3763					config->rti.btimer_val);
3764		}
3765
3766		val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BITMP_EN;
3767
3768		if (config->rti.timer_ac_en != VXGE_HW_USE_FLASH_DEFAULT) {
3769			if (config->rti.timer_ac_en)
3770				val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
3771			else
3772				val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
3773		}
3774
3775		if (config->rti.timer_ci_en != VXGE_HW_USE_FLASH_DEFAULT) {
3776			if (config->rti.timer_ci_en)
3777				val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
3778			else
3779				val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
3780		}
3781
3782		if (config->rti.urange_a != VXGE_HW_USE_FLASH_DEFAULT) {
3783			val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(0x3f);
3784			val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(
3785					config->rti.urange_a);
3786		}
3787
3788		if (config->rti.urange_b != VXGE_HW_USE_FLASH_DEFAULT) {
3789			val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(0x3f);
3790			val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(
3791					config->rti.urange_b);
3792		}
3793
3794		if (config->rti.urange_c != VXGE_HW_USE_FLASH_DEFAULT) {
3795			val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(0x3f);
3796			val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(
3797					config->rti.urange_c);
3798		}
3799
3800		writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
3801		val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]);
3802
3803		if (config->rti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) {
3804			val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(0xffff);
3805			val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(
3806						config->rti.uec_a);
3807		}
3808
3809		if (config->rti.uec_b != VXGE_HW_USE_FLASH_DEFAULT) {
3810			val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(0xffff);
3811			val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(
3812						config->rti.uec_b);
3813		}
3814
3815		if (config->rti.uec_c != VXGE_HW_USE_FLASH_DEFAULT) {
3816			val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(0xffff);
3817			val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(
3818						config->rti.uec_c);
3819		}
3820
3821		if (config->rti.uec_d != VXGE_HW_USE_FLASH_DEFAULT) {
3822			val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(0xffff);
3823			val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(
3824						config->rti.uec_d);
3825		}
3826
3827		writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]);
3828		val64 = readq(&vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
3829
3830		if (config->rti.timer_ri_en != VXGE_HW_USE_FLASH_DEFAULT) {
3831			if (config->rti.timer_ri_en)
3832				val64 |= VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
3833			else
3834				val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
3835		}
3836
3837		if (config->rti.rtimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
3838			val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
3839					0x3ffffff);
3840			val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
3841					config->rti.rtimer_val);
3842		}
3843
3844		if (config->rti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) {
3845			val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f);
3846			val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(
3847					config->rti.util_sel);
3848		}
3849
3850		if (config->rti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
3851			val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
3852					0x3ffffff);
3853			val64 |= VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
3854					config->rti.ltimer_val);
3855		}
3856
3857		writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
3858	}
3859
3860	val64 = 0;
3861	writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_EINTA]);
3862	writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_EINTA]);
3863	writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_EINTA]);
3864	writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_BMAP]);
3865	writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_BMAP]);
3866	writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_BMAP]);
3867
3868	return status;
3869}
3870
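/*
 * vxge_hw_vpath_tti_ci_set - Enable continuous interrupts on the TX timer
 * Sets the TIM_CFG1 TIMER_CI bit for the vpath's TX interrupt (and the
 * corresponding tti configuration flag) when the fifo is enabled, so the
 * transmit interrupt timer runs in continuous-interrupt mode.
 */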
3871void
3872vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id)
3873{
3874	struct __vxge_hw_virtualpath *vpath;
3875	struct vxge_hw_vpath_reg __iomem *vp_reg;
3876	struct vxge_hw_vp_config *config;
3877	u64 val64;
3878
3879	vpath = &hldev->virtual_paths[vp_id];
3880	vp_reg = vpath->vp_reg;
3881	config = vpath->vp_config;
3882
3883	if (config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
3884		val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
3885
3886		if (config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) {
3887			config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE;
3888			val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
3889			writeq(val64,
3890			&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
3891		}
3892	}
3893}
3894/*
3895 * __vxge_hw_vpath_initialize
3896 * This routine is the final phase of init which initializes the
3897 * registers of the vpath using the configuration passed.
3898 */
3899enum vxge_hw_status
3900__vxge_hw_vpath_initialize(struct __vxge_hw_device *hldev, u32 vp_id)
3901{
3902	u64 val64;
3903	u32 val32;
3904	enum vxge_hw_status status = VXGE_HW_OK;
3905	struct __vxge_hw_virtualpath *vpath;
3906	struct vxge_hw_vpath_reg __iomem *vp_reg;
3907
3908	vpath = &hldev->virtual_paths[vp_id];
3909
3910	if (!(hldev->vpath_assignments & vxge_mBIT(vp_id))) {
3911		status = VXGE_HW_ERR_VPATH_NOT_AVAILABLE;
3912		goto exit;
3913	}
3914	vp_reg = vpath->vp_reg;
3915
3916	status =  __vxge_hw_vpath_swapper_set(vpath->vp_reg);
3917
3918	if (status != VXGE_HW_OK)
3919		goto exit;
3920
3921	status =  __vxge_hw_vpath_mac_configure(hldev, vp_id);
3922
3923	if (status != VXGE_HW_OK)
3924		goto exit;
3925
3926	status =  __vxge_hw_vpath_kdfc_configure(hldev, vp_id);
3927
3928	if (status != VXGE_HW_OK)
3929		goto exit;
3930
3931	status = __vxge_hw_vpath_tim_configure(hldev, vp_id);
3932
3933	if (status != VXGE_HW_OK)
3934		goto exit;
3935
3936	val64 = readq(&vp_reg->rtdma_rd_optimization_ctrl);
3937
3938	/* Get MRRS value from device control */
3939	status  = __vxge_hw_vpath_pci_read(vpath, 1, 0x78, &val32);
3940
3941	if (status == VXGE_HW_OK) {
3942		val32 = (val32 & VXGE_HW_PCI_EXP_DEVCTL_READRQ) >> 12;
3943		val64 &=
3944		    ~(VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(7));
3945		val64 |=
3946		    VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(val32);
3947
3948		val64 |= VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_WAIT_FOR_SPACE;
3949	}
3950
3951	val64 &= ~(VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY(7));
3952	val64 |=
3953	    VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY(
3954		    VXGE_HW_MAX_PAYLOAD_SIZE_512);
3955
3956	val64 |= VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY_EN;
3957	writeq(val64, &vp_reg->rtdma_rd_optimization_ctrl);
3958
3959exit:
3960	return status;
3961}
3962
3963/*
3964 * __vxge_hw_vp_initialize - Initialize Virtual Path structure
3965 * This routine is the initial phase of init which resets the vpath and
3966 * initializes the software support structures.
3967 */
3968enum vxge_hw_status
3969__vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id,
3970			struct vxge_hw_vp_config *config)
3971{
3972	struct __vxge_hw_virtualpath *vpath;
3973	enum vxge_hw_status status = VXGE_HW_OK;
3974
3975	if (!(hldev->vpath_assignments & vxge_mBIT(vp_id))) {
3976		status = VXGE_HW_ERR_VPATH_NOT_AVAILABLE;
3977		goto exit;
3978	}
3979
3980	vpath = &hldev->virtual_paths[vp_id];
3981
3982	vpath->vp_id = vp_id;
3983	vpath->vp_open = VXGE_HW_VP_OPEN;
3984	vpath->hldev = hldev;
3985	vpath->vp_config = config;
3986	vpath->vp_reg = hldev->vpath_reg[vp_id];
3987	vpath->vpmgmt_reg = hldev->vpmgmt_reg[vp_id];
3988
3989	__vxge_hw_vpath_reset(hldev, vp_id);
3990
3991	status = __vxge_hw_vpath_reset_check(vpath);
3992
3993	if (status != VXGE_HW_OK) {
3994		memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
3995		goto exit;
3996	}
3997
3998	status = __vxge_hw_vpath_mgmt_read(hldev, vpath);
3999
4000	if (status != VXGE_HW_OK) {
4001		memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
4002		goto exit;
4003	}
4004
4005	INIT_LIST_HEAD(&vpath->vpath_handles);
4006
4007	vpath->sw_stats = &hldev->stats.sw_dev_info_stats.vpath_info[vp_id];
4008
4009	VXGE_HW_DEVICE_TIM_INT_MASK_SET(hldev->tim_int_mask0,
4010		hldev->tim_int_mask1, vp_id);
4011
4012	status = __vxge_hw_vpath_initialize(hldev, vp_id);
4013
4014	if (status != VXGE_HW_OK)
4015		__vxge_hw_vp_terminate(hldev, vp_id);
4016exit:
4017	return status;
4018}
4019
4020/*
4021 * __vxge_hw_vp_terminate - Terminate Virtual Path structure
4022 * This routine closes all channels it opened and frees up the memory
4023 */
4024void
4025__vxge_hw_vp_terminate(struct __vxge_hw_device *hldev, u32 vp_id)
4026{
4027	struct __vxge_hw_virtualpath *vpath;
4028
4029	vpath = &hldev->virtual_paths[vp_id];
4030
4031	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN)
4032		goto exit;
4033
4034	VXGE_HW_DEVICE_TIM_INT_MASK_RESET(vpath->hldev->tim_int_mask0,
4035		vpath->hldev->tim_int_mask1, vpath->vp_id);
4036	hldev->stats.hw_dev_info_stats.vpath_info[vpath->vp_id] = NULL;
4037
4038	memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
4039exit:
4040	return;
4041}
4042
4043/*
4044 * vxge_hw_vpath_mtu_set - Set MTU.
4045 * Set a new MTU value. For example, to use jumbo frames:
4046 * vxge_hw_vpath_mtu_set(my_device, 9600);
4047 */
4048enum vxge_hw_status
4049vxge_hw_vpath_mtu_set(struct __vxge_hw_vpath_handle *vp, u32 new_mtu)
4050{
4051	u64 val64;
4052	enum vxge_hw_status status = VXGE_HW_OK;
4053	struct __vxge_hw_virtualpath *vpath;
4054
4055	if (vp == NULL) {
4056		status = VXGE_HW_ERR_INVALID_HANDLE;
4057		goto exit;
4058	}
4059	vpath = vp->vpath;
4060
4061	new_mtu += VXGE_HW_MAC_HEADER_MAX_SIZE;
4062
4063	if ((new_mtu < VXGE_HW_MIN_MTU) || (new_mtu > vpath->max_mtu))
4064		status = VXGE_HW_ERR_INVALID_MTU_SIZE;
4065
4066	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
4067
4068	val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
4069	val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(new_mtu);
4070
4071	writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
4072
4073	vpath->vp_config->mtu = new_mtu - VXGE_HW_MAC_HEADER_MAX_SIZE;
4074
4075exit:
4076	return status;
4077}
4078
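/*
 * Minimal usage sketch (illustrative only; "vp_handle" is an assumed local
 * name, not part of the driver): switch an open vpath to a jumbo MTU and
 * check the outcome. The call fails with VXGE_HW_ERR_INVALID_MTU_SIZE when
 * the size, plus the MAC header, falls outside [VXGE_HW_MIN_MTU, max_mtu].
 *
 *	enum vxge_hw_status status;
 *
 *	status = vxge_hw_vpath_mtu_set(vp_handle, 9000);
 *	if (status != VXGE_HW_OK)
 *		return -EINVAL;
 */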
4079/*
4080 * vxge_hw_vpath_open - Open a virtual path on a given adapter
4081 * This function is used to open access to a virtual path of an
4082 * adapter for offload and GRO operations. It returns
4083 * synchronously.
4084 */
4085enum vxge_hw_status
4086vxge_hw_vpath_open(struct __vxge_hw_device *hldev,
4087		   struct vxge_hw_vpath_attr *attr,
4088		   struct __vxge_hw_vpath_handle **vpath_handle)
4089{
4090	struct __vxge_hw_virtualpath *vpath;
4091	struct __vxge_hw_vpath_handle *vp;
4092	enum vxge_hw_status status;
4093
4094	vpath = &hldev->virtual_paths[attr->vp_id];
4095
4096	if (vpath->vp_open == VXGE_HW_VP_OPEN) {
4097		status = VXGE_HW_ERR_INVALID_STATE;
4098		goto vpath_open_exit1;
4099	}
4100
4101	status = __vxge_hw_vp_initialize(hldev, attr->vp_id,
4102			&hldev->config.vp_config[attr->vp_id]);
4103
4104	if (status != VXGE_HW_OK)
4105		goto vpath_open_exit1;
4106
4107	vp = (struct __vxge_hw_vpath_handle *)
4108		vmalloc(sizeof(struct __vxge_hw_vpath_handle));
4109	if (vp == NULL) {
4110		status = VXGE_HW_ERR_OUT_OF_MEMORY;
4111		goto vpath_open_exit2;
4112	}
4113
4114	memset(vp, 0, sizeof(struct __vxge_hw_vpath_handle));
4115
4116	vp->vpath = vpath;
4117
4118	if (vpath->vp_config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
4119		status = __vxge_hw_fifo_create(vp, &attr->fifo_attr);
4120		if (status != VXGE_HW_OK)
4121			goto vpath_open_exit6;
4122	}
4123
4124	if (vpath->vp_config->ring.enable == VXGE_HW_RING_ENABLE) {
4125		status = __vxge_hw_ring_create(vp, &attr->ring_attr);
4126		if (status != VXGE_HW_OK)
4127			goto vpath_open_exit7;
4128
4129		__vxge_hw_vpath_prc_configure(hldev, attr->vp_id);
4130	}
4131
4132	vpath->fifoh->tx_intr_num =
4133		(attr->vp_id * VXGE_HW_MAX_INTR_PER_VP)  +
4134			VXGE_HW_VPATH_INTR_TX;
4135
4136	vpath->stats_block = __vxge_hw_blockpool_block_allocate(hldev,
4137				VXGE_HW_BLOCK_SIZE);
4138
4139	if (vpath->stats_block == NULL) {
4140		status = VXGE_HW_ERR_OUT_OF_MEMORY;
4141		goto vpath_open_exit8;
4142	}
4143
4144	vpath->hw_stats = (struct vxge_hw_vpath_stats_hw_info *)vpath->
4145			stats_block->memblock;
4146	memset(vpath->hw_stats, 0,
4147		sizeof(struct vxge_hw_vpath_stats_hw_info));
4148
4149	hldev->stats.hw_dev_info_stats.vpath_info[attr->vp_id] =
4150						vpath->hw_stats;
4151
4152	vpath->hw_stats_sav =
4153		&hldev->stats.hw_dev_info_stats.vpath_info_sav[attr->vp_id];
4154	memset(vpath->hw_stats_sav, 0,
4155			sizeof(struct vxge_hw_vpath_stats_hw_info));
4156
4157	writeq(vpath->stats_block->dma_addr, &vpath->vp_reg->stats_cfg);
4158
4159	status = vxge_hw_vpath_stats_enable(vp);
4160	if (status != VXGE_HW_OK)
4161		goto vpath_open_exit8;
4162
4163	list_add(&vp->item, &vpath->vpath_handles);
4164
4165	hldev->vpaths_deployed |= vxge_mBIT(vpath->vp_id);
4166
4167	*vpath_handle = vp;
4168
4169	attr->fifo_attr.userdata = vpath->fifoh;
4170	attr->ring_attr.userdata = vpath->ringh;
4171
4172	return VXGE_HW_OK;
4173
4174vpath_open_exit8:
4175	if (vpath->ringh != NULL)
4176		__vxge_hw_ring_delete(vp);
4177vpath_open_exit7:
4178	if (vpath->fifoh != NULL)
4179		__vxge_hw_fifo_delete(vp);
4180vpath_open_exit6:
4181	vfree(vp);
4182vpath_open_exit2:
4183	__vxge_hw_vp_terminate(hldev, attr->vp_id);
4184vpath_open_exit1:
4185
4186	return status;
4187}
4188
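/*
 * Illustrative open/close sketch (not part of the driver; "my_hldev" and
 * "id" are assumed names): the per-vpath configuration is taken from
 * hldev->config.vp_config[], so the attribute block mainly carries the
 * vpath id plus the fifo/ring attributes.
 *
 *	struct vxge_hw_vpath_attr attr;
 *	struct __vxge_hw_vpath_handle *vp = NULL;
 *	enum vxge_hw_status status;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.vp_id = id;
 *
 *	status = vxge_hw_vpath_open(my_hldev, &attr, &vp);
 *	if (status != VXGE_HW_OK)
 *		return status;
 *	...
 *	vxge_hw_vpath_close(vp);
 */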
4189/**
4190 * vxge_hw_vpath_rx_doorbell_init - Initialize the RxD doorbell of a vpath
4191 * @vp: Handle got from previous vpath open
4192 *
4193 * This function posts the initial RxD doorbell count for the vpath,
4194 * based on the RxD memory size, and derives the minimum RxD replenish
4195 * limit for its ring.
4196 */
4197void
4198vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp)
4199{
4200	struct __vxge_hw_virtualpath *vpath = NULL;
4201	u64 new_count, val64, val164;
4202	struct __vxge_hw_ring *ring;
4203
4204	vpath = vp->vpath;
4205	ring = vpath->ringh;
4206
4207	new_count = readq(&vpath->vp_reg->rxdmem_size);
4208	new_count &= 0x1fff;
4209	val164 = (VXGE_HW_RXDMEM_SIZE_PRC_RXDMEM_SIZE(new_count));
4210
4211	writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(val164),
4212		&vpath->vp_reg->prc_rxd_doorbell);
4213	readl(&vpath->vp_reg->prc_rxd_doorbell);
4214
4215	val164 /= 2;
4216	val64 = readq(&vpath->vp_reg->prc_cfg6);
4217	val64 = VXGE_HW_PRC_CFG6_RXD_SPAT(val64);
4218	val64 &= 0x1ff;
4219
4220	/*
4221	 * Each RxD is of 4 qwords
4222	 */
4223	new_count -= (val64 + 1);
4224	val64 = min(val164, new_count) / 4;
4225
4226	ring->rxds_limit = min(ring->rxds_limit, val64);
4227	if (ring->rxds_limit < 4)
4228		ring->rxds_limit = 4;
4229}
4230
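/*
 * In effect, the routine above posts the full RxD memory size (in qwords)
 * to the PRC doorbell, then caps ring->rxds_limit at the smaller of half
 * that size and the qwords left above the RXD_SPAT threshold, converted to
 * RxDs at 4 qwords per RxD and clamped to a minimum of 4.
 */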
4231/*
4232 * vxge_hw_vpath_close - Close the handle obtained from a previous vpath open
4233 * This function is used to close access to the virtual path opened
4234 * earlier.
4235 */
4236enum vxge_hw_status vxge_hw_vpath_close(struct __vxge_hw_vpath_handle *vp)
4237{
4238	struct __vxge_hw_virtualpath *vpath = NULL;
4239	struct __vxge_hw_device *devh = NULL;
4240	u32 vp_id = vp->vpath->vp_id;
4241	u32 is_empty = TRUE;
4242	enum vxge_hw_status status = VXGE_HW_OK;
4243
4244	vpath = vp->vpath;
4245	devh = vpath->hldev;
4246
4247	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4248		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4249		goto vpath_close_exit;
4250	}
4251
4252	list_del(&vp->item);
4253
4254	if (!list_empty(&vpath->vpath_handles)) {
4255		list_add(&vp->item, &vpath->vpath_handles);
4256		is_empty = FALSE;
4257	}
4258
4259	if (!is_empty) {
4260		status = VXGE_HW_FAIL;
4261		goto vpath_close_exit;
4262	}
4263
4264	devh->vpaths_deployed &= ~vxge_mBIT(vp_id);
4265
4266	if (vpath->ringh != NULL)
4267		__vxge_hw_ring_delete(vp);
4268
4269	if (vpath->fifoh != NULL)
4270		__vxge_hw_fifo_delete(vp);
4271
4272	if (vpath->stats_block != NULL)
4273		__vxge_hw_blockpool_block_free(devh, vpath->stats_block);
4274
4275	vfree(vp);
4276
4277	__vxge_hw_vp_terminate(devh, vp_id);
4278
4279	vpath->vp_open = VXGE_HW_VP_NOT_OPEN;
4280
4281vpath_close_exit:
4282	return status;
4283}
4284
4285/*
4286 * vxge_hw_vpath_reset - Resets vpath
4287 * This function is used to request a reset of the vpath
4288 */
4289enum vxge_hw_status vxge_hw_vpath_reset(struct __vxge_hw_vpath_handle *vp)
4290{
4291	enum vxge_hw_status status;
4292	u32 vp_id;
4293	struct __vxge_hw_virtualpath *vpath = vp->vpath;
4294
4295	vp_id = vpath->vp_id;
4296
4297	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4298		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4299		goto exit;
4300	}
4301
4302	status = __vxge_hw_vpath_reset(vpath->hldev, vp_id);
4303	if (status == VXGE_HW_OK)
4304		vpath->sw_stats->soft_reset_cnt++;
4305exit:
4306	return status;
4307}
4308
4309/*
4310 * vxge_hw_vpath_recover_from_reset - Poll for reset complete and re-initialize.
4311 * This function polls for the vpath reset completion and re-initializes
4312 * the vpath.
4313 */
4314enum vxge_hw_status
4315vxge_hw_vpath_recover_from_reset(struct __vxge_hw_vpath_handle *vp)
4316{
4317	struct __vxge_hw_virtualpath *vpath = NULL;
4318	enum vxge_hw_status status;
4319	struct __vxge_hw_device *hldev;
4320	u32 vp_id;
4321
4322	vp_id = vp->vpath->vp_id;
4323	vpath = vp->vpath;
4324	hldev = vpath->hldev;
4325
4326	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4327		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4328		goto exit;
4329	}
4330
4331	status = __vxge_hw_vpath_reset_check(vpath);
4332	if (status != VXGE_HW_OK)
4333		goto exit;
4334
4335	status = __vxge_hw_vpath_sw_reset(hldev, vp_id);
4336	if (status != VXGE_HW_OK)
4337		goto exit;
4338
4339	status = __vxge_hw_vpath_initialize(hldev, vp_id);
4340	if (status != VXGE_HW_OK)
4341		goto exit;
4342
4343	if (vpath->ringh != NULL)
4344		__vxge_hw_vpath_prc_configure(hldev, vp_id);
4345
4346	memset(vpath->hw_stats, 0,
4347		sizeof(struct vxge_hw_vpath_stats_hw_info));
4348
4349	memset(vpath->hw_stats_sav, 0,
4350		sizeof(struct vxge_hw_vpath_stats_hw_info));
4351
4352	writeq(vpath->stats_block->dma_addr,
4353		&vpath->vp_reg->stats_cfg);
4354
4355	status = vxge_hw_vpath_stats_enable(vp);
4356
4357exit:
4358	return status;
4359}
4360
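/*
 * Illustrative recovery sequence (not part of the driver; variable names
 * are assumptions): request a vpath reset, then poll for completion and
 * rebuild the path before re-enabling it.
 *
 *	status = vxge_hw_vpath_reset(vp);
 *	if (status != VXGE_HW_OK)
 *		return status;
 *	status = vxge_hw_vpath_recover_from_reset(vp);
 *	if (status == VXGE_HW_OK)
 *		vxge_hw_vpath_enable(vp);
 */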
4361/*
4362 * vxge_hw_vpath_enable - Enable vpath.
4363 * This routine clears the vpath reset, thereby enabling a vpath
4364 * to start forwarding frames and generating interrupts.
4365 */
4366void
4367vxge_hw_vpath_enable(struct __vxge_hw_vpath_handle *vp)
4368{
4369	struct __vxge_hw_device *hldev;
4370	u64 val64;
4371
4372	hldev = vp->vpath->hldev;
4373
4374	val64 = VXGE_HW_CMN_RSTHDLR_CFG1_CLR_VPATH_RESET(
4375		1 << (16 - vp->vpath->vp_id));
4376
4377	__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
4378		&hldev->common_reg->cmn_rsthdlr_cfg1);
4379}
4380
4381/*
4382 * vxge_hw_vpath_stats_enable - Enable vpath h/w statistics.
4383 * Enable the DMA vpath statistics. The function is to be called to re-enable
4384 * the adapter to update the statistics in host memory.
4385 */
4386enum vxge_hw_status
4387vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vp)
4388{
4389	enum vxge_hw_status status = VXGE_HW_OK;
4390	struct __vxge_hw_virtualpath *vpath;
4391
4392	vpath = vp->vpath;
4393
4394	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4395		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4396		goto exit;
4397	}
4398
4399	memcpy(vpath->hw_stats_sav, vpath->hw_stats,
4400			sizeof(struct vxge_hw_vpath_stats_hw_info));
4401
4402	status = __vxge_hw_vpath_stats_get(vpath, vpath->hw_stats);
4403exit:
4404	return status;
4405}
4406
4407/*
4408 * __vxge_hw_vpath_stats_access - Perform a statistics operation (e.g. read)
4409 *                           on the counter at the given offset
4410 */
4411enum vxge_hw_status
4412__vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath,
4413			     u32 operation, u32 offset, u64 *stat)
4414{
4415	u64 val64;
4416	enum vxge_hw_status status = VXGE_HW_OK;
4417	struct vxge_hw_vpath_reg __iomem *vp_reg;
4418
4419	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4420		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4421		goto vpath_stats_access_exit;
4422	}
4423
4424	vp_reg = vpath->vp_reg;
4425
4426	val64 =  VXGE_HW_XMAC_STATS_ACCESS_CMD_OP(operation) |
4427		 VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE |
4428		 VXGE_HW_XMAC_STATS_ACCESS_CMD_OFFSET_SEL(offset);
4429
4430	status = __vxge_hw_pio_mem_write64(val64,
4431				&vp_reg->xmac_stats_access_cmd,
4432				VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE,
4433				vpath->hldev->config.device_poll_millis);
4434
4435	if ((status == VXGE_HW_OK) && (operation == VXGE_HW_STATS_OP_READ))
4436		*stat = readq(&vp_reg->xmac_stats_access_data);
4437	else
4438		*stat = 0;
4439
4440vpath_stats_access_exit:
4441	return status;
4442}
4443
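/*
 * Illustrative single-counter read (the offset mirrors the TX-stats getter
 * below and is only an example; "stat" is an assumed local name):
 *
 *	enum vxge_hw_status status;
 *	u64 stat;
 *
 *	status = __vxge_hw_vpath_stats_access(vpath, VXGE_HW_STATS_OP_READ,
 *					      VXGE_HW_STATS_VPATH_TX_OFFSET,
 *					      &stat);
 *	if (status != VXGE_HW_OK)
 *		return status;
 */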
4444/*
4445 * __vxge_hw_vpath_xmac_tx_stats_get - Get the TX Statistics of a vpath
4446 */
4447enum vxge_hw_status
4448__vxge_hw_vpath_xmac_tx_stats_get(
4449	struct __vxge_hw_virtualpath *vpath,
4450	struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats)
4451{
4452	u64 *val64;
4453	int i;
4454	u32 offset = VXGE_HW_STATS_VPATH_TX_OFFSET;
4455	enum vxge_hw_status status = VXGE_HW_OK;
4456
4457	val64 = (u64 *) vpath_tx_stats;
4458
4459	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4460		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4461		goto exit;
4462	}
4463
4464	for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_tx_stats) / 8; i++) {
4465		status = __vxge_hw_vpath_stats_access(vpath,
4466					VXGE_HW_STATS_OP_READ,
4467					offset, val64);
4468		if (status != VXGE_HW_OK)
4469			goto exit;
4470		offset++;
4471		val64++;
4472	}
4473exit:
4474	return status;
4475}
4476
4477/*
4478 * __vxge_hw_vpath_xmac_rx_stats_get - Get the RX Statistics of a vpath
4479 */
4480enum vxge_hw_status
4481__vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath,
4482			struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats)
4483{
4484	u64 *val64;
4485	enum vxge_hw_status status = VXGE_HW_OK;
4486	int i;
4487	u32 offset = VXGE_HW_STATS_VPATH_RX_OFFSET;
4488	val64 = (u64 *) vpath_rx_stats;
4489
4490	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4491		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4492		goto exit;
4493	}
4494	for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_rx_stats) / 8; i++) {
4495		status = __vxge_hw_vpath_stats_access(vpath,
4496					VXGE_HW_STATS_OP_READ,
4497					offset >> 3, val64);
4498		if (status != VXGE_HW_OK)
4499			goto exit;
4500
4501		offset += 8;
4502		val64++;
4503	}
4504exit:
4505	return status;
4506}
4507
4508/*
4509 * __vxge_hw_vpath_stats_get - Get the vpath hw statistics.
4510 */
4511enum vxge_hw_status __vxge_hw_vpath_stats_get(
4512			struct __vxge_hw_virtualpath *vpath,
4513			struct vxge_hw_vpath_stats_hw_info *hw_stats)
4514{
4515	u64 val64;
4516	enum vxge_hw_status status = VXGE_HW_OK;
4517	struct vxge_hw_vpath_reg __iomem *vp_reg;
4518
4519	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4520		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4521		goto exit;
4522	}
4523	vp_reg = vpath->vp_reg;
4524
4525	val64 = readq(&vp_reg->vpath_debug_stats0);
4526	hw_stats->ini_num_mwr_sent =
4527		(u32)VXGE_HW_VPATH_DEBUG_STATS0_GET_INI_NUM_MWR_SENT(val64);
4528
4529	val64 = readq(&vp_reg->vpath_debug_stats1);
4530	hw_stats->ini_num_mrd_sent =
4531		(u32)VXGE_HW_VPATH_DEBUG_STATS1_GET_INI_NUM_MRD_SENT(val64);
4532
4533	val64 = readq(&vp_reg->vpath_debug_stats2);
4534	hw_stats->ini_num_cpl_rcvd =
4535		(u32)VXGE_HW_VPATH_DEBUG_STATS2_GET_INI_NUM_CPL_RCVD(val64);
4536
4537	val64 = readq(&vp_reg->vpath_debug_stats3);
4538	hw_stats->ini_num_mwr_byte_sent =
4539		VXGE_HW_VPATH_DEBUG_STATS3_GET_INI_NUM_MWR_BYTE_SENT(val64);
4540
4541	val64 = readq(&vp_reg->vpath_debug_stats4);
4542	hw_stats->ini_num_cpl_byte_rcvd =
4543		VXGE_HW_VPATH_DEBUG_STATS4_GET_INI_NUM_CPL_BYTE_RCVD(val64);
4544
4545	val64 = readq(&vp_reg->vpath_debug_stats5);
4546	hw_stats->wrcrdtarb_xoff =
4547		(u32)VXGE_HW_VPATH_DEBUG_STATS5_GET_WRCRDTARB_XOFF(val64);
4548
4549	val64 = readq(&vp_reg->vpath_debug_stats6);
4550	hw_stats->rdcrdtarb_xoff =
4551		(u32)VXGE_HW_VPATH_DEBUG_STATS6_GET_RDCRDTARB_XOFF(val64);
4552
4553	val64 = readq(&vp_reg->vpath_genstats_count01);
4554	hw_stats->vpath_genstats_count0 =
4555	(u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT0(
4556		val64);
4557
4558	val64 = readq(&vp_reg->vpath_genstats_count01);
4559	hw_stats->vpath_genstats_count1 =
4560	(u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT1(
4561		val64);
4562
4563	val64 = readq(&vp_reg->vpath_genstats_count23);
4564	hw_stats->vpath_genstats_count2 =
4565	(u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT2(
4566		val64);
4567
4568	val64 = readq(&vp_reg->vpath_genstats_count23);
4569	hw_stats->vpath_genstats_count3 =
4570	(u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT3(
4571		val64);
4572
4573	val64 = readq(&vp_reg->vpath_genstats_count4);
4574	hw_stats->vpath_genstats_count4 =
4575	(u32)VXGE_HW_VPATH_GENSTATS_COUNT4_GET_PPIF_VPATH_GENSTATS_COUNT4(
4576		val64);
4577
4578	val64 = readq(&vp_reg->vpath_genstats_count5);
4579	hw_stats->vpath_genstats_count5 =
4580	(u32)VXGE_HW_VPATH_GENSTATS_COUNT5_GET_PPIF_VPATH_GENSTATS_COUNT5(
4581		val64);
4582
4583	status = __vxge_hw_vpath_xmac_tx_stats_get(vpath, &hw_stats->tx_stats);
4584	if (status != VXGE_HW_OK)
4585		goto exit;
4586
4587	status = __vxge_hw_vpath_xmac_rx_stats_get(vpath, &hw_stats->rx_stats);
4588	if (status != VXGE_HW_OK)
4589		goto exit;
4590
4591	VXGE_HW_VPATH_STATS_PIO_READ(
4592		VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM0_OFFSET);
4593
4594	hw_stats->prog_event_vnum0 =
4595			(u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM0(val64);
4596
4597	hw_stats->prog_event_vnum1 =
4598			(u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM1(val64);
4599
4600	VXGE_HW_VPATH_STATS_PIO_READ(
4601		VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM2_OFFSET);
4602
4603	hw_stats->prog_event_vnum2 =
4604			(u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM2(val64);
4605
4606	hw_stats->prog_event_vnum3 =
4607			(u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM3(val64);
4608
4609	val64 = readq(&vp_reg->rx_multi_cast_stats);
4610	hw_stats->rx_multi_cast_frame_discard =
4611		(u16)VXGE_HW_RX_MULTI_CAST_STATS_GET_FRAME_DISCARD(val64);
4612
4613	val64 = readq(&vp_reg->rx_frm_transferred);
4614	hw_stats->rx_frm_transferred =
4615		(u32)VXGE_HW_RX_FRM_TRANSFERRED_GET_RX_FRM_TRANSFERRED(val64);
4616
4617	val64 = readq(&vp_reg->rxd_returned);
4618	hw_stats->rxd_returned =
4619		(u16)VXGE_HW_RXD_RETURNED_GET_RXD_RETURNED(val64);
4620
4621	val64 = readq(&vp_reg->dbg_stats_rx_mpa);
4622	hw_stats->rx_mpa_len_fail_frms =
4623		(u16)VXGE_HW_DBG_STATS_GET_RX_MPA_LEN_FAIL_FRMS(val64);
4624	hw_stats->rx_mpa_mrk_fail_frms =
4625		(u16)VXGE_HW_DBG_STATS_GET_RX_MPA_MRK_FAIL_FRMS(val64);
4626	hw_stats->rx_mpa_crc_fail_frms =
4627		(u16)VXGE_HW_DBG_STATS_GET_RX_MPA_CRC_FAIL_FRMS(val64);
4628
4629	val64 = readq(&vp_reg->dbg_stats_rx_fau);
4630	hw_stats->rx_permitted_frms =
4631		(u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_PERMITTED_FRMS(val64);
4632	hw_stats->rx_vp_reset_discarded_frms =
4633	(u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_VP_RESET_DISCARDED_FRMS(val64);
4634	hw_stats->rx_wol_frms =
4635		(u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_WOL_FRMS(val64);
4636
4637	val64 = readq(&vp_reg->tx_vp_reset_discarded_frms);
4638	hw_stats->tx_vp_reset_discarded_frms =
4639	(u16)VXGE_HW_TX_VP_RESET_DISCARDED_FRMS_GET_TX_VP_RESET_DISCARDED_FRMS(
4640		val64);
4641exit:
4642	return status;
4643}
4644
4645/*
4646 * __vxge_hw_blockpool_create - Create block pool
4647 */
4648
4649enum vxge_hw_status
4650__vxge_hw_blockpool_create(struct __vxge_hw_device *hldev,
4651			   struct __vxge_hw_blockpool *blockpool,
4652			   u32 pool_size,
4653			   u32 pool_max)
4654{
4655	u32 i;
4656	struct __vxge_hw_blockpool_entry *entry = NULL;
4657	void *memblock;
4658	dma_addr_t dma_addr;
4659	struct pci_dev *dma_handle;
4660	struct pci_dev *acc_handle;
4661	enum vxge_hw_status status = VXGE_HW_OK;
4662
4663	if (blockpool == NULL) {
4664		status = VXGE_HW_FAIL;
4665		goto blockpool_create_exit;
4666	}
4667
4668	blockpool->hldev = hldev;
4669	blockpool->block_size = VXGE_HW_BLOCK_SIZE;
4670	blockpool->pool_size = 0;
4671	blockpool->pool_max = pool_max;
4672	blockpool->req_out = 0;
4673
4674	INIT_LIST_HEAD(&blockpool->free_block_list);
4675	INIT_LIST_HEAD(&blockpool->free_entry_list);
4676
4677	for (i = 0; i < pool_size + pool_max; i++) {
4678		entry = kzalloc(sizeof(struct __vxge_hw_blockpool_entry),
4679				GFP_KERNEL);
4680		if (entry == NULL) {
4681			__vxge_hw_blockpool_destroy(blockpool);
4682			status = VXGE_HW_ERR_OUT_OF_MEMORY;
4683			goto blockpool_create_exit;
4684		}
4685		list_add(&entry->item, &blockpool->free_entry_list);
4686	}
4687
4688	for (i = 0; i < pool_size; i++) {
4689
4690		memblock = vxge_os_dma_malloc(
4691				hldev->pdev,
4692				VXGE_HW_BLOCK_SIZE,
4693				&dma_handle,
4694				&acc_handle);
4695
4696		if (memblock == NULL) {
4697			__vxge_hw_blockpool_destroy(blockpool);
4698			status = VXGE_HW_ERR_OUT_OF_MEMORY;
4699			goto blockpool_create_exit;
4700		}
4701
4702		dma_addr = pci_map_single(hldev->pdev, memblock,
4703				VXGE_HW_BLOCK_SIZE, PCI_DMA_BIDIRECTIONAL);
4704
4705		if (unlikely(pci_dma_mapping_error(hldev->pdev,
4706				dma_addr))) {
4707
4708			vxge_os_dma_free(hldev->pdev, memblock, &acc_handle);
4709			__vxge_hw_blockpool_destroy(blockpool);
4710			status = VXGE_HW_ERR_OUT_OF_MEMORY;
4711			goto blockpool_create_exit;
4712		}
4713
4714		if (!list_empty(&blockpool->free_entry_list))
4715			entry = (struct __vxge_hw_blockpool_entry *)
4716				list_first_entry(&blockpool->free_entry_list,
4717					struct __vxge_hw_blockpool_entry,
4718					item);
4719
4720		if (entry == NULL)
4721			entry =
4722			    kzalloc(sizeof(struct __vxge_hw_blockpool_entry),
4723					GFP_KERNEL);
4724		if (entry != NULL) {
4725			list_del(&entry->item);
4726			entry->length = VXGE_HW_BLOCK_SIZE;
4727			entry->memblock = memblock;
4728			entry->dma_addr = dma_addr;
4729			entry->acc_handle = acc_handle;
4730			entry->dma_handle = dma_handle;
4731			list_add(&entry->item,
4732					  &blockpool->free_block_list);
4733			blockpool->pool_size++;
4734		} else {
4735			__vxge_hw_blockpool_destroy(blockpool);
4736			status = VXGE_HW_ERR_OUT_OF_MEMORY;
4737			goto blockpool_create_exit;
4738		}
4739	}
4740
4741blockpool_create_exit:
4742	return status;
4743}
4744
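/*
 * Illustrative lifetime sketch (not part of the driver; "pool_size" and
 * "pool_max" are placeholders): the pool is created once per device and
 * torn down on the destroy path.
 *
 *	status = __vxge_hw_blockpool_create(hldev, &hldev->block_pool,
 *					    pool_size, pool_max);
 *	if (status != VXGE_HW_OK)
 *		return status;
 *	...
 *	__vxge_hw_blockpool_destroy(&hldev->block_pool);
 */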
4745/*
4746 * __vxge_hw_blockpool_destroy - Deallocates the block pool
4747 */
4748
4749void __vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool)
4750{
4751
4752	struct __vxge_hw_device *hldev;
4753	struct list_head *p, *n;
4754	u16 ret;
4755
4756	if (blockpool == NULL) {
4757		ret = 1;
4758		goto exit;
4759	}
4760
4761	hldev = blockpool->hldev;
4762
4763	list_for_each_safe(p, n, &blockpool->free_block_list) {
4764
4765		pci_unmap_single(hldev->pdev,
4766			((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
4767			((struct __vxge_hw_blockpool_entry *)p)->length,
4768			PCI_DMA_BIDIRECTIONAL);
4769
4770		vxge_os_dma_free(hldev->pdev,
4771			((struct __vxge_hw_blockpool_entry *)p)->memblock,
4772			&((struct __vxge_hw_blockpool_entry *) p)->acc_handle);
4773
4774		list_del(
4775			&((struct __vxge_hw_blockpool_entry *)p)->item);
4776		kfree(p);
4777		blockpool->pool_size--;
4778	}
4779
4780	list_for_each_safe(p, n, &blockpool->free_entry_list) {
4781		list_del(
4782			&((struct __vxge_hw_blockpool_entry *)p)->item);
4783		kfree((void *)p);
4784	}
4785	ret = 0;
4786exit:
4787	return;
4788}
4789
4790/*
4791 * __vxge_hw_blockpool_blocks_add - Request additional blocks
4792 */
4793static
4794void __vxge_hw_blockpool_blocks_add(struct __vxge_hw_blockpool *blockpool)
4795{
4796	u32 nreq = 0, i;
4797
4798	if ((blockpool->pool_size  +  blockpool->req_out) <
4799		VXGE_HW_MIN_DMA_BLOCK_POOL_SIZE) {
4800		nreq = VXGE_HW_INCR_DMA_BLOCK_POOL_SIZE;
4801		blockpool->req_out += nreq;
4802	}
4803
4804	for (i = 0; i < nreq; i++)
4805		vxge_os_dma_malloc_async(
4806			((struct __vxge_hw_device *)blockpool->hldev)->pdev,
4807			blockpool->hldev, VXGE_HW_BLOCK_SIZE);
4808}
4809
4810/*
4811 * __vxge_hw_blockpool_blocks_remove - Free additional blocks
4812 */
4813static
4814void __vxge_hw_blockpool_blocks_remove(struct __vxge_hw_blockpool *blockpool)
4815{
4816	struct list_head *p, *n;
4817
4818	list_for_each_safe(p, n, &blockpool->free_block_list) {
4819
4820		if (blockpool->pool_size < blockpool->pool_max)
4821			break;
4822
4823		pci_unmap_single(
4824			((struct __vxge_hw_device *)blockpool->hldev)->pdev,
4825			((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
4826			((struct __vxge_hw_blockpool_entry *)p)->length,
4827			PCI_DMA_BIDIRECTIONAL);
4828
4829		vxge_os_dma_free(
4830			((struct __vxge_hw_device *)blockpool->hldev)->pdev,
4831			((struct __vxge_hw_blockpool_entry *)p)->memblock,
4832			&((struct __vxge_hw_blockpool_entry *)p)->acc_handle);
4833
4834		list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);
4835
4836		list_add(p, &blockpool->free_entry_list);
4837
4838		blockpool->pool_size--;
4839
4840	}
4841}
4842
4843/*
4844 * vxge_hw_blockpool_block_add - callback for vxge_os_dma_malloc_async
4845 * Adds a block to the block pool
4846 */
4847void vxge_hw_blockpool_block_add(
4848			struct __vxge_hw_device *devh,
4849			void *block_addr,
4850			u32 length,
4851			struct pci_dev *dma_h,
4852			struct pci_dev *acc_handle)
4853{
4854	struct __vxge_hw_blockpool  *blockpool;
4855	struct __vxge_hw_blockpool_entry  *entry = NULL;
4856	dma_addr_t dma_addr;
4857	enum vxge_hw_status status = VXGE_HW_OK;
4858	u32 req_out;
4859
4860	blockpool = &devh->block_pool;
4861
4862	if (block_addr == NULL) {
4863		blockpool->req_out--;
4864		status = VXGE_HW_FAIL;
4865		goto exit;
4866	}
4867
4868	dma_addr = pci_map_single(devh->pdev, block_addr, length,
4869				PCI_DMA_BIDIRECTIONAL);
4870
4871	if (unlikely(pci_dma_mapping_error(devh->pdev, dma_addr))) {
4872
4873		vxge_os_dma_free(devh->pdev, block_addr, &acc_handle);
4874		blockpool->req_out--;
4875		status = VXGE_HW_FAIL;
4876		goto exit;
4877	}
4878
4879
4880	if (!list_empty(&blockpool->free_entry_list))
4881		entry = (struct __vxge_hw_blockpool_entry *)
4882			list_first_entry(&blockpool->free_entry_list,
4883				struct __vxge_hw_blockpool_entry,
4884				item);
4885
4886	if (entry == NULL)
4887		entry = (struct __vxge_hw_blockpool_entry *)
4888			vmalloc(sizeof(struct __vxge_hw_blockpool_entry));
4889	else
4890		list_del(&entry->item);
4891
4892	if (entry != NULL) {
4893		entry->length = length;
4894		entry->memblock = block_addr;
4895		entry->dma_addr = dma_addr;
4896		entry->acc_handle = acc_handle;
4897		entry->dma_handle = dma_h;
4898		list_add(&entry->item, &blockpool->free_block_list);
4899		blockpool->pool_size++;
4900		status = VXGE_HW_OK;
4901	} else
4902		status = VXGE_HW_ERR_OUT_OF_MEMORY;
4903
4904	blockpool->req_out--;
4905
4906	req_out = blockpool->req_out;
4907exit:
4908	return;
4909}
4910
4911/*
4912 * __vxge_hw_blockpool_malloc - Allocate a memory block from the pool
4913 * Allocates a block of memory of the given size, either from the block pool
4914 * or by calling vxge_os_dma_malloc()
4915 */
4916void *
4917__vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size,
4918				struct vxge_hw_mempool_dma *dma_object)
4919{
4920	struct __vxge_hw_blockpool_entry *entry = NULL;
4921	struct __vxge_hw_blockpool  *blockpool;
4922	void *memblock = NULL;
4923	enum vxge_hw_status status = VXGE_HW_OK;
4924
4925	blockpool = &devh->block_pool;
4926
4927	if (size != blockpool->block_size) {
4928
4929		memblock = vxge_os_dma_malloc(devh->pdev, size,
4930						&dma_object->handle,
4931						&dma_object->acc_handle);
4932
4933		if (memblock == NULL) {
4934			status = VXGE_HW_ERR_OUT_OF_MEMORY;
4935			goto exit;
4936		}
4937
4938		dma_object->addr = pci_map_single(devh->pdev, memblock, size,
4939					PCI_DMA_BIDIRECTIONAL);
4940
4941		if (unlikely(pci_dma_mapping_error(devh->pdev,
4942				dma_object->addr))) {
4943			vxge_os_dma_free(devh->pdev, memblock,
4944				&dma_object->acc_handle);
4945			status = VXGE_HW_ERR_OUT_OF_MEMORY;
4946			goto exit;
4947		}
4948
4949	} else {
4950
4951		if (!list_empty(&blockpool->free_block_list))
4952			entry = (struct __vxge_hw_blockpool_entry *)
4953				list_first_entry(&blockpool->free_block_list,
4954					struct __vxge_hw_blockpool_entry,
4955					item);
4956
4957		if (entry != NULL) {
4958			list_del(&entry->item);
4959			dma_object->addr = entry->dma_addr;
4960			dma_object->handle = entry->dma_handle;
4961			dma_object->acc_handle = entry->acc_handle;
4962			memblock = entry->memblock;
4963
4964			list_add(&entry->item,
4965				&blockpool->free_entry_list);
4966			blockpool->pool_size--;
4967		}
4968
4969		if (memblock != NULL)
4970			__vxge_hw_blockpool_blocks_add(blockpool);
4971	}
4972exit:
4973	return memblock;
4974}
4975
4976/*
4977 * __vxge_hw_blockpool_free - Frees the memory allocated with
4978 *				__vxge_hw_blockpool_malloc
4979 */
4980void
4981__vxge_hw_blockpool_free(struct __vxge_hw_device *devh,
4982			void *memblock, u32 size,
4983			struct vxge_hw_mempool_dma *dma_object)
4984{
4985	struct __vxge_hw_blockpool_entry *entry = NULL;
4986	struct __vxge_hw_blockpool  *blockpool;
4987	enum vxge_hw_status status = VXGE_HW_OK;
4988
4989	blockpool = &devh->block_pool;
4990
4991	if (size != blockpool->block_size) {
4992		pci_unmap_single(devh->pdev, dma_object->addr, size,
4993			PCI_DMA_BIDIRECTIONAL);
4994		vxge_os_dma_free(devh->pdev, memblock, &dma_object->acc_handle);
4995	} else {
4996
4997		if (!list_empty(&blockpool->free_entry_list))
4998			entry = (struct __vxge_hw_blockpool_entry *)
4999				list_first_entry(&blockpool->free_entry_list,
5000					struct __vxge_hw_blockpool_entry,
5001					item);
5002
5003		if (entry == NULL)
5004			entry = (struct __vxge_hw_blockpool_entry *)
5005				vmalloc(sizeof(
5006					struct __vxge_hw_blockpool_entry));
5007		else
5008			list_del(&entry->item);
5009
5010		if (entry != NULL) {
5011			entry->length = size;
5012			entry->memblock = memblock;
5013			entry->dma_addr = dma_object->addr;
5014			entry->acc_handle = dma_object->acc_handle;
5015			entry->dma_handle = dma_object->handle;
5016			list_add(&entry->item,
5017					&blockpool->free_block_list);
5018			blockpool->pool_size++;
5019			status = VXGE_HW_OK;
5020		} else
5021			status = VXGE_HW_ERR_OUT_OF_MEMORY;
5022
5023		if (status == VXGE_HW_OK)
5024			__vxge_hw_blockpool_blocks_remove(blockpool);
5025	}
5026}
5027
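/*
 * Illustrative allocation sketch (not part of the driver; "devh" and "blk"
 * are assumed names): a request matching VXGE_HW_BLOCK_SIZE is served from
 * the pool's free list, any other size goes through vxge_os_dma_malloc().
 *
 *	struct vxge_hw_mempool_dma dma_object;
 *	void *blk;
 *
 *	blk = __vxge_hw_blockpool_malloc(devh, VXGE_HW_BLOCK_SIZE,
 *					 &dma_object);
 *	if (blk == NULL)
 *		return VXGE_HW_ERR_OUT_OF_MEMORY;
 *	...
 *	__vxge_hw_blockpool_free(devh, blk, VXGE_HW_BLOCK_SIZE, &dma_object);
 */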
5028/*
5029 * __vxge_hw_blockpool_block_allocate - Allocates a block from the block pool
5030 * This function allocates a block of the pool's block size from the free list
5031 */
5032struct __vxge_hw_blockpool_entry *
5033__vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *devh, u32 size)
5034{
5035	struct __vxge_hw_blockpool_entry *entry = NULL;
5036	struct __vxge_hw_blockpool  *blockpool;
5037
5038	blockpool = &devh->block_pool;
5039
5040	if (size == blockpool->block_size) {
5041
5042		if (!list_empty(&blockpool->free_block_list))
5043			entry = (struct __vxge_hw_blockpool_entry *)
5044				list_first_entry(&blockpool->free_block_list,
5045					struct __vxge_hw_blockpool_entry,
5046					item);
5047
5048		if (entry != NULL) {
5049			list_del(&entry->item);
5050			blockpool->pool_size--;
5051		}
5052	}
5053
5054	if (entry != NULL)
5055		__vxge_hw_blockpool_blocks_add(blockpool);
5056
5057	return entry;
5058}
5059
5060/*
5061 * __vxge_hw_blockpool_block_free - Returns a block to the block pool
5062 * @devh: Hal device
5063 * @entry: Entry of the block to be freed
5064 *
5065 * This function returns a block to the block pool
5066 */
5067void
5068__vxge_hw_blockpool_block_free(struct __vxge_hw_device *devh,
5069			struct __vxge_hw_blockpool_entry *entry)
5070{
5071	struct __vxge_hw_blockpool  *blockpool;
5072
5073	blockpool = &devh->block_pool;
5074
5075	if (entry->length == blockpool->block_size) {
5076		list_add(&entry->item, &blockpool->free_block_list);
5077		blockpool->pool_size++;
5078	}
5079
5080	__vxge_hw_blockpool_blocks_remove(blockpool);
5081}
5082