/***************************************************************************
 * Copyright (c) 2005-2009, Broadcom Corporation.
 *
 *  Name: crystalhd_hw.c
 *
 *  Description:
 *		BCM70010 Linux driver HW layer.
 *
 **********************************************************************
 * This file is part of the crystalhd device driver.
 *
 * This driver is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, version 2 of the License.
 *
 * This driver is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this driver.  If not, see <http://www.gnu.org/licenses/>.
 **********************************************************************/

#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include "crystalhd_hw.h"

/* Functions internal to this file */

static void crystalhd_enable_uarts(struct crystalhd_adp *adp)
{
	bc_dec_reg_wr(adp, UartSelectA, BSVS_UART_STREAM);
	bc_dec_reg_wr(adp, UartSelectB, BSVS_UART_DEC_OUTER);
}


static void crystalhd_start_dram(struct crystalhd_adp *adp)
{
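	/*
	 * The SDRAM timing fields below encode (delay in ns) / (5 ns
	 * clock period), adjusted per field as the inline comments note
	 * (e.g. tras: 40 / 5 - 1 = 7).  The trcd term at bits 4..6 is
	 * commented out in the source, leaving those bits zero.
	 */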
41	bc_dec_reg_wr(adp, SDRAM_PARAM, ((40 / 5 - 1) <<  0) |
42	/* tras (40ns tras)/(5ns period) -1 ((15/5 - 1) <<  4) | // trcd */
43		      ((15 / 5 - 1) <<  7) |	/* trp */
44		      ((10 / 5 - 1) << 10) |	/* trrd */
45		      ((15 / 5 + 1) << 12) |	/* twr */
46		      ((2 + 1) << 16) |		/* twtr */
47		      ((70 / 5 - 2) << 19) |	/* trfc */
48		      (0 << 23));
49
50	bc_dec_reg_wr(adp, SDRAM_PRECHARGE, 0);
51	bc_dec_reg_wr(adp, SDRAM_EXT_MODE, 2);
52	bc_dec_reg_wr(adp, SDRAM_MODE, 0x132);
53	bc_dec_reg_wr(adp, SDRAM_PRECHARGE, 0);
54	bc_dec_reg_wr(adp, SDRAM_REFRESH, 0);
55	bc_dec_reg_wr(adp, SDRAM_REFRESH, 0);
56	bc_dec_reg_wr(adp, SDRAM_MODE, 0x32);
57	/* setting the refresh rate here */
58	bc_dec_reg_wr(adp, SDRAM_REF_PARAM, ((1 << 12) | 96));
59}


static bool crystalhd_bring_out_of_rst(struct crystalhd_adp *adp)
{
	union link_misc_perst_deco_ctrl rst_deco_cntrl;
	union link_misc_perst_clk_ctrl rst_clk_cntrl;
	uint32_t temp;

	/*
	 * Link clocks: MISC_PERST_CLOCK_CTRL Clear PLL power down bit,
	 * delay to allow PLL to lock Clear alternate clock, stop clock bits
	 */
	rst_clk_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC_PERST_CLOCK_CTRL);
	rst_clk_cntrl.pll_pwr_dn = 0;
	crystalhd_reg_wr(adp, MISC_PERST_CLOCK_CTRL, rst_clk_cntrl.whole_reg);
	msleep_interruptible(50);

	rst_clk_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC_PERST_CLOCK_CTRL);
	rst_clk_cntrl.stop_core_clk = 0;
	rst_clk_cntrl.sel_alt_clk = 0;

	crystalhd_reg_wr(adp, MISC_PERST_CLOCK_CTRL, rst_clk_cntrl.whole_reg);
	msleep_interruptible(50);

	/*
	 * Bus Arbiter Timeout: GISB_ARBITER_TIMER
	 * Set internal bus arbiter timeout to 40us based on core clock speed
	 * (63MHz * 40us = 0x9D8)
	 */
	crystalhd_reg_wr(adp, GISB_ARBITER_TIMER, 0x9D8);

	/*
	 * Decoder clocks: MISC_PERST_DECODER_CTRL
	 * Enable clocks while 7412 reset is asserted, delay
	 * De-assert 7412 reset
	 */
	rst_deco_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC_PERST_DECODER_CTRL);
	rst_deco_cntrl.stop_bcm_7412_clk = 0;
	rst_deco_cntrl.bcm7412_rst = 1;
	crystalhd_reg_wr(adp, MISC_PERST_DECODER_CTRL, rst_deco_cntrl.whole_reg);
	msleep_interruptible(10);

	rst_deco_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC_PERST_DECODER_CTRL);
	rst_deco_cntrl.bcm7412_rst = 0;
	crystalhd_reg_wr(adp, MISC_PERST_DECODER_CTRL, rst_deco_cntrl.whole_reg);
	msleep_interruptible(50);

	/* Disable OTP_CONTENT_MISC to 0 to disable all secure modes */
	crystalhd_reg_wr(adp, OTP_CONTENT_MISC, 0);

	/* Clear bit 29 of 0x404 */
	temp = crystalhd_reg_rd(adp, PCIE_TL_TRANSACTION_CONFIGURATION);
	temp &= ~BC_BIT(29);
	crystalhd_reg_wr(adp, PCIE_TL_TRANSACTION_CONFIGURATION, temp);

	/* 2.5V regulator must be set to 2.6 volts (+6%) */
	/* The read presumably acts as an ordering flush; its value is unused. */
	temp = crystalhd_reg_rd(adp, MISC_PERST_VREG_CTRL);
	crystalhd_reg_wr(adp, MISC_PERST_VREG_CTRL, 0xF3);

	return true;
}

static bool crystalhd_put_in_reset(struct crystalhd_adp *adp)
{
	union link_misc_perst_deco_ctrl rst_deco_cntrl;
	union link_misc_perst_clk_ctrl  rst_clk_cntrl;
	uint32_t temp;

	/*
	 * Decoder clocks: MISC_PERST_DECODER_CTRL
	 * Assert 7412 reset, delay
	 * Assert 7412 stop clock
	 */
	rst_deco_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC_PERST_DECODER_CTRL);
	rst_deco_cntrl.stop_bcm_7412_clk = 1;
	crystalhd_reg_wr(adp, MISC_PERST_DECODER_CTRL, rst_deco_cntrl.whole_reg);
	msleep_interruptible(50);

	/* Bus Arbiter Timeout: GISB_ARBITER_TIMER
	 * Set internal bus arbiter timeout to 40us based on core clock speed
	 * (6.75MHz * 40us = 0x10E)
	 */
	crystalhd_reg_wr(adp, GISB_ARBITER_TIMER, 0x10E);

	/* Link clocks: MISC_PERST_CLOCK_CTRL
	 * Stop core clk, delay
	 * Set alternate clk, delay, set PLL power down
	 */
	rst_clk_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC_PERST_CLOCK_CTRL);
	rst_clk_cntrl.stop_core_clk = 1;
	rst_clk_cntrl.sel_alt_clk = 1;
	crystalhd_reg_wr(adp, MISC_PERST_CLOCK_CTRL, rst_clk_cntrl.whole_reg);
	msleep_interruptible(50);

	rst_clk_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC_PERST_CLOCK_CTRL);
	rst_clk_cntrl.pll_pwr_dn = 1;
	crystalhd_reg_wr(adp, MISC_PERST_CLOCK_CTRL, rst_clk_cntrl.whole_reg);

	/*
	 * Read and restore the Transaction Configuration Register
	 * after core reset
	 */
	temp = crystalhd_reg_rd(adp, PCIE_TL_TRANSACTION_CONFIGURATION);

	/*
	 * Link core soft reset: MISC3_RESET_CTRL
	 * - Write BIT[0]=1 and read it back for core reset to take place
	 */
	crystalhd_reg_wr(adp, MISC3_RESET_CTRL, 1);
	rst_deco_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC3_RESET_CTRL);
	msleep_interruptible(50);

	/* restore the transaction configuration register */
	crystalhd_reg_wr(adp, PCIE_TL_TRANSACTION_CONFIGURATION, temp);

	return true;
}

static void crystalhd_disable_interrupts(struct crystalhd_adp *adp)
{
	union intr_mask_reg   intr_mask;

	intr_mask.whole_reg = crystalhd_reg_rd(adp, INTR_INTR_MSK_STS_REG);
	intr_mask.mask_pcie_err = 1;
	intr_mask.mask_pcie_rbusmast_err = 1;
	intr_mask.mask_pcie_rgr_bridge   = 1;
	intr_mask.mask_rx_done = 1;
	intr_mask.mask_rx_err  = 1;
	intr_mask.mask_tx_done = 1;
	intr_mask.mask_tx_err  = 1;
	crystalhd_reg_wr(adp, INTR_INTR_MSK_SET_REG, intr_mask.whole_reg);
}
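
/*
 * Note: the same mask bits are written to INTR_INTR_MSK_SET_REG above
 * to mask (disable) delivery and to INTR_INTR_MSK_CLR_REG below to
 * unmask (enable) it; a set/clear register pair.  The initial read of
 * INTR_INTR_MSK_STS_REG only provides a starting value for the union.
 */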

static void crystalhd_enable_interrupts(struct crystalhd_adp *adp)
{
	union intr_mask_reg   intr_mask;

	intr_mask.whole_reg = crystalhd_reg_rd(adp, INTR_INTR_MSK_STS_REG);
	intr_mask.mask_pcie_err = 1;
	intr_mask.mask_pcie_rbusmast_err = 1;
	intr_mask.mask_pcie_rgr_bridge   = 1;
	intr_mask.mask_rx_done = 1;
	intr_mask.mask_rx_err  = 1;
	intr_mask.mask_tx_done = 1;
	intr_mask.mask_tx_err  = 1;
	crystalhd_reg_wr(adp, INTR_INTR_MSK_CLR_REG, intr_mask.whole_reg);
}

static void crystalhd_clear_errors(struct crystalhd_adp *adp)
{
	uint32_t reg;

	/* These error status registers appear to be write-one-to-clear:
	 * writing back the bits we read clears them. */
	reg = crystalhd_reg_rd(adp, MISC1_Y_RX_ERROR_STATUS);
	if (reg)
		crystalhd_reg_wr(adp, MISC1_Y_RX_ERROR_STATUS, reg);

	reg = crystalhd_reg_rd(adp, MISC1_UV_RX_ERROR_STATUS);
	if (reg)
		crystalhd_reg_wr(adp, MISC1_UV_RX_ERROR_STATUS, reg);

	reg = crystalhd_reg_rd(adp, MISC1_TX_DMA_ERROR_STATUS);
	if (reg)
		crystalhd_reg_wr(adp, MISC1_TX_DMA_ERROR_STATUS, reg);
}

static void crystalhd_clear_interrupts(struct crystalhd_adp *adp)
{
	uint32_t intr_sts = crystalhd_reg_rd(adp, INTR_INTR_STATUS);

	if (intr_sts) {
		crystalhd_reg_wr(adp, INTR_INTR_CLR_REG, intr_sts);

		/* Write End Of Interrupt for PCIE */
		crystalhd_reg_wr(adp, INTR_EOI_CTRL, 1);
	}
}

static void crystalhd_soft_rst(struct crystalhd_adp *adp)
{
	uint32_t val;

	/* Assert c011 soft reset */
	bc_dec_reg_wr(adp, DecHt_HostSwReset, 0x00000001);
	msleep_interruptible(50);

	/* Release c011 soft reset */
	bc_dec_reg_wr(adp, DecHt_HostSwReset, 0x00000000);

	/* Disable Stuffing.. */
	val = crystalhd_reg_rd(adp, MISC2_GLOBAL_CTRL);
	val |= BC_BIT(8);
	crystalhd_reg_wr(adp, MISC2_GLOBAL_CTRL, val);
}

static bool crystalhd_load_firmware_config(struct crystalhd_adp *adp)
{
	uint32_t i = 0, reg;

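	/*
	 * BC_DRAM_FW_CFG_ADDR is split into a 512 KB DRAM window
	 * (address >> 19 goes to DCI_DRAM_BASE_ADDR) and an offset
	 * within that window (address & 0x7FFFF goes to
	 * AES_CONFIG_INFO) before the AES command is kicked off.
	 */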
	crystalhd_reg_wr(adp, DCI_DRAM_BASE_ADDR, (BC_DRAM_FW_CFG_ADDR >> 19));

	crystalhd_reg_wr(adp, AES_CMD, 0);
	crystalhd_reg_wr(adp, AES_CONFIG_INFO, (BC_DRAM_FW_CFG_ADDR & 0x7FFFF));
	crystalhd_reg_wr(adp, AES_CMD, 0x1);

	/* Poll the AES done bit for up to ~1 second (100 * 10 ms). */
	for (i = 0; i < 100; ++i) {
		reg = crystalhd_reg_rd(adp, AES_STATUS);
		if (reg & 0x1)
			return true;
		msleep_interruptible(10);
	}

	return false;
}


static bool crystalhd_start_device(struct crystalhd_adp *adp)
{
	uint32_t dbg_options, glb_cntrl = 0, reg_pwrmgmt = 0;

	BCMLOG(BCMLOG_INFO, "Starting BCM70012 Device\n");

	reg_pwrmgmt = crystalhd_reg_rd(adp, PCIE_DLL_DATA_LINK_CONTROL);
	reg_pwrmgmt &= ~ASPM_L1_ENABLE;

	crystalhd_reg_wr(adp, PCIE_DLL_DATA_LINK_CONTROL, reg_pwrmgmt);

	if (!crystalhd_bring_out_of_rst(adp)) {
		BCMLOG_ERR("Failed To Bring Link Out Of Reset\n");
		return false;
	}

	crystalhd_disable_interrupts(adp);

	crystalhd_clear_errors(adp);

	crystalhd_clear_interrupts(adp);

	crystalhd_enable_interrupts(adp);

	/* Enable the option for getting the total no. of DWORDS
	 * that have been transferred by the RXDMA engine
	 */
	dbg_options = crystalhd_reg_rd(adp, MISC1_DMA_DEBUG_OPTIONS_REG);
	dbg_options |= 0x10;
	crystalhd_reg_wr(adp, MISC1_DMA_DEBUG_OPTIONS_REG, dbg_options);

	/* Enable PCI Global Control options */
	glb_cntrl = crystalhd_reg_rd(adp, MISC2_GLOBAL_CTRL);
	glb_cntrl |= 0x100;
	glb_cntrl |= 0x8000;
	crystalhd_reg_wr(adp, MISC2_GLOBAL_CTRL, glb_cntrl);

	/* Interrupts were already enabled above; this second call is
	 * redundant but harmless. */
	crystalhd_enable_interrupts(adp);

	crystalhd_soft_rst(adp);
	crystalhd_start_dram(adp);
	crystalhd_enable_uarts(adp);

	return true;
}

static bool crystalhd_stop_device(struct crystalhd_adp *adp)
{
	uint32_t reg;

	BCMLOG(BCMLOG_INFO, "Stopping BCM70012 Device\n");
	/* Clear and disable interrupts */
	crystalhd_disable_interrupts(adp);
	crystalhd_clear_errors(adp);
	crystalhd_clear_interrupts(adp);

	if (!crystalhd_put_in_reset(adp))
		BCMLOG_ERR("Failed to Put Link To Reset State\n");

	reg = crystalhd_reg_rd(adp, PCIE_DLL_DATA_LINK_CONTROL);
	reg |= ASPM_L1_ENABLE;
	crystalhd_reg_wr(adp, PCIE_DLL_DATA_LINK_CONTROL, reg);

	/* Set PCI Clk Req */
	reg = crystalhd_reg_rd(adp, PCIE_CLK_REQ_REG);
	reg |= PCI_CLK_REQ_ENABLE;
	crystalhd_reg_wr(adp, PCIE_CLK_REQ_REG, reg);

	return true;
}

static struct crystalhd_rx_dma_pkt *crystalhd_hw_alloc_rx_pkt(struct crystalhd_hw *hw)
{
	unsigned long flags = 0;
	struct crystalhd_rx_dma_pkt *temp = NULL;

	if (!hw)
		return NULL;

	spin_lock_irqsave(&hw->lock, flags);
	temp = hw->rx_pkt_pool_head;
	if (temp) {
		hw->rx_pkt_pool_head = hw->rx_pkt_pool_head->next;
		temp->dio_req = NULL;
		temp->pkt_tag = 0;
		temp->flags = 0;
	}
	spin_unlock_irqrestore(&hw->lock, flags);

	return temp;
}

static void crystalhd_hw_free_rx_pkt(struct crystalhd_hw *hw,
				   struct crystalhd_rx_dma_pkt *pkt)
{
	unsigned long flags = 0;

	if (!hw || !pkt)
		return;

	spin_lock_irqsave(&hw->lock, flags);
	pkt->next = hw->rx_pkt_pool_head;
	hw->rx_pkt_pool_head = pkt;
	spin_unlock_irqrestore(&hw->lock, flags);
}

/*
 * Call back from TX - IOQ deletion.
 *
 * This routine will release the TX DMA rings allocated
 * during the setup_dma_rings interface.
 *
 * Memory is allocated per DMA ring basis. This is just
 * a placeholder to be able to create the dio queues.
 */
static void crystalhd_tx_desc_rel_call_back(void *context, void *data)
{
}

/*
 * Rx Packet release callback..
 *
 * Release all user-mapped capture buffers and our DMA packets
 * back to our free pool. The actual cleanup of the DMA
 * ring descriptors happens during DMA ring release.
 */
static void crystalhd_rx_pkt_rel_call_back(void *context, void *data)
{
	struct crystalhd_hw *hw = (struct crystalhd_hw *)context;
	struct crystalhd_rx_dma_pkt *pkt = (struct crystalhd_rx_dma_pkt *)data;

	if (!pkt || !hw) {
		BCMLOG_ERR("Invalid arg - %p %p\n", hw, pkt);
		return;
	}

	if (pkt->dio_req)
		crystalhd_unmap_dio(hw->adp, pkt->dio_req);
	else
		BCMLOG_ERR("Missing dio_req: 0x%x\n", pkt->pkt_tag);

	crystalhd_hw_free_rx_pkt(hw, pkt);
}

#define crystalhd_hw_delete_ioq(adp, q)		\
	if (q) {				\
		crystalhd_delete_dioq(adp, q);	\
		q = NULL;			\
	}
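
/*
 * Note: this macro expands to a bare if-statement, so using it as the
 * body of an outer if/else without braces would mis-bind the else.
 * The call sites below invoke it as a standalone statement, which is
 * safe; a do { } while (0) wrapper (as used for
 * crystalhd_hw_create_ioq below) would be the defensive form.
 */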

static void crystalhd_hw_delete_ioqs(struct crystalhd_hw *hw)
{
	if (!hw)
		return;

	BCMLOG(BCMLOG_DBG, "Deleting IOQs\n");
	crystalhd_hw_delete_ioq(hw->adp, hw->tx_actq);
	crystalhd_hw_delete_ioq(hw->adp, hw->tx_freeq);
	crystalhd_hw_delete_ioq(hw->adp, hw->rx_actq);
	crystalhd_hw_delete_ioq(hw->adp, hw->rx_freeq);
	crystalhd_hw_delete_ioq(hw->adp, hw->rx_rdyq);
}

#define crystalhd_hw_create_ioq(sts, hw, q, cb)			\
do {								\
	sts = crystalhd_create_dioq(hw->adp, &q, cb, hw);	\
	if (sts != BC_STS_SUCCESS)				\
		goto hw_create_ioq_err;				\
} while (0)

/*
 * Create IOQs..
 *
 * TX - Active & Free
 * RX - Active, Ready and Free.
 */
static enum BC_STATUS crystalhd_hw_create_ioqs(struct crystalhd_hw *hw)
{
	enum BC_STATUS sts = BC_STS_SUCCESS;

	if (!hw) {
		BCMLOG_ERR("Invalid Arg!!\n");
		return BC_STS_INV_ARG;
	}

	crystalhd_hw_create_ioq(sts, hw, hw->tx_freeq,
			      crystalhd_tx_desc_rel_call_back);
	crystalhd_hw_create_ioq(sts, hw, hw->tx_actq,
			      crystalhd_tx_desc_rel_call_back);

	crystalhd_hw_create_ioq(sts, hw, hw->rx_freeq,
			      crystalhd_rx_pkt_rel_call_back);
	crystalhd_hw_create_ioq(sts, hw, hw->rx_rdyq,
			      crystalhd_rx_pkt_rel_call_back);
	crystalhd_hw_create_ioq(sts, hw, hw->rx_actq,
			      crystalhd_rx_pkt_rel_call_back);

	return sts;

hw_create_ioq_err:
	crystalhd_hw_delete_ioqs(hw);

	return sts;
}


static bool crystalhd_code_in_full(struct crystalhd_adp *adp, uint32_t needed_sz,
				 bool b_188_byte_pkts, uint8_t flags)
{
	uint32_t base, end, writep, readp;
	uint32_t cpbSize, cpbFullness, fifoSize;

	if (flags & 0x02) { /* ASF bit is set */
		base   = bc_dec_reg_rd(adp, REG_Dec_TsAudCDB2Base);
		end    = bc_dec_reg_rd(adp, REG_Dec_TsAudCDB2End);
		writep = bc_dec_reg_rd(adp, REG_Dec_TsAudCDB2Wrptr);
		readp  = bc_dec_reg_rd(adp, REG_Dec_TsAudCDB2Rdptr);
	} else if (b_188_byte_pkts) { /* encrypted 188-byte packets */
		base   = bc_dec_reg_rd(adp, REG_Dec_TsUser0Base);
		end    = bc_dec_reg_rd(adp, REG_Dec_TsUser0End);
		writep = bc_dec_reg_rd(adp, REG_Dec_TsUser0Wrptr);
		readp  = bc_dec_reg_rd(adp, REG_Dec_TsUser0Rdptr);
	} else {
		base   = bc_dec_reg_rd(adp, REG_DecCA_RegCinBase);
		end    = bc_dec_reg_rd(adp, REG_DecCA_RegCinEnd);
		writep = bc_dec_reg_rd(adp, REG_DecCA_RegCinWrPtr);
		readp  = bc_dec_reg_rd(adp, REG_DecCA_RegCinRdPtr);
	}

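	/*
	 * Circular-buffer fullness: if the write pointer has wrapped
	 * behind the read pointer, the occupied span is the buffer size
	 * minus the read-to-write gap.  E.g. base=0, end=100, readp=80,
	 * writep=20 gives fullness = 100 - (80 - 20) = 40.
	 */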
	cpbSize = end - base;
	if (writep >= readp)
		cpbFullness = writep - readp;
	else
		cpbFullness = (end - base) - (readp - writep);

	fifoSize = cpbSize - cpbFullness;

	if (fifoSize < BC_INFIFO_THRESHOLD)
		return true;

	if (needed_sz > (fifoSize - BC_INFIFO_THRESHOLD))
		return true;

	return false;
}

static enum BC_STATUS crystalhd_hw_tx_req_complete(struct crystalhd_hw *hw,
					    uint32_t list_id, enum BC_STATUS cs)
{
	struct tx_dma_pkt *tx_req;

	if (!hw || !list_id) {
		BCMLOG_ERR("Invalid Arg..\n");
		return BC_STS_INV_ARG;
	}

	hw->pwr_lock--;

	tx_req = (struct tx_dma_pkt *)crystalhd_dioq_find_and_fetch(hw->tx_actq, list_id);
	if (!tx_req) {
		if (cs != BC_STS_IO_USER_ABORT)
			BCMLOG_ERR("Find and Fetch did not find req\n");
		return BC_STS_NO_DATA;
	}

	if (tx_req->call_back) {
		tx_req->call_back(tx_req->dio_req, tx_req->cb_event, cs);
		tx_req->dio_req   = NULL;
		tx_req->cb_event  = NULL;
		tx_req->call_back = NULL;
	} else {
		BCMLOG(BCMLOG_DBG, "Missing Tx Callback - %X\n",
		       tx_req->list_tag);
	}

	/* Now put the tx_list back in the FreeQ */
	tx_req->list_tag = 0;

	return crystalhd_dioq_add(hw->tx_freeq, tx_req, false, 0);
}

static bool crystalhd_tx_list0_handler(struct crystalhd_hw *hw, uint32_t err_sts)
{
	uint32_t err_mask, tmp;
	unsigned long flags = 0;

	err_mask = MISC1_TX_DMA_ERROR_STATUS_TX_L0_DESC_TX_ABORT_ERRORS_MASK |
		MISC1_TX_DMA_ERROR_STATUS_TX_L0_DMA_DATA_TX_ABORT_ERRORS_MASK |
		MISC1_TX_DMA_ERROR_STATUS_TX_L0_FIFO_FULL_ERRORS_MASK;

	if (!(err_sts & err_mask))
		return false;

	BCMLOG_ERR("Error on Tx-L0 %x\n", err_sts);

	tmp = err_mask;

	if (err_sts & MISC1_TX_DMA_ERROR_STATUS_TX_L0_FIFO_FULL_ERRORS_MASK)
		tmp &= ~MISC1_TX_DMA_ERROR_STATUS_TX_L0_FIFO_FULL_ERRORS_MASK;

	if (tmp) {
		spin_lock_irqsave(&hw->lock, flags);
		/* reset list index. */
		hw->tx_list_post_index = 0;
		spin_unlock_irqrestore(&hw->lock, flags);
	}

	tmp = err_sts & err_mask;
	crystalhd_reg_wr(hw->adp, MISC1_TX_DMA_ERROR_STATUS, tmp);

	return true;
}

/* Mirrors the list-0 handler above, using the L1 error masks. */
static bool crystalhd_tx_list1_handler(struct crystalhd_hw *hw, uint32_t err_sts)
{
	uint32_t err_mask, tmp;
	unsigned long flags = 0;

	err_mask = MISC1_TX_DMA_ERROR_STATUS_TX_L1_DESC_TX_ABORT_ERRORS_MASK |
		MISC1_TX_DMA_ERROR_STATUS_TX_L1_DMA_DATA_TX_ABORT_ERRORS_MASK |
		MISC1_TX_DMA_ERROR_STATUS_TX_L1_FIFO_FULL_ERRORS_MASK;

	if (!(err_sts & err_mask))
		return false;

	BCMLOG_ERR("Error on Tx-L1 %x\n", err_sts);

	tmp = err_mask;

	if (err_sts & MISC1_TX_DMA_ERROR_STATUS_TX_L1_FIFO_FULL_ERRORS_MASK)
		tmp &= ~MISC1_TX_DMA_ERROR_STATUS_TX_L1_FIFO_FULL_ERRORS_MASK;

	if (tmp) {
		spin_lock_irqsave(&hw->lock, flags);
		/* reset list index. */
		hw->tx_list_post_index = 0;
		spin_unlock_irqrestore(&hw->lock, flags);
	}

	tmp = err_sts & err_mask;
	crystalhd_reg_wr(hw->adp, MISC1_TX_DMA_ERROR_STATUS, tmp);

	return true;
}

static void crystalhd_tx_isr(struct crystalhd_hw *hw, uint32_t int_sts)
{
	uint32_t err_sts;

	if (int_sts & INTR_INTR_STATUS_L0_TX_DMA_DONE_INTR_MASK)
		crystalhd_hw_tx_req_complete(hw, hw->tx_ioq_tag_seed + 0,
					   BC_STS_SUCCESS);

	if (int_sts & INTR_INTR_STATUS_L1_TX_DMA_DONE_INTR_MASK)
		crystalhd_hw_tx_req_complete(hw, hw->tx_ioq_tag_seed + 1,
					   BC_STS_SUCCESS);

	if (!(int_sts & (INTR_INTR_STATUS_L0_TX_DMA_ERR_INTR_MASK |
			INTR_INTR_STATUS_L1_TX_DMA_ERR_INTR_MASK))) {
		/* No error mask set.. */
		return;
	}

	/* Handle Tx errors. */
	err_sts = crystalhd_reg_rd(hw->adp, MISC1_TX_DMA_ERROR_STATUS);

	if (crystalhd_tx_list0_handler(hw, err_sts))
		crystalhd_hw_tx_req_complete(hw, hw->tx_ioq_tag_seed + 0,
					   BC_STS_ERROR);

	if (crystalhd_tx_list1_handler(hw, err_sts))
		crystalhd_hw_tx_req_complete(hw, hw->tx_ioq_tag_seed + 1,
					   BC_STS_ERROR);

	hw->stats.tx_errors++;
}

static void crystalhd_hw_dump_desc(struct dma_descriptor *p_dma_desc,
				 uint32_t ul_desc_index, uint32_t cnt)
{
	uint32_t ix, ll = 0;

	if (!p_dma_desc || !cnt)
		return;

	/* ll is a hard-coded log-level gate (0), so this dump is
	 * disabled unless ll is edited to a BCMLOG level. */
	if (!ll)
		return;

	/* Dump each descriptor in the requested range. */
	for (ix = ul_desc_index; ix < (ul_desc_index + cnt); ix++) {
		BCMLOG(ll, "%s[%d] Buff[%x:%x] Next:[%x:%x] XferSz:%x Intr:%x,Last:%x\n",
		       ((p_dma_desc[ix].dma_dir) ? "TDesc" : "RDesc"),
		       ix,
		       p_dma_desc[ix].buff_addr_high,
		       p_dma_desc[ix].buff_addr_low,
		       p_dma_desc[ix].next_desc_addr_high,
		       p_dma_desc[ix].next_desc_addr_low,
		       p_dma_desc[ix].xfer_size,
		       p_dma_desc[ix].intr_enable,
		       p_dma_desc[ix].last_rec_indicator);
	}
}

static enum BC_STATUS crystalhd_hw_fill_desc(struct crystalhd_dio_req *ioreq,
				      struct dma_descriptor *desc,
				      dma_addr_t desc_paddr_base,
				      uint32_t sg_cnt, uint32_t sg_st_ix,
				      uint32_t sg_st_off, uint32_t xfr_sz)
{
	uint32_t count = 0, ix = 0, sg_ix = 0, len = 0, last_desc_ix = 0;
	dma_addr_t desc_phy_addr = desc_paddr_base;
	union addr_64 addr_temp;

	if (!ioreq || !desc || !desc_paddr_base || !xfr_sz ||
	    (!sg_cnt && !ioreq->uinfo.dir_tx)) {
		BCMLOG_ERR("Invalid Args\n");
		return BC_STS_INV_ARG;
	}

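	/*
	 * Build one descriptor per scatter/gather element.  Each entry
	 * is chained to the next by physical address (its own address
	 * plus sizeof(struct dma_descriptor)); the tail is patched
	 * after the loop with last_rec_indicator, a null next pointer,
	 * and an interrupt-on-completion flag.  xfer_size is expressed
	 * in 32-bit words.
	 */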
	for (ix = 0; ix < sg_cnt; ix++) {

		/* Setup SGLE index. */
		sg_ix = ix + sg_st_ix;

		/* Get SGLE length */
		len = crystalhd_get_sgle_len(ioreq, sg_ix);
		if (len % 4) {
			BCMLOG_ERR("unaligned sg len %d (ix %d of %d)\n",
				   len, sg_ix, sg_cnt);
			return BC_STS_NOT_IMPL;
		}
		/* Setup DMA desc with Phy addr & Length at current index. */
		addr_temp.full_addr = crystalhd_get_sgle_paddr(ioreq, sg_ix);
		if (sg_ix == sg_st_ix) {
			addr_temp.full_addr += sg_st_off;
			len -= sg_st_off;
		}
		memset(&desc[ix], 0, sizeof(desc[ix]));
		desc[ix].buff_addr_low  = addr_temp.low_part;
		desc[ix].buff_addr_high = addr_temp.high_part;
		desc[ix].dma_dir        = ioreq->uinfo.dir_tx;

		/* Chain DMA descriptor. */
		addr_temp.full_addr = desc_phy_addr + sizeof(struct dma_descriptor);
		desc[ix].next_desc_addr_low = addr_temp.low_part;
		desc[ix].next_desc_addr_high = addr_temp.high_part;

		if ((count + len) > xfr_sz)
			len = xfr_sz - count;

		/* Debug.. */
		if ((!len) || (len > crystalhd_get_sgle_len(ioreq, sg_ix))) {
			BCMLOG_ERR("inv-len(%x) Ix(%d) count:%x xfr_sz:%x sg_cnt:%d\n",
				   len, ix, count, xfr_sz, sg_cnt);
			return BC_STS_ERROR;
		}
		/* Length is expressed in multiples of 4 (32-bit words) */
		desc[ix].xfer_size = (len / 4);

		crystalhd_hw_dump_desc(desc, ix, 1);

		count += len;
		desc_phy_addr += sizeof(struct dma_descriptor);
	}

	last_desc_ix = ix - 1;

	if (ioreq->fb_size) {
		/* Pad the transfer to a 4-byte boundary with a fill-byte
		 * descriptor: one 32-bit word, 4 - fb_size fill bytes. */
		memset(&desc[ix], 0, sizeof(desc[ix]));
		addr_temp.full_addr     = ioreq->fb_pa;
		desc[ix].buff_addr_low  = addr_temp.low_part;
		desc[ix].buff_addr_high = addr_temp.high_part;
		desc[ix].dma_dir        = ioreq->uinfo.dir_tx;
		desc[ix].xfer_size	= 1;
		desc[ix].fill_bytes	= 4 - ioreq->fb_size;
		count += ioreq->fb_size;
		last_desc_ix++;
	}

	/* setup last descriptor.. */
	desc[last_desc_ix].last_rec_indicator  = 1;
	desc[last_desc_ix].next_desc_addr_low  = 0;
	desc[last_desc_ix].next_desc_addr_high = 0;
	desc[last_desc_ix].intr_enable = 1;

	crystalhd_hw_dump_desc(desc, last_desc_ix, 1);

	if (count != xfr_sz) {
		BCMLOG_ERR("internal error sz curr:%x exp:%x\n", count, xfr_sz);
		return BC_STS_ERROR;
	}

	return BC_STS_SUCCESS;
}

static enum BC_STATUS crystalhd_xlat_sgl_to_dma_desc(struct crystalhd_dio_req *ioreq,
					      struct dma_desc_mem *pdesc_mem,
					      uint32_t *uv_desc_index)
{
	struct dma_descriptor *desc = NULL;
	dma_addr_t desc_paddr_base = 0;
	uint32_t sg_cnt = 0, sg_st_ix = 0, sg_st_off = 0;
	uint32_t xfr_sz = 0;
	enum BC_STATUS sts = BC_STS_SUCCESS;

	/* Check params.. */
	if (!ioreq || !pdesc_mem || !uv_desc_index) {
		BCMLOG_ERR("Invalid Args\n");
		return BC_STS_INV_ARG;
	}

	if (!pdesc_mem->sz || !pdesc_mem->pdma_desc_start ||
	    !ioreq->sg || (!ioreq->sg_cnt && !ioreq->uinfo.dir_tx)) {
		BCMLOG_ERR("Invalid Args\n");
		return BC_STS_INV_ARG;
	}

	if ((ioreq->uinfo.dir_tx) && (ioreq->uinfo.uv_offset)) {
		BCMLOG_ERR("UV offset for TX??\n");
		return BC_STS_INV_ARG;
	}

	desc = pdesc_mem->pdma_desc_start;
	desc_paddr_base = pdesc_mem->phy_addr;

	/* For TX (or RX with no UV plane) map the whole transfer;
	 * otherwise map the Y plane first, up to uv_offset. */
	if (ioreq->uinfo.dir_tx || (ioreq->uinfo.uv_offset == 0)) {
		sg_cnt = ioreq->sg_cnt;
		xfr_sz = ioreq->uinfo.xfr_len;
	} else {
		sg_cnt = ioreq->uinfo.uv_sg_ix + 1;
		xfr_sz = ioreq->uinfo.uv_offset;
	}

	sts = crystalhd_hw_fill_desc(ioreq, desc, desc_paddr_base, sg_cnt,
				   sg_st_ix, sg_st_off, xfr_sz);

	if ((sts != BC_STS_SUCCESS) || !ioreq->uinfo.uv_offset)
		return sts;

	/* Prepare for UV mapping.. */
	desc = &pdesc_mem->pdma_desc_start[sg_cnt];
	desc_paddr_base = pdesc_mem->phy_addr +
			  (sg_cnt * sizeof(struct dma_descriptor));

	/* Done with desc addr.. now update sg stuff. */
	sg_cnt    = ioreq->sg_cnt - ioreq->uinfo.uv_sg_ix;
	xfr_sz    = ioreq->uinfo.xfr_len - ioreq->uinfo.uv_offset;
	sg_st_ix  = ioreq->uinfo.uv_sg_ix;
	sg_st_off = ioreq->uinfo.uv_sg_off;

	sts = crystalhd_hw_fill_desc(ioreq, desc, desc_paddr_base, sg_cnt,
				   sg_st_ix, sg_st_off, xfr_sz);
	if (sts != BC_STS_SUCCESS)
		return sts;

	*uv_desc_index = sg_st_ix;

	return sts;
}

static void crystalhd_start_tx_dma_engine(struct crystalhd_hw *hw)
{
	uint32_t dma_cntrl;

	dma_cntrl = crystalhd_reg_rd(hw->adp, MISC1_TX_SW_DESC_LIST_CTRL_STS);
	if (!(dma_cntrl & DMA_START_BIT)) {
		dma_cntrl |= DMA_START_BIT;
		crystalhd_reg_wr(hw->adp, MISC1_TX_SW_DESC_LIST_CTRL_STS,
			       dma_cntrl);
	}
}

/* _CHECK_THIS_
 *
 * Verify if the Stop generates a completion interrupt or not.
 * If it does not generate an interrupt, then add polling here.
 */
static enum BC_STATUS crystalhd_stop_tx_dma_engine(struct crystalhd_hw *hw)
{
	uint32_t dma_cntrl, cnt = 30;
	uint32_t l1 = 1, l2 = 1;
	unsigned long flags = 0;

	dma_cntrl = crystalhd_reg_rd(hw->adp, MISC1_TX_SW_DESC_LIST_CTRL_STS);

	BCMLOG(BCMLOG_DBG, "Stopping TX DMA Engine..\n");

	if (!(dma_cntrl & DMA_START_BIT)) {
		BCMLOG(BCMLOG_DBG, "Already Stopped\n");
		return BC_STS_SUCCESS;
	}

	crystalhd_disable_interrupts(hw->adp);

	/* Issue stop to HW */
	/* This bit when set gave problems. Please check */
	dma_cntrl &= ~DMA_START_BIT;
	crystalhd_reg_wr(hw->adp, MISC1_TX_SW_DESC_LIST_CTRL_STS, dma_cntrl);

	BCMLOG(BCMLOG_DBG, "Cleared the DMA Start bit\n");

	/* Poll for 3 seconds (30 * 100 ms) on both the lists.. */
	while ((l1 || l2) && cnt) {

		if (l1) {
			l1 = crystalhd_reg_rd(hw->adp, MISC1_TX_FIRST_DESC_L_ADDR_LIST0);
			l1 &= DMA_START_BIT;
		}

		if (l2) {
			l2 = crystalhd_reg_rd(hw->adp, MISC1_TX_FIRST_DESC_L_ADDR_LIST1);
			l2 &= DMA_START_BIT;
		}

		msleep_interruptible(100);

		cnt--;
	}

	if (!cnt) {
		BCMLOG_ERR("Failed to stop TX DMA.. l1 %d, l2 %d\n", l1, l2);
		crystalhd_enable_interrupts(hw->adp);
		return BC_STS_ERROR;
	}

	spin_lock_irqsave(&hw->lock, flags);
	hw->tx_list_post_index = 0;
	spin_unlock_irqrestore(&hw->lock, flags);
	BCMLOG(BCMLOG_DBG, "stopped TX DMA..\n");
	crystalhd_enable_interrupts(hw->adp);

	return BC_STS_SUCCESS;
}

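/*
 * PIB (picture information block) delivery/release queues live in
 * device memory as circular lists: word 0 holds the read offset,
 * word 1 the write offset, and entries occupy offsets
 * MIN_PIB_Q_DEPTH..MAX_PIB_Q_DEPTH-1.  The wrapped-count arithmetic
 * below, (w_offset + MAX) - (r_offset + MIN), equals
 * (MAX - r_offset) + (w_offset - MIN): the entries from the read
 * offset to the end of the ring plus those from the start of the
 * ring up to the write offset.
 */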
static uint32_t crystalhd_get_pib_avail_cnt(struct crystalhd_hw *hw)
{
	/*
	 * Position of the PIB Entries can be found at
	 * 0th and the 1st location of the Circular list.
	 */
	uint32_t Q_addr;
	uint32_t pib_cnt, r_offset, w_offset;

	Q_addr = hw->pib_del_Q_addr;

	/* Get the Read Pointer */
	crystalhd_mem_rd(hw->adp, Q_addr, 1, &r_offset);

	/* Get the Write Pointer */
	crystalhd_mem_rd(hw->adp, Q_addr + sizeof(uint32_t), 1, &w_offset);

	if (r_offset == w_offset)
		return 0;	/* Queue is empty */

	if (w_offset > r_offset)
		pib_cnt = w_offset - r_offset;
	else
		pib_cnt = (w_offset + MAX_PIB_Q_DEPTH) -
			  (r_offset + MIN_PIB_Q_DEPTH);

	if (pib_cnt > MAX_PIB_Q_DEPTH) {
		BCMLOG_ERR("Invalid PIB Count (%u)\n", pib_cnt);
		return 0;
	}

	return pib_cnt;
}

static uint32_t crystalhd_get_addr_from_pib_Q(struct crystalhd_hw *hw)
{
	uint32_t Q_addr;
	uint32_t addr_entry, r_offset, w_offset;

	Q_addr = hw->pib_del_Q_addr;

	/* Get the Read Pointer; the 0th location is the Read Pointer */
	crystalhd_mem_rd(hw->adp, Q_addr, 1, &r_offset);

	/* Get the Write Pointer; the 1st location is the Write Pointer */
	crystalhd_mem_rd(hw->adp, Q_addr + sizeof(uint32_t), 1, &w_offset);

	/* Queue is empty */
	if (r_offset == w_offset)
		return 0;

	if ((r_offset < MIN_PIB_Q_DEPTH) || (r_offset >= MAX_PIB_Q_DEPTH))
		return 0;

	/* Get the Actual Address of the PIB */
	crystalhd_mem_rd(hw->adp, Q_addr + (r_offset * sizeof(uint32_t)),
		       1, &addr_entry);

	/* Increment the Read Pointer */
	r_offset++;

	if (MAX_PIB_Q_DEPTH == r_offset)
		r_offset = MIN_PIB_Q_DEPTH;

	/* Write the read pointer back to its location */
	crystalhd_mem_wr(hw->adp, Q_addr, 1, &r_offset);

	return addr_entry;
}

static bool crystalhd_rel_addr_to_pib_Q(struct crystalhd_hw *hw, uint32_t addr_to_rel)
{
	uint32_t Q_addr;
	uint32_t r_offset, w_offset, n_offset;

	Q_addr = hw->pib_rel_Q_addr;

	/* Get the Read Pointer */
	crystalhd_mem_rd(hw->adp, Q_addr, 1, &r_offset);

	/* Get the Write Pointer */
	crystalhd_mem_rd(hw->adp, Q_addr + sizeof(uint32_t), 1, &w_offset);

	if ((r_offset < MIN_PIB_Q_DEPTH) ||
	    (r_offset >= MAX_PIB_Q_DEPTH))
		return false;

	n_offset = w_offset + 1;

	if (MAX_PIB_Q_DEPTH == n_offset)
		n_offset = MIN_PIB_Q_DEPTH;

	if (r_offset == n_offset)
		return false; /* should never happen */

	/* Write the DRAM address to the queue at the current write offset */
	crystalhd_mem_wr(hw->adp, Q_addr + (w_offset * sizeof(uint32_t)),
		       1, &addr_to_rel);

	/* Publish the new value of the write pointer */
	crystalhd_mem_wr(hw->adp, Q_addr + sizeof(uint32_t), 1, &n_offset);

	return true;
}

static void cpy_pib_to_app(struct c011_pib *src_pib, struct BC_PIC_INFO_BLOCK *dst_pib)
{
	if (!src_pib || !dst_pib) {
		BCMLOG_ERR("Invalid Arguments\n");
		return;
	}

	dst_pib->timeStamp            = 0;
	dst_pib->picture_number       = src_pib->ppb.picture_number;
	dst_pib->width                = src_pib->ppb.width;
	dst_pib->height               = src_pib->ppb.height;
	dst_pib->chroma_format        = src_pib->ppb.chroma_format;
	dst_pib->pulldown             = src_pib->ppb.pulldown;
	dst_pib->flags                = src_pib->ppb.flags;
	dst_pib->sess_num             = src_pib->ptsStcOffset;
	dst_pib->aspect_ratio         = src_pib->ppb.aspect_ratio;
	dst_pib->colour_primaries     = src_pib->ppb.colour_primaries;
	dst_pib->picture_meta_payload = src_pib->ppb.picture_meta_payload;
	dst_pib->frame_rate           = src_pib->resolution;
}

static void crystalhd_hw_proc_pib(struct crystalhd_hw *hw)
{
	unsigned int cnt;
	struct c011_pib src_pib;
	uint32_t pib_addr, pib_cnt;
	struct BC_PIC_INFO_BLOCK *AppPib;
	struct crystalhd_rx_dma_pkt *rx_pkt = NULL;

	pib_cnt = crystalhd_get_pib_avail_cnt(hw);

	if (!pib_cnt)
		return;

	for (cnt = 0; cnt < pib_cnt; cnt++) {

		pib_addr = crystalhd_get_addr_from_pib_Q(hw);
		crystalhd_mem_rd(hw->adp, pib_addr, sizeof(struct c011_pib) / 4,
			       (uint32_t *)&src_pib);

		if (src_pib.bFormatChange) {
			rx_pkt = (struct crystalhd_rx_dma_pkt *)crystalhd_dioq_fetch(hw->rx_freeq);
			if (!rx_pkt)
				return;
			rx_pkt->flags = 0;
			rx_pkt->flags |= COMP_FLAG_PIB_VALID | COMP_FLAG_FMT_CHANGE;
			AppPib = &rx_pkt->pib;
			cpy_pib_to_app(&src_pib, AppPib);

			BCMLOG(BCMLOG_DBG,
			       "App PIB:%x %x %x %x %x %x %x %x %x %x\n",
			       rx_pkt->pib.picture_number,
			       rx_pkt->pib.aspect_ratio,
			       rx_pkt->pib.chroma_format,
			       rx_pkt->pib.colour_primaries,
			       rx_pkt->pib.frame_rate,
			       rx_pkt->pib.width,
			       rx_pkt->pib.height,
			       rx_pkt->pib.n_drop,
			       rx_pkt->pib.pulldown,
			       rx_pkt->pib.ycom);

			crystalhd_dioq_add(hw->rx_rdyq, (void *)rx_pkt, true, rx_pkt->pkt_tag);
		}

		crystalhd_rel_addr_to_pib_Q(hw, pib_addr);
	}
}

static void crystalhd_start_rx_dma_engine(struct crystalhd_hw *hw)
{
	uint32_t dma_cntrl;

	dma_cntrl = crystalhd_reg_rd(hw->adp, MISC1_Y_RX_SW_DESC_LIST_CTRL_STS);
	if (!(dma_cntrl & DMA_START_BIT)) {
		dma_cntrl |= DMA_START_BIT;
		crystalhd_reg_wr(hw->adp, MISC1_Y_RX_SW_DESC_LIST_CTRL_STS, dma_cntrl);
	}

	dma_cntrl = crystalhd_reg_rd(hw->adp, MISC1_UV_RX_SW_DESC_LIST_CTRL_STS);
	if (!(dma_cntrl & DMA_START_BIT)) {
		dma_cntrl |= DMA_START_BIT;
		crystalhd_reg_wr(hw->adp, MISC1_UV_RX_SW_DESC_LIST_CTRL_STS, dma_cntrl);
	}
}

static void crystalhd_stop_rx_dma_engine(struct crystalhd_hw *hw)
{
	uint32_t dma_cntrl = 0, count = 30;
	uint32_t l0y = 1, l0uv = 1, l1y = 1, l1uv = 1;

	dma_cntrl = crystalhd_reg_rd(hw->adp, MISC1_Y_RX_SW_DESC_LIST_CTRL_STS);
	if ((dma_cntrl & DMA_START_BIT)) {
		dma_cntrl &= ~DMA_START_BIT;
		crystalhd_reg_wr(hw->adp, MISC1_Y_RX_SW_DESC_LIST_CTRL_STS, dma_cntrl);
	}

	dma_cntrl = crystalhd_reg_rd(hw->adp, MISC1_UV_RX_SW_DESC_LIST_CTRL_STS);
	if ((dma_cntrl & DMA_START_BIT)) {
		dma_cntrl &= ~DMA_START_BIT;
		crystalhd_reg_wr(hw->adp, MISC1_UV_RX_SW_DESC_LIST_CTRL_STS, dma_cntrl);
	}

	/* Poll for 3 seconds (30 * 100 ms) on both the lists.. */
	while ((l0y || l0uv || l1y || l1uv) && count) {

		if (l0y) {
			l0y = crystalhd_reg_rd(hw->adp, MISC1_Y_RX_FIRST_DESC_L_ADDR_LIST0);
			l0y &= DMA_START_BIT;
			if (!l0y)
				hw->rx_list_sts[0] &= ~rx_waiting_y_intr;
		}

		if (l1y) {
			l1y = crystalhd_reg_rd(hw->adp, MISC1_Y_RX_FIRST_DESC_L_ADDR_LIST1);
			l1y &= DMA_START_BIT;
			if (!l1y)
				hw->rx_list_sts[1] &= ~rx_waiting_y_intr;
		}

		if (l0uv) {
			l0uv = crystalhd_reg_rd(hw->adp, MISC1_UV_RX_FIRST_DESC_L_ADDR_LIST0);
			l0uv &= DMA_START_BIT;
			if (!l0uv)
				hw->rx_list_sts[0] &= ~rx_waiting_uv_intr;
		}

		if (l1uv) {
			l1uv = crystalhd_reg_rd(hw->adp, MISC1_UV_RX_FIRST_DESC_L_ADDR_LIST1);
			l1uv &= DMA_START_BIT;
			if (!l1uv)
				hw->rx_list_sts[1] &= ~rx_waiting_uv_intr;
		}
		msleep_interruptible(100);
		count--;
	}

	hw->rx_list_post_index = 0;

	BCMLOG(BCMLOG_SSTEP, "Capture Stop: %d List0:Sts:%x List1:Sts:%x\n",
	       count, hw->rx_list_sts[0], hw->rx_list_sts[1]);
}

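/*
 * The two hardware RX DMA lists are double-buffered:
 * rx_list_post_index alternates 0/1 (mod DMA_ENGINE_CNT), and each
 * posted packet is tagged rx_pkt_tag_seed + list index so that
 * completions can be matched back to the list that produced them.
 */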
static enum BC_STATUS crystalhd_hw_prog_rxdma(struct crystalhd_hw *hw, struct crystalhd_rx_dma_pkt *rx_pkt)
{
	uint32_t y_low_addr_reg, y_high_addr_reg;
	uint32_t uv_low_addr_reg, uv_high_addr_reg;
	union addr_64 desc_addr;
	unsigned long flags;

	if (!hw || !rx_pkt) {
		BCMLOG_ERR("Invalid Arguments\n");
		return BC_STS_INV_ARG;
	}

	if (hw->rx_list_post_index >= DMA_ENGINE_CNT) {
		BCMLOG_ERR("List Out Of bounds %x\n", hw->rx_list_post_index);
		return BC_STS_INV_ARG;
	}

	spin_lock_irqsave(&hw->rx_lock, flags);
	if (sts_free != hw->rx_list_sts[hw->rx_list_post_index]) {
		spin_unlock_irqrestore(&hw->rx_lock, flags);
		return BC_STS_BUSY;
	}

	if (!hw->rx_list_post_index) {
		y_low_addr_reg   = MISC1_Y_RX_FIRST_DESC_L_ADDR_LIST0;
		y_high_addr_reg  = MISC1_Y_RX_FIRST_DESC_U_ADDR_LIST0;
		uv_low_addr_reg  = MISC1_UV_RX_FIRST_DESC_L_ADDR_LIST0;
		uv_high_addr_reg = MISC1_UV_RX_FIRST_DESC_U_ADDR_LIST0;
	} else {
		y_low_addr_reg   = MISC1_Y_RX_FIRST_DESC_L_ADDR_LIST1;
		y_high_addr_reg  = MISC1_Y_RX_FIRST_DESC_U_ADDR_LIST1;
		uv_low_addr_reg  = MISC1_UV_RX_FIRST_DESC_L_ADDR_LIST1;
		uv_high_addr_reg = MISC1_UV_RX_FIRST_DESC_U_ADDR_LIST1;
	}
	rx_pkt->pkt_tag = hw->rx_pkt_tag_seed + hw->rx_list_post_index;
	hw->rx_list_sts[hw->rx_list_post_index] |= rx_waiting_y_intr;
	if (rx_pkt->uv_phy_addr)
		hw->rx_list_sts[hw->rx_list_post_index] |= rx_waiting_uv_intr;
	hw->rx_list_post_index = (hw->rx_list_post_index + 1) % DMA_ENGINE_CNT;
	spin_unlock_irqrestore(&hw->rx_lock, flags);

	crystalhd_dioq_add(hw->rx_actq, (void *)rx_pkt, false, rx_pkt->pkt_tag);

	crystalhd_start_rx_dma_engine(hw);
	/* Program the Y descriptor */
	desc_addr.full_addr = rx_pkt->desc_mem.phy_addr;
	crystalhd_reg_wr(hw->adp, y_high_addr_reg, desc_addr.high_part);
	crystalhd_reg_wr(hw->adp, y_low_addr_reg, desc_addr.low_part | 0x01);

	if (rx_pkt->uv_phy_addr) {
		/* Program the UV descriptor */
		desc_addr.full_addr = rx_pkt->uv_phy_addr;
		crystalhd_reg_wr(hw->adp, uv_high_addr_reg, desc_addr.high_part);
		crystalhd_reg_wr(hw->adp, uv_low_addr_reg, desc_addr.low_part | 0x01);
	}

	return BC_STS_SUCCESS;
}

static enum BC_STATUS crystalhd_hw_post_cap_buff(struct crystalhd_hw *hw,
					  struct crystalhd_rx_dma_pkt *rx_pkt)
{
	enum BC_STATUS sts = crystalhd_hw_prog_rxdma(hw, rx_pkt);

	if (sts == BC_STS_BUSY)
		crystalhd_dioq_add(hw->rx_freeq, (void *)rx_pkt,
				 false, rx_pkt->pkt_tag);

	return sts;
}

static void crystalhd_get_dnsz(struct crystalhd_hw *hw, uint32_t list_index,
			     uint32_t *y_dw_dnsz, uint32_t *uv_dw_dnsz)
{
	uint32_t y_dn_sz_reg, uv_dn_sz_reg;

	if (!list_index) {
		y_dn_sz_reg  = MISC1_Y_RX_LIST0_CUR_BYTE_CNT;
		uv_dn_sz_reg = MISC1_UV_RX_LIST0_CUR_BYTE_CNT;
	} else {
		y_dn_sz_reg  = MISC1_Y_RX_LIST1_CUR_BYTE_CNT;
		uv_dn_sz_reg = MISC1_UV_RX_LIST1_CUR_BYTE_CNT;
	}

	*y_dw_dnsz  = crystalhd_reg_rd(hw->adp, y_dn_sz_reg);
	*uv_dw_dnsz = crystalhd_reg_rd(hw->adp, uv_dn_sz_reg);
}

/*
 * This function should be called only after making sure that the two DMA
 * lists are free. This function does not check if DMAs are active before
 * turning off the DMA.
 */
static void crystalhd_hw_finalize_pause(struct crystalhd_hw *hw)
{
	uint32_t dma_cntrl, aspm;

	hw->stop_pending = 0;

	dma_cntrl = crystalhd_reg_rd(hw->adp, MISC1_Y_RX_SW_DESC_LIST_CTRL_STS);
	if (dma_cntrl & DMA_START_BIT) {
		dma_cntrl &= ~DMA_START_BIT;
		crystalhd_reg_wr(hw->adp, MISC1_Y_RX_SW_DESC_LIST_CTRL_STS, dma_cntrl);
	}

	dma_cntrl = crystalhd_reg_rd(hw->adp, MISC1_UV_RX_SW_DESC_LIST_CTRL_STS);
	if (dma_cntrl & DMA_START_BIT) {
		dma_cntrl &= ~DMA_START_BIT;
		crystalhd_reg_wr(hw->adp, MISC1_UV_RX_SW_DESC_LIST_CTRL_STS, dma_cntrl);
	}
	hw->rx_list_post_index = 0;

	/* Re-enable ASPM L1 while paused */
	aspm = crystalhd_reg_rd(hw->adp, PCIE_DLL_DATA_LINK_CONTROL);
	aspm |= ASPM_L1_ENABLE;
	crystalhd_reg_wr(hw->adp, PCIE_DLL_DATA_LINK_CONTROL, aspm);
}

static enum BC_STATUS crystalhd_rx_pkt_done(struct crystalhd_hw *hw, uint32_t list_index,
				     enum BC_STATUS comp_sts)
{
	struct crystalhd_rx_dma_pkt *rx_pkt = NULL;
	uint32_t y_dw_dnsz, uv_dw_dnsz;
	enum BC_STATUS sts = BC_STS_SUCCESS;

	if (!hw || list_index >= DMA_ENGINE_CNT) {
		BCMLOG_ERR("Invalid Arguments\n");
		return BC_STS_INV_ARG;
	}

	rx_pkt = crystalhd_dioq_find_and_fetch(hw->rx_actq,
					     hw->rx_pkt_tag_seed + list_index);
	if (!rx_pkt) {
		BCMLOG_ERR("Act-Q:PostIx:%x L0Sts:%x L1Sts:%x current L:%x tag:%x comp:%x\n",
			   hw->rx_list_post_index, hw->rx_list_sts[0],
			   hw->rx_list_sts[1], list_index,
			   hw->rx_pkt_tag_seed + list_index, comp_sts);
		return BC_STS_INV_ARG;
	}

	if (comp_sts == BC_STS_SUCCESS) {
		crystalhd_get_dnsz(hw, list_index, &y_dw_dnsz, &uv_dw_dnsz);
		rx_pkt->dio_req->uinfo.y_done_sz = y_dw_dnsz;
		rx_pkt->flags = COMP_FLAG_DATA_VALID;
		if (rx_pkt->uv_phy_addr)
			rx_pkt->dio_req->uinfo.uv_done_sz = uv_dw_dnsz;
		crystalhd_dioq_add(hw->rx_rdyq, rx_pkt, true,
				hw->rx_pkt_tag_seed + list_index);
		return sts;
	}

	/* Check if we can post this DIO again. */
	return crystalhd_hw_post_cap_buff(hw, rx_pkt);
}

static bool crystalhd_rx_list0_handler(struct crystalhd_hw *hw, uint32_t int_sts,
				     uint32_t y_err_sts, uint32_t uv_err_sts)
{
	uint32_t tmp;
	enum list_sts tmp_lsts;

	if (!(y_err_sts & GET_Y0_ERR_MSK) && !(uv_err_sts & GET_UV0_ERR_MSK))
		return false;

	tmp_lsts = hw->rx_list_sts[0];

	/* Y0 - DMA */
	tmp = y_err_sts & GET_Y0_ERR_MSK;
	if (int_sts & INTR_INTR_STATUS_L0_Y_RX_DMA_DONE_INTR_MASK)
		hw->rx_list_sts[0] &= ~rx_waiting_y_intr;

	if (y_err_sts & MISC1_Y_RX_ERROR_STATUS_RX_L0_UNDERRUN_ERROR_MASK) {
		hw->rx_list_sts[0] &= ~rx_waiting_y_intr;
		tmp &= ~MISC1_Y_RX_ERROR_STATUS_RX_L0_UNDERRUN_ERROR_MASK;
	}

	if (y_err_sts & MISC1_Y_RX_ERROR_STATUS_RX_L0_FIFO_FULL_ERRORS_MASK) {
		hw->rx_list_sts[0] &= ~rx_y_mask;
		hw->rx_list_sts[0] |= rx_y_error;
		tmp &= ~MISC1_Y_RX_ERROR_STATUS_RX_L0_FIFO_FULL_ERRORS_MASK;
	}

	if (tmp) {
		hw->rx_list_sts[0] &= ~rx_y_mask;
		hw->rx_list_sts[0] |= rx_y_error;
		hw->rx_list_post_index = 0;
	}

	/* UV0 - DMA */
	tmp = uv_err_sts & GET_UV0_ERR_MSK;
	if (int_sts & INTR_INTR_STATUS_L0_UV_RX_DMA_DONE_INTR_MASK)
		hw->rx_list_sts[0] &= ~rx_waiting_uv_intr;

	if (uv_err_sts & MISC1_UV_RX_ERROR_STATUS_RX_L0_UNDERRUN_ERROR_MASK) {
		hw->rx_list_sts[0] &= ~rx_waiting_uv_intr;
		tmp &= ~MISC1_UV_RX_ERROR_STATUS_RX_L0_UNDERRUN_ERROR_MASK;
	}

	if (uv_err_sts & MISC1_UV_RX_ERROR_STATUS_RX_L0_FIFO_FULL_ERRORS_MASK) {
		hw->rx_list_sts[0] &= ~rx_uv_mask;
		hw->rx_list_sts[0] |= rx_uv_error;
		tmp &= ~MISC1_UV_RX_ERROR_STATUS_RX_L0_FIFO_FULL_ERRORS_MASK;
	}

	if (tmp) {
		hw->rx_list_sts[0] &= ~rx_uv_mask;
		hw->rx_list_sts[0] |= rx_uv_error;
		hw->rx_list_post_index = 0;
	}

	if (y_err_sts & GET_Y0_ERR_MSK) {
		tmp = y_err_sts & GET_Y0_ERR_MSK;
		crystalhd_reg_wr(hw->adp, MISC1_Y_RX_ERROR_STATUS, tmp);
	}

	if (uv_err_sts & GET_UV0_ERR_MSK) {
		tmp = uv_err_sts & GET_UV0_ERR_MSK;
		crystalhd_reg_wr(hw->adp, MISC1_UV_RX_ERROR_STATUS, tmp);
	}

	return (tmp_lsts != hw->rx_list_sts[0]);
}

/* Mirrors the list-0 handler above, using the L1 error masks. */
static bool crystalhd_rx_list1_handler(struct crystalhd_hw *hw, uint32_t int_sts,
				     uint32_t y_err_sts, uint32_t uv_err_sts)
{
	uint32_t tmp;
	enum list_sts tmp_lsts;

	if (!(y_err_sts & GET_Y1_ERR_MSK) && !(uv_err_sts & GET_UV1_ERR_MSK))
		return false;

	tmp_lsts = hw->rx_list_sts[1];

	/* Y1 - DMA */
	tmp = y_err_sts & GET_Y1_ERR_MSK;
	if (int_sts & INTR_INTR_STATUS_L1_Y_RX_DMA_DONE_INTR_MASK)
		hw->rx_list_sts[1] &= ~rx_waiting_y_intr;

	if (y_err_sts & MISC1_Y_RX_ERROR_STATUS_RX_L1_UNDERRUN_ERROR_MASK) {
		hw->rx_list_sts[1] &= ~rx_waiting_y_intr;
		tmp &= ~MISC1_Y_RX_ERROR_STATUS_RX_L1_UNDERRUN_ERROR_MASK;
	}

	if (y_err_sts & MISC1_Y_RX_ERROR_STATUS_RX_L1_FIFO_FULL_ERRORS_MASK) {
		/* Add retry-support.. */
		hw->rx_list_sts[1] &= ~rx_y_mask;
		hw->rx_list_sts[1] |= rx_y_error;
		tmp &= ~MISC1_Y_RX_ERROR_STATUS_RX_L1_FIFO_FULL_ERRORS_MASK;
	}

	if (tmp) {
		hw->rx_list_sts[1] &= ~rx_y_mask;
		hw->rx_list_sts[1] |= rx_y_error;
		hw->rx_list_post_index = 0;
	}

	/* UV1 - DMA */
	tmp = uv_err_sts & GET_UV1_ERR_MSK;
	if (int_sts & INTR_INTR_STATUS_L1_UV_RX_DMA_DONE_INTR_MASK)
		hw->rx_list_sts[1] &= ~rx_waiting_uv_intr;

	if (uv_err_sts & MISC1_UV_RX_ERROR_STATUS_RX_L1_UNDERRUN_ERROR_MASK) {
		hw->rx_list_sts[1] &= ~rx_waiting_uv_intr;
		tmp &= ~MISC1_UV_RX_ERROR_STATUS_RX_L1_UNDERRUN_ERROR_MASK;
	}

	if (uv_err_sts & MISC1_UV_RX_ERROR_STATUS_RX_L1_FIFO_FULL_ERRORS_MASK) {
		/* Add retry-support */
		hw->rx_list_sts[1] &= ~rx_uv_mask;
		hw->rx_list_sts[1] |= rx_uv_error;
		tmp &= ~MISC1_UV_RX_ERROR_STATUS_RX_L1_FIFO_FULL_ERRORS_MASK;
	}

	if (tmp) {
		hw->rx_list_sts[1] &= ~rx_uv_mask;
		hw->rx_list_sts[1] |= rx_uv_error;
		hw->rx_list_post_index = 0;
	}

	if (y_err_sts & GET_Y1_ERR_MSK) {
		tmp = y_err_sts & GET_Y1_ERR_MSK;
		crystalhd_reg_wr(hw->adp, MISC1_Y_RX_ERROR_STATUS, tmp);
	}

	if (uv_err_sts & GET_UV1_ERR_MSK) {
		tmp = uv_err_sts & GET_UV1_ERR_MSK;
		crystalhd_reg_wr(hw->adp, MISC1_UV_RX_ERROR_STATUS, tmp);
	}

	return (tmp_lsts != hw->rx_list_sts[1]);
}

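/*
 * RX interrupt service: snapshot the Y/UV error status registers,
 * fold completion and error bits into rx_list_sts[] for each of the
 * two lists under rx_lock, then complete or repost packets outside
 * the lock.  When a list frees up, either finish a pending pause or
 * re-arm capture.
 */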
static void crystalhd_rx_isr(struct crystalhd_hw *hw, uint32_t intr_sts)
{
	unsigned long flags;
	uint32_t i, list_avail = 0;
	enum BC_STATUS comp_sts = BC_STS_NO_DATA;
	uint32_t y_err_sts, uv_err_sts, y_dn_sz = 0, uv_dn_sz = 0;
	bool ret = false;

	if (!hw) {
		BCMLOG_ERR("Invalid Arguments\n");
		return;
	}

	if (!(intr_sts & GET_RX_INTR_MASK))
		return;

	y_err_sts = crystalhd_reg_rd(hw->adp, MISC1_Y_RX_ERROR_STATUS);
	uv_err_sts = crystalhd_reg_rd(hw->adp, MISC1_UV_RX_ERROR_STATUS);

	for (i = 0; i < DMA_ENGINE_CNT; i++) {
		/* Update states.. */
		spin_lock_irqsave(&hw->rx_lock, flags);
		if (i == 0)
			ret = crystalhd_rx_list0_handler(hw, intr_sts, y_err_sts, uv_err_sts);
		else
			ret = crystalhd_rx_list1_handler(hw, intr_sts, y_err_sts, uv_err_sts);
		if (ret) {
			switch (hw->rx_list_sts[i]) {
			case sts_free:
				comp_sts = BC_STS_SUCCESS;
				list_avail = 1;
				break;
			case rx_y_error:
			case rx_uv_error:
			case rx_sts_error:
				/* We got an error on Y, UV, or both. */
				hw->stats.rx_errors++;
				crystalhd_get_dnsz(hw, i, &y_dn_sz, &uv_dn_sz);
				BCMLOG(BCMLOG_DBG, "list_index:%x rx[%d] Y:%x "
				       "UV:%x Int:%x YDnSz:%x UVDnSz:%x\n",
				       i, hw->stats.rx_errors, y_err_sts,
				       uv_err_sts, intr_sts, y_dn_sz, uv_dn_sz);
				hw->rx_list_sts[i] = sts_free;
				comp_sts = BC_STS_ERROR;
				break;
			default:
				/* Wait for completion.. */
				comp_sts = BC_STS_NO_DATA;
				break;
			}
		}
		spin_unlock_irqrestore(&hw->rx_lock, flags);

		/* handle completion... */
		if (comp_sts != BC_STS_NO_DATA) {
			crystalhd_rx_pkt_done(hw, i, comp_sts);
			comp_sts = BC_STS_NO_DATA;
		}
	}

	if (list_avail) {
		if (hw->stop_pending) {
			if ((hw->rx_list_sts[0] == sts_free) &&
			    (hw->rx_list_sts[1] == sts_free))
				crystalhd_hw_finalize_pause(hw);
		} else {
			crystalhd_hw_start_capture(hw);
		}
	}
}

static enum BC_STATUS crystalhd_fw_cmd_post_proc(struct crystalhd_hw *hw,
					  struct BC_FW_CMD *fw_cmd)
{
	enum BC_STATUS sts = BC_STS_SUCCESS;
	struct dec_rsp_channel_start_video *st_rsp = NULL;

	switch (fw_cmd->cmd[0]) {
	case eCMD_C011_DEC_CHAN_START_VIDEO:
		st_rsp = (struct dec_rsp_channel_start_video *)fw_cmd->rsp;
		hw->pib_del_Q_addr = st_rsp->picInfoDeliveryQ;
		hw->pib_rel_Q_addr = st_rsp->picInfoReleaseQ;
		BCMLOG(BCMLOG_DBG, "DelQAddr:%x RelQAddr:%x\n",
		       hw->pib_del_Q_addr, hw->pib_rel_Q_addr);
		break;
	case eCMD_C011_INIT:
		if (!(crystalhd_load_firmware_config(hw->adp))) {
			BCMLOG_ERR("Invalid Params.\n");
			sts = BC_STS_FW_AUTH_FAILED;
		}
		break;
	default:
		break;
	}
	return sts;
}

static enum BC_STATUS crystalhd_put_ddr2sleep(struct crystalhd_hw *hw)
{
	uint32_t reg;
	union link_misc_perst_decoder_ctrl rst_cntrl_reg;

	/* Pulse reset pin of 7412 (MISC_PERST_DECODER_CTRL) */
	rst_cntrl_reg.whole_reg = crystalhd_reg_rd(hw->adp, MISC_PERST_DECODER_CTRL);

	rst_cntrl_reg.bcm_7412_rst = 1;
	crystalhd_reg_wr(hw->adp, MISC_PERST_DECODER_CTRL, rst_cntrl_reg.whole_reg);
	msleep_interruptible(50);

	rst_cntrl_reg.bcm_7412_rst = 0;
	crystalhd_reg_wr(hw->adp, MISC_PERST_DECODER_CTRL, rst_cntrl_reg.whole_reg);

	/* Close all banks, put DDR in idle */
	bc_dec_reg_wr(hw->adp, SDRAM_PRECHARGE, 0);

	/* Set bit 25 (drop CKE pin of DDR) */
	reg = bc_dec_reg_rd(hw->adp, SDRAM_PARAM);
	reg |= 0x02000000;
	bc_dec_reg_wr(hw->adp, SDRAM_PARAM, reg);

	/* Reset the audio block */
	bc_dec_reg_wr(hw->adp, AUD_DSP_MISC_SOFT_RESET, 0x1);

	/* Power down Raptor PLL */
	reg = bc_dec_reg_rd(hw->adp, DecHt_PllCCtl);
	reg |= 0x00008000;
	bc_dec_reg_wr(hw->adp, DecHt_PllCCtl, reg);

	/* Power down all Audio PLLs */
	bc_dec_reg_wr(hw->adp, AIO_MISC_PLL_RESET, 0x1);

	/* Power down video clock (75MHz) */
	reg = bc_dec_reg_rd(hw->adp, DecHt_PllECtl);
	reg |= 0x00008000;
	bc_dec_reg_wr(hw->adp, DecHt_PllECtl, reg);

	/* Power down video clock (75MHz) */
	reg = bc_dec_reg_rd(hw->adp, DecHt_PllDCtl);
	reg |= 0x00008000;
	bc_dec_reg_wr(hw->adp, DecHt_PllDCtl, reg);

	/* Power down core clock (200MHz) */
	reg = bc_dec_reg_rd(hw->adp, DecHt_PllACtl);
	reg |= 0x00008000;
	bc_dec_reg_wr(hw->adp, DecHt_PllACtl, reg);

	/* Power down core clock (200MHz) */
	reg = bc_dec_reg_rd(hw->adp, DecHt_PllBCtl);
	reg |= 0x00008000;
	bc_dec_reg_wr(hw->adp, DecHt_PllBCtl, reg);

	return BC_STS_SUCCESS;
}

/************************************************
**
*************************************************/

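/*
 * Download the decoder firmware image over the DCI interface.  The
 * flow below: reset the DCI (DCI_CMD bit 0), poll DCI_STATUS bit 4
 * for download-ready, stream the image one 32-bit word at a time
 * through DCI_FIRMWARE_DATA (re-writing the 512 KB DRAM window base
 * as the offset advances), load the byte-swapped signature words
 * into the DCI_SIGNATURE_DATA registers, then kick verification
 * with DCI_CMD bit 1 and check the status bits for the result.
 */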
enum BC_STATUS crystalhd_download_fw(struct crystalhd_adp *adp, void *buffer, uint32_t sz)
{
	uint32_t reg_data, cnt, *temp_buff;
	uint32_t fw_sig_len = 36;
	uint32_t dram_offset = BC_FWIMG_ST_ADDR, sig_reg;

	BCMLOG_ENTER;

	if (!adp || !buffer || !sz) {
		BCMLOG_ERR("Invalid Params.\n");
		return BC_STS_INV_ARG;
	}

	reg_data = crystalhd_reg_rd(adp, OTP_CMD);
	if (!(reg_data & 0x02)) {
		BCMLOG_ERR("Invalid hw config.. otp not programmed\n");
		return BC_STS_ERROR;
	}

	reg_data = 0;
	crystalhd_reg_wr(adp, DCI_CMD, 0);
	reg_data |= BC_BIT(0);
	crystalhd_reg_wr(adp, DCI_CMD, reg_data);

	reg_data = 0;
	cnt = 1000;
	msleep_interruptible(10);

	while (reg_data != BC_BIT(4)) {
		reg_data = crystalhd_reg_rd(adp, DCI_STATUS);
		reg_data &= BC_BIT(4);
		if (--cnt == 0) {
			BCMLOG_ERR("Firmware Download RDY Timeout.\n");
			return BC_STS_TIMEOUT;
		}
	}

	msleep_interruptible(10);
	/* Load the FW to the FW_ADDR field in the DCI_FIRMWARE_ADDR */
	crystalhd_reg_wr(adp, DCI_FIRMWARE_ADDR, dram_offset);
	temp_buff = (uint32_t *)buffer;
	for (cnt = 0; cnt < (sz - fw_sig_len); cnt += 4) {
		crystalhd_reg_wr(adp, DCI_DRAM_BASE_ADDR, (dram_offset >> 19));
		crystalhd_reg_wr(adp, DCI_FIRMWARE_DATA, *temp_buff);
		dram_offset += 4;
		temp_buff++;
	}
	msleep_interruptible(10);

	temp_buff++;

	sig_reg = (uint32_t)DCI_SIGNATURE_DATA_7;
	for (cnt = 0; cnt < 8; cnt++) {
		uint32_t swapped_data = *temp_buff;

		swapped_data = bswap_32_1(swapped_data);
		crystalhd_reg_wr(adp, sig_reg, swapped_data);
		sig_reg -= 4;
		temp_buff++;
	}
	msleep_interruptible(10);

	reg_data = 0;
	reg_data |= BC_BIT(1);
	crystalhd_reg_wr(adp, DCI_CMD, reg_data);
	msleep_interruptible(10);

	reg_data = crystalhd_reg_rd(adp, DCI_STATUS);

	if ((reg_data & BC_BIT(9)) == BC_BIT(9)) {
		cnt = 1000;
		while ((reg_data & BC_BIT(0)) != BC_BIT(0)) {
			reg_data = crystalhd_reg_rd(adp, DCI_STATUS);
			reg_data &= BC_BIT(0);
			if (!(--cnt))
				break;
			msleep_interruptible(10);
		}
		reg_data = crystalhd_reg_rd(adp, DCI_CMD);
		reg_data |= BC_BIT(4);
		crystalhd_reg_wr(adp, DCI_CMD, reg_data);

	} else {
		BCMLOG_ERR("F/W Signature mismatch\n");
		return BC_STS_FW_AUTH_FAILED;
	}

	BCMLOG(BCMLOG_INFO, "Firmware Downloaded Successfully\n");
	return BC_STS_SUCCESS;
}

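/*
 * Firmware command path: the command block is written to device
 * memory at TS_Host2CpuSnd, its address is posted to the Hst2CpuMbx1
 * mailbox, and the ISR signals completion when the decoder raises
 * the firmware-command interrupt; the response address then comes
 * back in Cpu2HstMbx1.
 */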
enum BC_STATUS crystalhd_do_fw_cmd(struct crystalhd_hw *hw,
				struct BC_FW_CMD *fw_cmd)
{
	uint32_t cnt = 0, cmd_res_addr;
	uint32_t *cmd_buff, *res_buff;
	wait_queue_head_t fw_cmd_event;
	int rc = 0;
	enum BC_STATUS sts;

	crystalhd_create_event(&fw_cmd_event);

	BCMLOG_ENTER;

	if (!hw || !fw_cmd) {
		BCMLOG_ERR("Invalid Arguments\n");
		return BC_STS_INV_ARG;
	}

	cmd_buff = fw_cmd->cmd;
	res_buff = fw_cmd->rsp;

	if (!cmd_buff || !res_buff) {
		BCMLOG_ERR("Invalid Parameters for F/W Command\n");
		return BC_STS_INV_ARG;
	}

	hw->pwr_lock++;

	hw->fwcmd_evt_sts = 0;
	hw->pfw_cmd_event = &fw_cmd_event;

	/* Write the command to device memory */
	crystalhd_mem_wr(hw->adp, TS_Host2CpuSnd, FW_CMD_BUFF_SZ, cmd_buff);

	/* Memory read back for memory arbitrator flush */
	crystalhd_mem_rd(hw->adp, TS_Host2CpuSnd, 1, &cnt);

	/* Write the command address to the mailbox */
	bc_dec_reg_wr(hw->adp, Hst2CpuMbx1, TS_Host2CpuSnd);
	msleep_interruptible(50);

	crystalhd_wait_on_event(&fw_cmd_event, hw->fwcmd_evt_sts, 20000, rc, 0);

	if (!rc) {
		sts = BC_STS_SUCCESS;
	} else if (rc == -EBUSY) {
		BCMLOG_ERR("Firmware command timed out\n");
		sts = BC_STS_TIMEOUT;
	} else if (rc == -EINTR) {
		BCMLOG(BCMLOG_DBG, "FwCmd wait interrupted by signal.\n");
		sts = BC_STS_IO_USER_ABORT;
	} else {
		BCMLOG_ERR("FwCmd IO Error.\n");
		sts = BC_STS_IO_ERROR;
	}

	if (sts != BC_STS_SUCCESS) {
		BCMLOG_ERR("FwCmd Failed.\n");
		hw->pwr_lock--;
		return sts;
	}

	/* Get the response address */
	cmd_res_addr = bc_dec_reg_rd(hw->adp, Cpu2HstMbx1);

	/* Read the response */
	crystalhd_mem_rd(hw->adp, cmd_res_addr, FW_CMD_BUFF_SZ, res_buff);

	hw->pwr_lock--;

	if (res_buff[2] != C011_RET_SUCCESS) {
		BCMLOG_ERR("res_buff[2] != C011_RET_SUCCESS\n");
		return BC_STS_FW_CMD_ERR;
	}

	sts = crystalhd_fw_cmd_post_proc(hw, fw_cmd);
	if (sts != BC_STS_SUCCESS)
		BCMLOG_ERR("crystalhd_fw_cmd_post_proc Failed.\n");

	return sts;
}

bool crystalhd_hw_interrupt(struct crystalhd_adp *adp, struct crystalhd_hw *hw)
{
	uint32_t intr_sts = 0;
	uint32_t deco_intr = 0;
	bool rc = false;

	if (!adp || !hw || !hw->dev_started)
		return rc;

	hw->stats.num_interrupts++;
	hw->pwr_lock++;

	deco_intr = bc_dec_reg_rd(adp, Stream2Host_Intr_Sts);
	intr_sts  = crystalhd_reg_rd(adp, INTR_INTR_STATUS);

	if (intr_sts) {
		/* Let the system know we processed this interrupt. */
		rc = true;
		hw->stats.dev_interrupts++;
	}

	if (deco_intr && (deco_intr != 0xdeaddead)) {

		if (deco_intr & 0x80000000) {
			/* Set the event and the status flag */
			if (hw->pfw_cmd_event) {
				hw->fwcmd_evt_sts = 1;
				crystalhd_set_event(hw->pfw_cmd_event);
			}
		}

		if (deco_intr & BC_BIT(1))
			crystalhd_hw_proc_pib(hw);

		bc_dec_reg_wr(adp, Stream2Host_Intr_Sts, deco_intr);
		bc_dec_reg_wr(adp, Stream2Host_Intr_Sts, 0);
		rc = true;
	}

	/* Rx interrupts */
	crystalhd_rx_isr(hw, intr_sts);

	/* Tx interrupts */
	crystalhd_tx_isr(hw, intr_sts);

	/* Clear interrupts */
	if (rc) {
		if (intr_sts)
			crystalhd_reg_wr(adp, INTR_INTR_CLR_REG, intr_sts);

		crystalhd_reg_wr(adp, INTR_EOI_CTRL, 1);
	}

	hw->pwr_lock--;

	return rc;
}
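
/*
 * Illustrative sketch only: crystalhd_hw_interrupt() is meant to be
 * called from the adapter's top-half IRQ handler (hooked up in the PCI
 * probe code, not in this file).  "example_isr" and the "example_get_hw"
 * lookup are hypothetical; <linux/interrupt.h> provides irqreturn_t.
 */
#if 0
static irqreturn_t example_isr(int irq, void *dev_id)
{
	struct crystalhd_adp *adp = dev_id;
	struct crystalhd_hw *hw = example_get_hw(adp);	/* hypothetical */

	if (crystalhd_hw_interrupt(adp, hw))
		return IRQ_HANDLED;

	return IRQ_NONE;	/* not our interrupt; let others try */
}
#endif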

enum BC_STATUS crystalhd_hw_open(struct crystalhd_hw *hw, struct crystalhd_adp *adp)
{
	if (!hw || !adp) {
		BCMLOG_ERR("Invalid Arguments\n");
		return BC_STS_INV_ARG;
	}

	if (hw->dev_started)
		return BC_STS_SUCCESS;

	memset(hw, 0, sizeof(struct crystalhd_hw));

	hw->adp = adp;
	spin_lock_init(&hw->lock);
	spin_lock_init(&hw->rx_lock);
	hw->tx_ioq_tag_seed = 0x70023070;
	hw->rx_pkt_tag_seed = 0x70029070;

	hw->stop_pending = 0;
	crystalhd_start_device(hw->adp);
	hw->dev_started = true;

	/* Set the initial core clock */
	hw->core_clock_mhz = CLOCK_PRESET;
	hw->prev_n = 0;
	hw->pwr_lock = 0;
	crystalhd_hw_set_core_clock(hw);

	return BC_STS_SUCCESS;
}

enum BC_STATUS crystalhd_hw_close(struct crystalhd_hw *hw)
{
	if (!hw) {
		BCMLOG_ERR("Invalid Arguments\n");
		return BC_STS_INV_ARG;
	}

	if (!hw->dev_started)
		return BC_STS_SUCCESS;

	/* Stop and DDR sleep will happen in here */
	crystalhd_hw_suspend(hw);
	hw->dev_started = false;

	return BC_STS_SUCCESS;
}
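
/*
 * Illustrative sketch only: the open/close pair brackets every other hw
 * call.  Error handling is elided and "my_adp" is a hypothetical adapter
 * handed down from the PCI layer.
 */
#if 0
static void example_hw_lifecycle(struct crystalhd_adp *my_adp)
{
	struct crystalhd_hw hw;

	if (crystalhd_hw_open(&hw, my_adp) != BC_STS_SUCCESS)
		return;

	crystalhd_hw_setup_dma_rings(&hw);	/* allocate TX/RX rings */
	/* ... stream and decode ... */
	crystalhd_hw_free_dma_rings(&hw);
	crystalhd_hw_close(&hw);
}
#endif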

enum BC_STATUS crystalhd_hw_setup_dma_rings(struct crystalhd_hw *hw)
{
	unsigned int i;
	void *mem;
	size_t mem_len;
	dma_addr_t phy_addr;
	enum BC_STATUS sts = BC_STS_SUCCESS;
	struct crystalhd_rx_dma_pkt *rpkt;

	if (!hw || !hw->adp) {
		BCMLOG_ERR("Invalid Arguments\n");
		return BC_STS_INV_ARG;
	}

	sts = crystalhd_hw_create_ioqs(hw);
	if (sts != BC_STS_SUCCESS) {
		BCMLOG_ERR("Failed to create IOQs..\n");
		return sts;
	}

	mem_len = BC_LINK_MAX_SGLS * sizeof(struct dma_descriptor);

	for (i = 0; i < BC_TX_LIST_CNT; i++) {
		mem = bc_kern_dma_alloc(hw->adp, mem_len, &phy_addr);
		if (mem) {
			memset(mem, 0, mem_len);
		} else {
			BCMLOG_ERR("Insufficient Memory For TX\n");
			crystalhd_hw_free_dma_rings(hw);
			return BC_STS_INSUFF_RES;
		}
		/* tx_pkt_pool -- static memory allocation */
		hw->tx_pkt_pool[i].desc_mem.pdma_desc_start = mem;
		hw->tx_pkt_pool[i].desc_mem.phy_addr = phy_addr;
		hw->tx_pkt_pool[i].desc_mem.sz = mem_len;
		hw->tx_pkt_pool[i].list_tag = 0;

		/* Add TX dma requests to the free queue.. */
		sts = crystalhd_dioq_add(hw->tx_freeq,
				       &hw->tx_pkt_pool[i], false, 0);
		if (sts != BC_STS_SUCCESS) {
			crystalhd_hw_free_dma_rings(hw);
			return sts;
		}
	}

	for (i = 0; i < BC_RX_LIST_CNT; i++) {
		rpkt = kzalloc(sizeof(*rpkt), GFP_KERNEL);
		if (!rpkt) {
			BCMLOG_ERR("Insufficient Memory For RX\n");
			crystalhd_hw_free_dma_rings(hw);
			return BC_STS_INSUFF_RES;
		}

		mem = bc_kern_dma_alloc(hw->adp, mem_len, &phy_addr);
		if (mem) {
			memset(mem, 0, mem_len);
		} else {
			BCMLOG_ERR("Insufficient Memory For RX\n");
			kfree(rpkt);	/* not in the pool yet; free it here */
			crystalhd_hw_free_dma_rings(hw);
			return BC_STS_INSUFF_RES;
		}
		rpkt->desc_mem.pdma_desc_start = mem;
		rpkt->desc_mem.phy_addr = phy_addr;
		rpkt->desc_mem.sz  = mem_len;
		rpkt->pkt_tag = hw->rx_pkt_tag_seed + i;
		crystalhd_hw_free_rx_pkt(hw, rpkt);
	}

	return BC_STS_SUCCESS;
}

enum BC_STATUS crystalhd_hw_free_dma_rings(struct crystalhd_hw *hw)
{
	unsigned int i;
	struct crystalhd_rx_dma_pkt *rpkt = NULL;

	if (!hw || !hw->adp) {
		BCMLOG_ERR("Invalid Arguments\n");
		return BC_STS_INV_ARG;
	}

	/* Delete all IOQs.. */
	crystalhd_hw_delete_ioqs(hw);

	for (i = 0; i < BC_TX_LIST_CNT; i++) {
		if (hw->tx_pkt_pool[i].desc_mem.pdma_desc_start) {
			bc_kern_dma_free(hw->adp,
				hw->tx_pkt_pool[i].desc_mem.sz,
				hw->tx_pkt_pool[i].desc_mem.pdma_desc_start,
				hw->tx_pkt_pool[i].desc_mem.phy_addr);

			hw->tx_pkt_pool[i].desc_mem.pdma_desc_start = NULL;
		}
	}

	BCMLOG(BCMLOG_DBG, "Releasing RX Pkt pool\n");
	do {
		rpkt = crystalhd_hw_alloc_rx_pkt(hw);
		if (!rpkt)
			break;
		bc_kern_dma_free(hw->adp, rpkt->desc_mem.sz,
				 rpkt->desc_mem.pdma_desc_start,
				 rpkt->desc_mem.phy_addr);
		kfree(rpkt);
	} while (rpkt);

	return BC_STS_SUCCESS;
}

enum BC_STATUS crystalhd_hw_post_tx(struct crystalhd_hw *hw, struct crystalhd_dio_req *ioreq,
			     hw_comp_callback call_back,
			     wait_queue_head_t *cb_event, uint32_t *list_id,
			     uint8_t data_flags)
{
	struct tx_dma_pkt *tx_dma_packet = NULL;
	uint32_t first_desc_u_addr, first_desc_l_addr;
	union addr_64 desc_addr;
	enum BC_STATUS sts, add_sts;
	uint32_t dummy_index = 0;
	unsigned long flags;
	bool rc;

	if (!hw || !ioreq || !call_back || !cb_event || !list_id) {
		BCMLOG_ERR("Invalid Arguments\n");
		return BC_STS_INV_ARG;
	}

	/*
	 * Since we hit the busy condition very frequently, check the
	 * coded-input FIFO status first, before checking for a free
	 * queue element.
	 *
	 * This avoids the queue fetch/add in the normal case.
	 */
	rc = crystalhd_code_in_full(hw->adp, ioreq->uinfo.xfr_len,
				  false, data_flags);
	if (rc) {
		hw->stats.cin_busy++;
		return BC_STS_BUSY;
	}

	/* Get a list from TxFreeQ */
	tx_dma_packet = (struct tx_dma_pkt *)crystalhd_dioq_fetch(hw->tx_freeq);
	if (!tx_dma_packet) {
		BCMLOG_ERR("No empty elements..\n");
		return BC_STS_ERR_USAGE;
	}

	sts = crystalhd_xlat_sgl_to_dma_desc(ioreq,
					   &tx_dma_packet->desc_mem,
					   &dummy_index);
	if (sts != BC_STS_SUCCESS) {
		add_sts = crystalhd_dioq_add(hw->tx_freeq, tx_dma_packet,
					   false, 0);
		if (add_sts != BC_STS_SUCCESS)
			BCMLOG_ERR("double fault..\n");

		return sts;
	}

	hw->pwr_lock++;

	desc_addr.full_addr = tx_dma_packet->desc_mem.phy_addr;

	tx_dma_packet->call_back = call_back;
	tx_dma_packet->cb_event  = cb_event;
	tx_dma_packet->dio_req   = ioreq;

	spin_lock_irqsave(&hw->lock, flags);

	if (hw->tx_list_post_index == 0) {
		first_desc_u_addr = MISC1_TX_FIRST_DESC_U_ADDR_LIST0;
		first_desc_l_addr = MISC1_TX_FIRST_DESC_L_ADDR_LIST0;
	} else {
		first_desc_u_addr = MISC1_TX_FIRST_DESC_U_ADDR_LIST1;
		first_desc_l_addr = MISC1_TX_FIRST_DESC_L_ADDR_LIST1;
	}

	*list_id = tx_dma_packet->list_tag = hw->tx_ioq_tag_seed +
					     hw->tx_list_post_index;

	hw->tx_list_post_index = (hw->tx_list_post_index + 1) % DMA_ENGINE_CNT;

	spin_unlock_irqrestore(&hw->lock, flags);

	/* Insert in Active Q.. */
	crystalhd_dioq_add(hw->tx_actq, tx_dma_packet, false,
			 tx_dma_packet->list_tag);

	/*
	 * The interrupt will fire as soon as the valid bit is written,
	 * so all initialization must happen before that write.
	 */
	crystalhd_start_tx_dma_engine(hw);
	crystalhd_reg_wr(hw->adp, first_desc_u_addr, desc_addr.high_part);

	crystalhd_reg_wr(hw->adp, first_desc_l_addr, desc_addr.low_part | 0x01);
					/* Be sure we set the valid bit ^^^^ */

	return BC_STS_SUCCESS;
}
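
/*
 * Illustrative sketch only: BC_STS_BUSY from crystalhd_hw_post_tx() is
 * an expected flow-control result, so callers typically retry.  The
 * callback, event, and retry budget below are hypothetical.
 */
#if 0
static enum BC_STATUS example_post_tx_retry(struct crystalhd_hw *hw,
					    struct crystalhd_dio_req *ioreq,
					    hw_comp_callback cb,
					    wait_queue_head_t *ev)
{
	enum BC_STATUS sts;
	uint32_t list_id = 0;
	int retries = 50;	/* arbitrary budget */

	do {
		sts = crystalhd_hw_post_tx(hw, ioreq, cb, ev, &list_id, 0);
		if (sts != BC_STS_BUSY)
			return sts;	/* posted, or a hard error */
		msleep_interruptible(5);	/* back off briefly */
	} while (--retries);

	return BC_STS_BUSY;	/* still busy after the budget */
}
#endif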

/*
 * This is a forced cancel, so we are racing with the ISR.
 *
 * We try to remove the request from the active queue before the ISR
 * gets to it.  If the ISR gets it first, completion happens in the
 * normal path and we return _STS_NO_DATA from here.
 *
 * FIXME: the actual race condition has not been tested.
 */
enum BC_STATUS crystalhd_hw_cancel_tx(struct crystalhd_hw *hw, uint32_t list_id)
{
	if (!hw || !list_id) {
		BCMLOG_ERR("Invalid Arguments\n");
		return BC_STS_INV_ARG;
	}

	crystalhd_stop_tx_dma_engine(hw);
	crystalhd_hw_tx_req_complete(hw, list_id, BC_STS_IO_USER_ABORT);

	return BC_STS_SUCCESS;
}

enum BC_STATUS crystalhd_hw_add_cap_buffer(struct crystalhd_hw *hw,
				    struct crystalhd_dio_req *ioreq, bool en_post)
{
	struct crystalhd_rx_dma_pkt *rpkt;
	uint32_t tag, uv_desc_ix = 0;
	enum BC_STATUS sts;

	if (!hw || !ioreq) {
		BCMLOG_ERR("Invalid Arguments\n");
		return BC_STS_INV_ARG;
	}

	rpkt = crystalhd_hw_alloc_rx_pkt(hw);
	if (!rpkt) {
		BCMLOG_ERR("Insufficient resources\n");
		return BC_STS_INSUFF_RES;
	}

	rpkt->dio_req = ioreq;
	tag = rpkt->pkt_tag;

	sts = crystalhd_xlat_sgl_to_dma_desc(ioreq, &rpkt->desc_mem, &uv_desc_ix);
	if (sts != BC_STS_SUCCESS) {
		/* Return the packet to the pool instead of leaking it. */
		crystalhd_hw_free_rx_pkt(hw, rpkt);
		return sts;
	}

	rpkt->uv_phy_addr = 0;

	/* Store the UV descriptor address in the rx packet for the post. */
	if (uv_desc_ix)
		rpkt->uv_phy_addr = rpkt->desc_mem.phy_addr +
				    (sizeof(struct dma_descriptor) * (uv_desc_ix + 1));

	if (en_post)
		sts = crystalhd_hw_post_cap_buff(hw, rpkt);
	else
		sts = crystalhd_dioq_add(hw->rx_freeq, rpkt, false, tag);

	return sts;
}

enum BC_STATUS crystalhd_hw_get_cap_buffer(struct crystalhd_hw *hw,
				    struct BC_PIC_INFO_BLOCK *pib,
				    struct crystalhd_dio_req **ioreq)
{
	struct crystalhd_rx_dma_pkt *rpkt;
	uint32_t timeout = BC_PROC_OUTPUT_TIMEOUT / 1000;
	uint32_t sig_pending = 0;

	if (!hw || !ioreq || !pib) {
		BCMLOG_ERR("Invalid Arguments\n");
		return BC_STS_INV_ARG;
	}

	rpkt = crystalhd_dioq_fetch_wait(hw->rx_rdyq, timeout, &sig_pending);
	if (!rpkt) {
		if (sig_pending) {
			BCMLOG(BCMLOG_INFO, "wait on frame aborted, signal %d pending\n", sig_pending);
			return BC_STS_IO_USER_ABORT;
		} else {
			return BC_STS_TIMEOUT;
		}
	}

	rpkt->dio_req->uinfo.comp_flags = rpkt->flags;

	if (rpkt->flags & COMP_FLAG_PIB_VALID)
		memcpy(pib, &rpkt->pib, sizeof(*pib));

	*ioreq = rpkt->dio_req;

	crystalhd_hw_free_rx_pkt(hw, rpkt);

	return BC_STS_SUCCESS;
}
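
/*
 * Illustrative sketch only: a consumer loop over decoded frames.  The
 * per-frame handler "example_deliver_frame" is hypothetical; PIB fields
 * are only valid when COMP_FLAG_PIB_VALID was set by the rx path.
 */
#if 0
static void example_frame_loop(struct crystalhd_hw *hw)
{
	struct BC_PIC_INFO_BLOCK pib;
	struct crystalhd_dio_req *ioreq;
	enum BC_STATUS sts;

	for (;;) {
		sts = crystalhd_hw_get_cap_buffer(hw, &pib, &ioreq);
		if (sts == BC_STS_TIMEOUT)
			continue;	/* nothing ready yet */
		if (sts != BC_STS_SUCCESS)
			break;		/* abort or error */

		example_deliver_frame(ioreq, &pib);	/* hypothetical */

		/* Re-arm capture with the same buffer. */
		crystalhd_hw_add_cap_buffer(hw, ioreq, true);
	}
}
#endif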

enum BC_STATUS crystalhd_hw_start_capture(struct crystalhd_hw *hw)
{
	struct crystalhd_rx_dma_pkt *rx_pkt;
	enum BC_STATUS sts;
	uint32_t i;

	if (!hw) {
		BCMLOG_ERR("Invalid Arguments\n");
		return BC_STS_INV_ARG;
	}

	/* This is the start of capture; post to both lists. */
	for (i = 0; i < DMA_ENGINE_CNT; i++) {
		rx_pkt = crystalhd_dioq_fetch(hw->rx_freeq);
		if (!rx_pkt)
			return BC_STS_NO_DATA;
		sts = crystalhd_hw_post_cap_buff(hw, rx_pkt);
		if (sts != BC_STS_SUCCESS)
			break;
	}

	return BC_STS_SUCCESS;
}

enum BC_STATUS crystalhd_hw_stop_capture(struct crystalhd_hw *hw)
{
	void *temp = NULL;

	if (!hw) {
		BCMLOG_ERR("Invalid Arguments\n");
		return BC_STS_INV_ARG;
	}

	crystalhd_stop_rx_dma_engine(hw);

	do {
		temp = crystalhd_dioq_fetch(hw->rx_freeq);
		if (temp)
			crystalhd_rx_pkt_rel_call_back(hw, temp);
	} while (temp);

	return BC_STS_SUCCESS;
}

enum BC_STATUS crystalhd_hw_pause(struct crystalhd_hw *hw)
{
	hw->stats.pause_cnt++;
	hw->stop_pending = 1;

	if ((hw->rx_list_sts[0] == sts_free) &&
	    (hw->rx_list_sts[1] == sts_free))
		crystalhd_hw_finalize_pause(hw);

	return BC_STS_SUCCESS;
}

enum BC_STATUS crystalhd_hw_unpause(struct crystalhd_hw *hw)
{
	enum BC_STATUS sts;
	uint32_t aspm;

	hw->stop_pending = 0;

	/* Disable ASPM L1 before restarting capture. */
	aspm = crystalhd_reg_rd(hw->adp, PCIE_DLL_DATA_LINK_CONTROL);
	aspm &= ~ASPM_L1_ENABLE;
	crystalhd_reg_wr(hw->adp, PCIE_DLL_DATA_LINK_CONTROL, aspm);

	sts = crystalhd_hw_start_capture(hw);
	return sts;
}

enum BC_STATUS crystalhd_hw_suspend(struct crystalhd_hw *hw)
{
	enum BC_STATUS sts;

	if (!hw) {
		BCMLOG_ERR("Invalid Arguments\n");
		return BC_STS_INV_ARG;
	}

	sts = crystalhd_put_ddr2sleep(hw);
	if (sts != BC_STS_SUCCESS) {
		BCMLOG_ERR("Failed to Put DDR To Sleep!!\n");
		return BC_STS_ERROR;
	}

	if (!crystalhd_stop_device(hw->adp)) {
		BCMLOG_ERR("Failed to Stop Device!!\n");
		return BC_STS_ERROR;
	}

	return BC_STS_SUCCESS;
}

void crystalhd_hw_stats(struct crystalhd_hw *hw, struct crystalhd_hw_stats *stats)
{
	if (!hw) {
		BCMLOG_ERR("Invalid Arguments\n");
		return;
	}

	/* If called with NULL stats, it's a request to zero out the stats. */
	if (!stats) {
		memset(&hw->stats, 0, sizeof(hw->stats));
		return;
	}

	hw->stats.freeq_count = crystalhd_dioq_count(hw->rx_freeq);
	hw->stats.rdyq_count  = crystalhd_dioq_count(hw->rx_rdyq);
	memcpy(stats, &hw->stats, sizeof(*stats));
}
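
/*
 * Illustrative sketch only: the stats API doubles as a reset hook, so a
 * caller can snapshot and then clear the counters like this.
 */
#if 0
static void example_stats_snapshot(struct crystalhd_hw *hw)
{
	struct crystalhd_hw_stats snap;

	crystalhd_hw_stats(hw, &snap);	/* copy out current counters */
	crystalhd_hw_stats(hw, NULL);	/* NULL means: zero the counters */
}
#endif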

enum BC_STATUS crystalhd_hw_set_core_clock(struct crystalhd_hw *hw)
{
	uint32_t reg, n, i;
	uint32_t vco_mg, refresh_reg;

	if (!hw) {
		BCMLOG_ERR("Invalid Arguments\n");
		return BC_STS_INV_ARG;
	}

	/* n = (hw->core_clock_mhz * 3) / 20 + 1; */
	n = hw->core_clock_mhz / 5;

	if (n == hw->prev_n)
		return BC_STS_CLK_NOCHG;

	if (hw->pwr_lock > 0) {
		/* BCMLOG(BCMLOG_INFO, "pwr_lock is %u\n", hw->pwr_lock); */
		return BC_STS_CLK_NOCHG;
	}

	/* Select the VCO range band for this divider. */
	i = n * 27;
	if (i < 560)
		vco_mg = 0;
	else if (i < 900)
		vco_mg = 1;
	else if (i < 1030)
		vco_mg = 2;
	else
		vco_mg = 3;

	reg = bc_dec_reg_rd(hw->adp, DecHt_PllACtl);

	reg &= 0xFFFFCFC0;
	reg |= n;
	reg |= vco_mg << 12;

	BCMLOG(BCMLOG_INFO, "clock is moving to %d with n %d with vco_mg %d\n",
	       hw->core_clock_mhz, n, vco_mg);

	/* Change the DRAM refresh rate to accommodate the new frequency */
	/* refresh_reg = ((refresh_rate * clock_rate) / 16) - 1; rounding up */
	refresh_reg = (7 * hw->core_clock_mhz / 16);
	bc_dec_reg_wr(hw->adp, SDRAM_REF_PARAM, ((1 << 12) | refresh_reg));

	bc_dec_reg_wr(hw->adp, DecHt_PllACtl, reg);

	/* Poll for PLL lock (bit 17), up to ~100ms. */
	for (i = 0; i < 10; i++) {
		reg = bc_dec_reg_rd(hw->adp, DecHt_PllACtl);

		if (reg & 0x00020000) {
			hw->prev_n = n;
			BCMLOG(BCMLOG_INFO, "C");
			return BC_STS_SUCCESS;
		} else {
			msleep_interruptible(10);
		}
	}
	BCMLOG(BCMLOG_INFO, "clk change failed\n");
	return BC_STS_CLK_NOCHG;
}
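
/*
 * Worked example of the clock math in crystalhd_hw_set_core_clock()
 * (illustrative only): for core_clock_mhz = 180, n = 180 / 5 = 36,
 * n * 27 = 972, so vco_mg = 2 (900 <= 972 < 1030); the DRAM refresh
 * parameter becomes 7 * 180 / 16 = 78.  CLOCK_PRESET's real value is
 * defined in crystalhd_hw.h and not assumed here.
 */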