Lines matching refs: si
(only in /asuswrt-rt-n18u-9.0.0.4.380.2695/release/src-rt-6.x.4708/linux/linux-2.6/drivers/net/irda/)

126 static inline void pxa_irda_disable_clk(struct pxa_irda *si)
128 if (si->cur_clk)
129 clk_disable(si->cur_clk);
130 si->cur_clk = NULL;
133 static inline void pxa_irda_enable_firclk(struct pxa_irda *si)
135 si->cur_clk = si->fir_clk;
136 clk_enable(si->fir_clk);
139 static inline void pxa_irda_enable_sirclk(struct pxa_irda *si)
141 si->cur_clk = si->sir_clk;
142 clk_enable(si->sir_clk);
146 #define IS_FIR(si) ((si)->speed >= 4000000)
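The IS_FIR() macro above is the single switch between the driver's two data paths: anything below 4 Mb/s goes through the SIR UART-style code, and the 4 Mb/s rate goes through the FIR DMA code. A minimal, self-contained sketch (not part of the driver; struct pxa_irda is reduced to the one field the macro reads) showing how the test classifies common IrDA speeds:

#include <stdio.h>

/* Stand-in for the driver's struct pxa_irda, reduced to the one field the macro reads. */
struct pxa_irda { int speed; };

#define IS_FIR(si) ((si)->speed >= 4000000)   /* same test as the driver macro */

int main(void)
{
	struct pxa_irda si = { .speed = 9600 };

	printf("%8d baud -> %s\n", si.speed, IS_FIR(&si) ? "FIR" : "SIR");
	si.speed = 115200;
	printf("%8d baud -> %s\n", si.speed, IS_FIR(&si) ? "FIR" : "SIR");
	si.speed = 4000000;
	printf("%8d baud -> %s\n", si.speed, IS_FIR(&si) ? "FIR" : "SIR");
	return 0;
}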
149 inline static void pxa_irda_fir_dma_rx_start(struct pxa_irda *si)
151 DCSR(si->rxdma) = DCSR_NODESC;
152 DSADR(si->rxdma) = __PREG(ICDR);
153 DTADR(si->rxdma) = si->dma_rx_buff_phy;
154 DCMD(si->rxdma) = DCMD_INCTRGADDR | DCMD_FLOWSRC | DCMD_WIDTH1 | DCMD_BURST32 | IRDA_FRAME_SIZE_LIMIT;
155 DCSR(si->rxdma) |= DCSR_RUN;
158 inline static void pxa_irda_fir_dma_tx_start(struct pxa_irda *si)
160 DCSR(si->txdma) = DCSR_NODESC;
161 DSADR(si->txdma) = si->dma_tx_buff_phy;
162 DTADR(si->txdma) = __PREG(ICDR);
163 DCMD(si->txdma) = DCMD_INCSRCADDR | DCMD_FLOWTRG | DCMD_ENDIRQEN | DCMD_WIDTH1 | DCMD_BURST32 | si->dma_tx_buff_len;
164 DCSR(si->txdma) |= DCSR_RUN;
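The two helpers above program one PXA DMA channel each, always in the same order: clear the channel into no-descriptor mode, set source and target addresses (the ICP data register ICDR on one side, the coherent DMA buffer on the other), write the command word combining the width/burst/flow flags with the transfer length, then set the RUN bit. A hedged userspace sketch of that ordering with stand-in register definitions; the bit positions and addresses below are illustrative assumptions, not values from pxa-regs.h:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

/* Toy model of one PXA DMA channel's registers; the driver's DCSR()/DSADR()/
 * DTADR()/DCMD() macros access memory-mapped hardware registers instead. */
struct dma_channel {
	uint32_t dcsr, dsadr, dtadr, dcmd;
};

#define DCSR_NODESC (1u << 30)   /* no-descriptor-fetch mode (bit position assumed) */
#define DCSR_RUN    (1u << 31)   /* channel run bit (bit position assumed) */

/* Mirrors the rx-start ordering: FIFO register as source, DMA buffer as target. */
static void fir_dma_rx_start(struct dma_channel *ch,
			     uint32_t fifo_addr, uint32_t buf_phys,
			     uint32_t cmd_flags, uint32_t len)
{
	ch->dcsr  = DCSR_NODESC;       /* descriptor-less mode, channel stopped */
	ch->dsadr = fifo_addr;         /* source: the ICDR FIFO register */
	ch->dtadr = buf_phys;          /* target: the coherent rx DMA buffer */
	ch->dcmd  = cmd_flags | len;   /* width/burst flags plus byte count */
	ch->dcsr |= DCSR_RUN;          /* finally, kick the channel */
}

int main(void)
{
	struct dma_channel ch = { 0, 0, 0, 0 };

	/* Placeholder addresses and a 2047-byte length, purely for illustration. */
	fir_dma_rx_start(&ch, 0x10000000u, 0x20000000u, 0, 2047);
	printf("DCSR=%#" PRIx32 " DSADR=%#" PRIx32 " DTADR=%#" PRIx32 " DCMD=%#" PRIx32 "\n",
	       ch.dcsr, ch.dsadr, ch.dtadr, ch.dcmd);
	return 0;
}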
170 static void pxa_irda_set_mode(struct pxa_irda *si, int mode)
172 if (si->pdata->transceiver_mode)
173 si->pdata->transceiver_mode(si->dev, mode);
175 if (gpio_is_valid(si->pdata->gpio_pwdown))
176 gpio_set_value(si->pdata->gpio_pwdown,
178 !si->pdata->gpio_pwdown_inverted);
179 pxa2xx_transceiver_mode(si->dev, mode);
186 static int pxa_irda_set_speed(struct pxa_irda *si, int speed)
201 if (IS_FIR(si)) {
203 DCSR(si->rxdma) &= ~DCSR_RUN;
206 pxa_irda_disable_clk(si);
209 pxa_irda_set_mode(si, IR_SIRMODE);
212 pxa_irda_enable_sirclk(si);
224 si->speed = speed;
237 pxa_irda_disable_clk(si);
243 pxa_irda_set_mode(si, IR_FIRMODE);
246 pxa_irda_enable_firclk(si);
248 si->speed = speed;
249 pxa_irda_fir_dma_rx_start(si);
266 struct pxa_irda *si = netdev_priv(dev);
286 &si->rx_buff, data);
290 si->last_oscr = OSCR;
299 async_unwrap_char(dev, &dev->stats, &si->rx_buff, STRBR);
301 si->last_oscr = OSCR;
305 while ((si->tx_buff.len) && (STLSR & LSR_TDRQ)) {
306 STTHR = *si->tx_buff.data++;
307 si->tx_buff.len -= 1;
310 if (si->tx_buff.len == 0) {
312 dev->stats.tx_bytes += si->tx_buff.data - si->tx_buff.head;
317 si->last_oscr = OSCR;
324 if (si->newspeed) {
325 pxa_irda_set_speed(si, si->newspeed);
326 si->newspeed = 0;
356 struct pxa_irda *si = netdev_priv(dev);
364 dev->stats.tx_bytes += si->dma_tx_buff_len;
371 si->last_oscr = OSCR;
379 if (si->newspeed) {
380 pxa_irda_set_speed(si, si->newspeed);
381 si->newspeed = 0;
386 pxa_irda_fir_dma_rx_start(si);
398 static void pxa_irda_fir_irq_eif(struct pxa_irda *si, struct net_device *dev, int icsr0)
403 len = DTADR(si->rxdma) - si->dma_rx_buff_phy;
422 si->dma_rx_buff[len++] = data;
448 skb_copy_to_linear_data(skb, si->dma_rx_buff, len);
466 struct pxa_irda *si = netdev_priv(dev);
470 DCSR(si->rxdma) &= ~DCSR_RUN;
471 si->last_oscr = OSCR;
487 pxa_irda_fir_irq_eif(si, dev, icsr0);
491 pxa_irda_fir_dma_rx_start(si);
505 struct pxa_irda *si = netdev_priv(dev);
513 if (speed != si->speed && speed != -1)
514 si->newspeed = speed;
520 if (si->newspeed) {
521 si->newspeed = 0;
522 pxa_irda_set_speed(si, speed);
530 if (!IS_FIR(si)) {
531 si->tx_buff.data = si->tx_buff.head;
532 si->tx_buff.len = async_wrap_skb(skb, si->tx_buff.data, si->tx_buff.truesize);
543 si->dma_tx_buff_len = skb->len;
544 skb_copy_from_linear_data(skb, si->dma_tx_buff, skb->len);
547 while ((unsigned)(OSCR - si->last_oscr)/4 < mtt)
551 DCSR(si->rxdma) &= ~DCSR_RUN;
554 pxa_irda_fir_dma_tx_start(si);
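The busy-wait on line 547 above, (unsigned)(OSCR - si->last_oscr)/4 < mtt, enforces the IrDA minimum media-turnaround time before the FIR TX DMA is started. The unsigned subtraction stays correct across counter overflow, and the divide by 4 converts OS-timer ticks to roughly microseconds (assuming the usual 3.6864 MHz PXA OS timer), the unit mtt is expressed in. A small standalone sketch of the wrap-safe subtraction, with plain variables in place of the OSCR register:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

/* Wrap-safe elapsed-time measurement on a free-running 32-bit counter,
 * the same pattern as (unsigned)(OSCR - si->last_oscr) in the driver. */
static uint32_t elapsed_ticks(uint32_t now, uint32_t then)
{
	/* Unsigned subtraction is defined modulo 2^32, so the difference is the
	 * true tick count even if the counter overflowed between samples. */
	return now - then;
}

int main(void)
{
	uint32_t last_oscr = 0xFFFFFFF0u;  /* sample taken just before overflow */
	uint32_t oscr_now  = 0x00000020u;  /* counter value after wrapping */

	uint32_t ticks = elapsed_ticks(oscr_now, last_oscr);  /* 0x30 == 48 ticks */
	printf("elapsed ticks: %" PRIu32 "\n", ticks);
	printf("approx. microseconds (ticks / 4): %" PRIu32 "\n", ticks / 4);
	return 0;
}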
565 struct pxa_irda *si = netdev_priv(dev);
577 ret = pxa_irda_set_speed(si,
596 rq->ifr_receiving = IS_FIR(si) ? 0
597 : si->rx_buff.state != OUTSIDE_FRAME;
608 static void pxa_irda_startup(struct pxa_irda *si)
625 DRCMR(17) = si->rxdma | DRCMR_MAPVLD;
626 DRCMR(18) = si->txdma | DRCMR_MAPVLD;
629 si->speed = 4000000;
630 pxa_irda_set_speed(si, 9600);
635 static void pxa_irda_shutdown(struct pxa_irda *si)
647 DCSR(si->txdma) &= ~DCSR_RUN;
648 DCSR(si->rxdma) &= ~DCSR_RUN;
653 pxa_irda_disable_clk(si);
661 pxa_irda_set_mode(si, IR_OFF);
668 struct pxa_irda *si = netdev_priv(dev);
671 si->speed = 9600;
688 si->rxdma = pxa_request_dma("FICP_RX",DMA_PRIO_LOW, pxa_irda_fir_dma_rx_irq, dev);
689 if (si->rxdma < 0)
692 si->txdma = pxa_request_dma("FICP_TX",DMA_PRIO_LOW, pxa_irda_fir_dma_tx_irq, dev);
693 if (si->txdma < 0)
697 si->dma_rx_buff = dma_alloc_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT,
698 &si->dma_rx_buff_phy, GFP_KERNEL );
699 if (!si->dma_rx_buff)
702 si->dma_tx_buff = dma_alloc_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT,
703 &si->dma_tx_buff_phy, GFP_KERNEL );
704 if (!si->dma_tx_buff)
708 pxa_irda_startup(si);
713 si->irlap = irlap_open(dev, &si->qos, "pxa");
715 if (!si->irlap)
730 pxa_irda_shutdown(si);
731 dma_free_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT, si->dma_tx_buff, si->dma_tx_buff_phy);
733 dma_free_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT, si->dma_rx_buff, si->dma_rx_buff_phy);
735 pxa_free_dma(si->txdma);
737 pxa_free_dma(si->rxdma);
749 struct pxa_irda *si = netdev_priv(dev);
753 pxa_irda_shutdown(si);
756 if (si->irlap) {
757 irlap_close(si->irlap);
758 si->irlap = NULL;
764 pxa_free_dma(si->rxdma);
765 pxa_free_dma(si->txdma);
767 if (si->dma_rx_buff)
768 dma_free_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT, si->dma_rx_buff, si->dma_rx_buff_phy);
769 if (si->dma_tx_buff)
770 dma_free_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT, si->dma_tx_buff, si->dma_tx_buff_phy);
779 struct pxa_irda *si;
782 si = netdev_priv(dev);
784 pxa_irda_shutdown(si);
793 struct pxa_irda *si;
796 si = netdev_priv(dev);
797 pxa_irda_startup(si);
828 struct pxa_irda *si;
848 si = netdev_priv(dev);
849 si->dev = &pdev->dev;
850 si->pdata = pdev->dev.platform_data;
852 si->sir_clk = clk_get(&pdev->dev, "UARTCLK");
853 si->fir_clk = clk_get(&pdev->dev, "FICPCLK");
854 if (IS_ERR(si->sir_clk) || IS_ERR(si->fir_clk)) {
855 err = PTR_ERR(IS_ERR(si->sir_clk) ? si->sir_clk : si->fir_clk);
862 err = pxa_irda_init_iobuf(&si->rx_buff, 14384);
865 err = pxa_irda_init_iobuf(&si->tx_buff, 4000);
869 if (gpio_is_valid(si->pdata->gpio_pwdown)) {
870 err = gpio_request(si->pdata->gpio_pwdown, "IrDA switch");
873 err = gpio_direction_output(si->pdata->gpio_pwdown,
874 !si->pdata->gpio_pwdown_inverted);
876 gpio_free(si->pdata->gpio_pwdown);
881 if (si->pdata->startup) {
882 err = si->pdata->startup(si->dev);
887 if (gpio_is_valid(si->pdata->gpio_pwdown) && si->pdata->startup)
888 dev_warn(si->dev, "gpio_pwdown and startup() both defined!\n");
892 irda_init_max_qos_capabilies(&si->qos);
895 if (si->pdata->transceiver_cap & IR_SIRMODE)
897 if (si->pdata->transceiver_cap & IR_FIRMODE)
900 si->qos.baud_rate.bits &= baudrate_mask;
901 si->qos.min_turn_time.bits = 7; /* 1ms or more */
903 irda_qos_bits_to_value(&si->qos);
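The value 7 written to min_turn_time.bits on line 901 is a capability bitmask rather than a time. Assuming the standard IrLAP encoding used by the IrDA QoS code (bit 0 = 10 ms, bit 1 = 5 ms, bit 2 = 1 ms, and so on downwards), mask 0x07 advertises 10 ms, 5 ms and 1 ms, which is what the in-line comment "1ms or more" means. A short sketch that decodes the mask:

#include <stdio.h>

/* Assumed IrLAP minimum-turnaround-time encoding, one value per bit,
 * mirroring the table used by the IrDA QoS helpers (values in microseconds). */
static const unsigned int min_turn_times_us[8] = {
	10000, 5000, 1000, 500, 100, 50, 10, 0
};

int main(void)
{
	unsigned int bits = 0x07;   /* the value the driver stores in si->qos.min_turn_time.bits */

	printf("advertised minimum turnaround times:");
	for (int i = 0; i < 8; i++)
		if (bits & (1u << i))
			printf(" %u us", min_turn_times_us[i]);
	printf("\n");   /* prints: 10000 us 5000 us 1000 us, i.e. "1 ms or more" */
	return 0;
}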
911 if (si->pdata->shutdown)
912 si->pdata->shutdown(si->dev);
914 kfree(si->tx_buff.head);
916 kfree(si->rx_buff.head);
918 if (si->sir_clk && !IS_ERR(si->sir_clk))
919 clk_put(si->sir_clk);
920 if (si->fir_clk && !IS_ERR(si->fir_clk))
921 clk_put(si->fir_clk);
937 struct pxa_irda *si = netdev_priv(dev);
939 if (gpio_is_valid(si->pdata->gpio_pwdown))
940 gpio_free(si->pdata->gpio_pwdown);
941 if (si->pdata->shutdown)
942 si->pdata->shutdown(si->dev);
943 kfree(si->tx_buff.head);
944 kfree(si->rx_buff.head);
945 clk_put(si->fir_clk);
946 clk_put(si->sir_clk);