Directory: /netgear-WNDR4500v2-V1.0.0.60_1.0.38/src/linux/linux-2.6/drivers/isdn/hisax/

Lines matching defs:bcs (bcs is the struct BCState * holding per-B-channel state)

141 GetFreeFifoBytes_B(struct BCState *bcs)
145 if (bcs->hw.hfc.f1 == bcs->hw.hfc.f2)
146 return (bcs->cs->hw.hfcD.bfifosize);
147 s = bcs->hw.hfc.send[bcs->hw.hfc.f1] - bcs->hw.hfc.send[bcs->hw.hfc.f2];
149 s += bcs->cs->hw.hfcD.bfifosize;
150 s = bcs->cs->hw.hfcD.bfifosize - s;
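
The GetFreeFifoBytes_B excerpt above computes the free space in a B-channel transmit FIFO from the frame counters f1/f2 and the Z1 positions shadowed in hfc.send[]. The listing elides the wraparound test between lines 147 and 149; the standalone sketch below restores it. BFIFOSIZE and the shadow array are illustrative stand-ins, not the driver's types:

    #include <stdio.h>

    #define BFIFOSIZE 1024   /* assumed; the driver reads cs->hw.hfcD.bfifosize */

    static int send_z[32];   /* shadow of the chip's Z1 pointer per frame slot (hfc.send[]) */

    static int get_free_fifo_bytes(unsigned char f1, unsigned char f2)
    {
            int s;

            if (f1 == f2)                /* no frame pending: the whole FIFO is free */
                    return BFIFOSIZE;
            s = send_z[f1] - send_z[f2]; /* bytes occupied between oldest and newest frame */
            if (s <= 0)                  /* Z positions wrapped around the circular buffer */
                    s += BFIFOSIZE;
            return BFIFOSIZE - s;
    }

    int main(void)
    {
            send_z[3] = 100;             /* Z1 of the oldest pending frame */
            send_z[5] = 900;             /* Z1 of the most recent frame */
            printf("free: %d\n", get_free_fifo_bytes(5, 3)); /* prints 224 */
            return 0;
    }
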
181 *hfc_empty_fifo(struct BCState *bcs, int count)
185 struct IsdnCardState *cs = bcs->cs;
196 cip = HFCB_FIFO | HFCB_FIFO_OUT | HFCB_REC | HFCB_CHANNEL(bcs->channel);
205 cip = HFCB_FIFO | HFCB_FIFO_OUT | HFCB_REC | HFCB_CHANNEL(bcs->channel);
207 bcs->err_inv++;
217 cip = HFCB_FIFO | HFCB_FIFO_OUT | HFCB_REC | HFCB_CHANNEL(bcs->channel);
227 printk(KERN_WARNING "HFC FIFO channel %d BUSY Error\n", bcs->channel);
239 bcs->channel, chksum, stat);
245 bcs->err_crc++;
253 HFCB_REC | HFCB_CHANNEL(bcs->channel));
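
hfc_empty_fifo drains one received frame through the CIP value built from the HFCB_* flags (FIFO, direction, channel) and accounts the two error classes visible above: err_inv for undersized frames and err_crc for a nonzero status byte. A toy model of that bookkeeping, assuming the usual HDLC trailer of two CRC bytes plus one status byte:

    #include <stdio.h>
    #include <string.h>

    struct rx_stats { unsigned err_inv, err_crc; };

    /* 'fifo' stands in for repeated ReadReg(cs, HFCD_DATA, cip) accesses. */
    static int empty_fifo(const unsigned char *fifo, int count,
                          unsigned char *buf, struct rx_stats *st)
    {
            if (count < 4) {              /* shorter than CRC + status byte: invalid */
                    st->err_inv++;
                    return -1;
            }
            memcpy(buf, fifo, count - 3); /* payload without 2 CRC bytes + status */
            if (fifo[count - 1] != 0) {   /* nonzero status: CRC error, drop frame */
                    st->err_crc++;
                    return -1;
            }
            return count - 3;
    }

    int main(void)
    {
            unsigned char fifo[] = { 'h', 'i', 0x12, 0x34, 0x00 }, buf[8];
            struct rx_stats st = { 0, 0 };
            printf("len=%d crc_err=%u\n", empty_fifo(fifo, 5, buf, &st), st.err_crc);
            return 0;
    }
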
259 hfc_fill_fifo(struct BCState *bcs)
261 struct IsdnCardState *cs = bcs->cs;
266 if (!bcs->tx_skb)
268 if (bcs->tx_skb->len <= 0)
270 SelFiFo(cs, HFCB_SEND | HFCB_CHANNEL(bcs->channel));
271 cip = HFCB_FIFO | HFCB_F1 | HFCB_SEND | HFCB_CHANNEL(bcs->channel);
273 bcs->hw.hfc.f1 = ReadReg(cs, HFCD_DATA, cip);
275 cip = HFCB_FIFO | HFCB_F2 | HFCB_SEND | HFCB_CHANNEL(bcs->channel);
277 bcs->hw.hfc.f2 = ReadReg(cs, HFCD_DATA, cip);
278 bcs->hw.hfc.send[bcs->hw.hfc.f1] = ReadZReg(cs, HFCB_FIFO | HFCB_Z1 | HFCB_SEND | HFCB_CHANNEL(bcs->channel));
281 bcs->channel, bcs->hw.hfc.f1, bcs->hw.hfc.f2,
282 bcs->hw.hfc.send[bcs->hw.hfc.f1]);
283 fcnt = bcs->hw.hfc.f1 - bcs->hw.hfc.f2;
291 count = GetFreeFifoBytes_B(bcs);
294 bcs->channel, bcs->tx_skb->len,
296 if (count < bcs->tx_skb->len) {
301 cip = HFCB_FIFO | HFCB_FIFO_IN | HFCB_SEND | HFCB_CHANNEL(bcs->channel);
305 WriteReg(cs, HFCD_DATA_NODEB, cip, bcs->tx_skb->data[idx++]);
306 while (idx < bcs->tx_skb->len) {
309 WriteReg(cs, HFCD_DATA_NODEB, cip, bcs->tx_skb->data[idx]);
312 if (idx != bcs->tx_skb->len) {
314 printk(KERN_WARNING "HFC S FIFO channel %d BUSY Error\n", bcs->channel);
316 bcs->tx_cnt -= bcs->tx_skb->len;
317 if (test_bit(FLG_LLI_L1WAKEUP,&bcs->st->lli.flag) &&
318 (PACKET_NOACK != bcs->tx_skb->pkt_type)) {
320 spin_lock_irqsave(&bcs->aclock, flags);
321 bcs->ackcnt += bcs->tx_skb->len;
322 spin_unlock_irqrestore(&bcs->aclock, flags);
323 schedule_event(bcs, B_ACKPENDING);
325 dev_kfree_skb_any(bcs->tx_skb);
326 bcs->tx_skb = NULL;
330 ReadReg(cs, HFCD_DATA, HFCB_FIFO | HFCB_F1_INC | HFCB_SEND | HFCB_CHANNEL(bcs->channel));
332 test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
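
hfc_fill_fifo reads F1/F2 and Z1 to decide whether bcs->tx_skb fits, writes it one byte at a time through HFCD_DATA_NODEB, credits the acknowledged byte count under bcs->aclock, and closes the frame with an F1 increment (line 330). A condensed userspace model: the 32-slot counter matches the 32-entry send[] array allocated further down, while the 30-frame cap on pending frames is an assumption of this sketch:

    #include <stdio.h>

    #define BFIFOSIZE 1024  /* assumed FIFO size */
    #define NUM_F     32    /* frame slots; matches init_send_hfcd(32) below */

    struct tx_fifo {
            unsigned char mem[BFIFOSIZE];
            int z;                  /* write position (Z1) */
            unsigned char f1, f2;   /* frame counters, counting modulo NUM_F */
            int used;               /* bytes occupied */
    };

    static int fill_fifo(struct tx_fifo *f, const unsigned char *data, int len,
                         unsigned *ackcnt)
    {
            int fcnt = f->f1 - f->f2;

            if (fcnt < 0)
                    fcnt += NUM_F;              /* counter wrapped */
            if (fcnt > 30 || len > BFIFOSIZE - f->used)
                    return 0;                   /* no room: frame stays queued */
            for (int i = 0; i < len; i++)       /* one write per byte, as with */
                    f->mem[(f->z + i) % BFIFOSIZE] = data[i]; /* HFCD_DATA_NODEB */
            f->z = (f->z + len) % BFIFOSIZE;
            f->used += len;
            *ackcnt += len;                     /* L1-wakeup accounting (bcs->ackcnt) */
            f->f1 = (f->f1 + 1) % NUM_F;        /* F1 increment closes the frame */
            return len;
    }

    int main(void)
    {
            struct tx_fifo f = { .z = 0, .f1 = 0, .f2 = 0, .used = 0 };
            unsigned ackcnt = 0;
            printf("wrote %d, ackcnt %u\n",
                   fill_fifo(&f, (const unsigned char *)"ping", 4, &ackcnt), ackcnt);
            return 0;
    }
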
337 hfc_send_data(struct BCState *bcs)
339 struct IsdnCardState *cs = bcs->cs;
342 hfc_fill_fifo(bcs);
345 debugl1(cs,"send_data %d blocked", bcs->channel);
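
hfc_send_data, and main_rec_2bds0 just below, appear to take an atomic per-card flag before touching the FIFO (the excerpt elides the flag's name) and log "blocked" when they lose the race. The same guard pattern with C11 atomics, since the kernel's test_and_set_bit is not available in userspace:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_flag fifo_busy = ATOMIC_FLAG_INIT;

    static int enter_fifo(int channel)
    {
            if (atomic_flag_test_and_set(&fifo_busy)) {
                    printf("send_data %d blocked\n", channel); /* cf. debugl1() above */
                    return 0;       /* someone else owns the FIFO */
            }
            return 1;
    }

    static void leave_fifo(void)
    {
            atomic_flag_clear(&fifo_busy);
    }

    int main(void)
    {
            if (enter_fifo(0)) {
                    /* ... fill or drain the FIFO ... */
                    leave_fifo();
            }
            return 0;
    }
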
349 main_rec_2bds0(struct BCState *bcs)
351 struct IsdnCardState *cs = bcs->cs;
360 debugl1(cs,"rec_data %d blocked", bcs->channel);
363 SelFiFo(cs, HFCB_REC | HFCB_CHANNEL(bcs->channel));
364 cip = HFCB_FIFO | HFCB_F1 | HFCB_REC | HFCB_CHANNEL(bcs->channel);
367 cip = HFCB_FIFO | HFCB_F2 | HFCB_REC | HFCB_CHANNEL(bcs->channel);
373 bcs->channel, f1, f2);
374 z1 = ReadZReg(cs, HFCB_FIFO | HFCB_Z1 | HFCB_REC | HFCB_CHANNEL(bcs->channel));
375 z2 = ReadZReg(cs, HFCB_FIFO | HFCB_Z2 | HFCB_REC | HFCB_CHANNEL(bcs->channel));
382 bcs->channel, z1, z2, rcnt);
383 if ((skb = hfc_empty_fifo(bcs, rcnt))) {
384 skb_queue_tail(&bcs->rqueue, skb);
385 schedule_event(bcs, B_RCVBUFREADY);
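
main_rec_2bds0 derives the received byte count from the Z1/Z2 pointers read at lines 374-375: the distance from frame start (Z2) to frame end (Z1), corrected for wraparound, plus one. As a tiny standalone function (BFIFOSIZE assumed):

    #include <stdio.h>

    #define BFIFOSIZE 1024   /* assumed; the driver reads cs->hw.hfcD.bfifosize */

    static int rcnt_from_z(int z1, int z2)
    {
            int rcnt = z1 - z2;

            if (rcnt < 0)
                    rcnt += BFIFOSIZE;   /* Z pointers wrapped */
            return rcnt + 1;             /* inclusive count handed to hfc_empty_fifo */
    }

    int main(void)
    {
            printf("%d\n", rcnt_from_z(10, 1000)); /* wrapped frame: prints 35 */
            return 0;
    }
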
403 mode_2bs0(struct BCState *bcs, int mode, int bc)
405 struct IsdnCardState *cs = bcs->cs;
409 mode, bc, bcs->channel);
410 bcs->mode = mode;
411 bcs->channel = bc;
453 struct BCState *bcs = st->l1.bcs;
459 spin_lock_irqsave(&bcs->cs->lock, flags);
460 if (bcs->tx_skb) {
461 skb_queue_tail(&bcs->squeue, skb);
463 bcs->tx_skb = skb;
464 // test_and_set_bit(BC_FLG_BUSY, &bcs->Flag);
465 bcs->cs->BC_Send_Data(bcs);
467 spin_unlock_irqrestore(&bcs->cs->lock, flags);
470 spin_lock_irqsave(&bcs->cs->lock, flags);
471 if (bcs->tx_skb) {
474 // test_and_set_bit(BC_FLG_BUSY, &bcs->Flag);
475 bcs->tx_skb = skb;
476 bcs->cs->BC_Send_Data(bcs);
478 spin_unlock_irqrestore(&bcs->cs->lock, flags);
481 if (!bcs->tx_skb) {
488 spin_lock_irqsave(&bcs->cs->lock, flags);
489 test_and_set_bit(BC_FLG_ACTIV, &bcs->Flag);
490 mode_2bs0(bcs, st->l1.mode, st->l1.bc);
491 spin_unlock_irqrestore(&bcs->cs->lock, flags);
498 spin_lock_irqsave(&bcs->cs->lock, flags);
499 test_and_clear_bit(BC_FLG_ACTIV, &bcs->Flag);
500 test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
501 mode_2bs0(bcs, 0, st->l1.bc);
502 spin_unlock_irqrestore(&bcs->cs->lock, flags);
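
The l2l1 fragment starting at line 453 shows the PH_DATA request path: under the card spinlock, a frame is appended to squeue if a transmission is already in flight, otherwise it becomes tx_skb and BC_Send_Data() is kicked. A userspace model of that decision, with a pthread mutex standing in for the spinlock:

    #include <pthread.h>
    #include <stddef.h>

    struct skbuf { struct skbuf *next; };

    struct bchan {
            pthread_mutex_t lock;            /* models bcs->cs->lock */
            struct skbuf *tx_skb;            /* frame currently being sent */
            struct skbuf *sq_head, *sq_tail; /* software send queue (squeue) */
    };

    static void bc_send_data(struct bchan *c)   /* models cs->BC_Send_Data(bcs) */
    {
            (void)c;
    }

    static void ph_data_req(struct bchan *c, struct skbuf *skb)
    {
            pthread_mutex_lock(&c->lock);
            if (c->tx_skb) {                 /* busy: append to squeue */
                    skb->next = NULL;
                    if (c->sq_tail)
                            c->sq_tail->next = skb;
                    else
                            c->sq_head = skb;
                    c->sq_tail = skb;
            } else {
                    c->tx_skb = skb;         /* idle: start sending now */
                    bc_send_data(c);
            }
            pthread_mutex_unlock(&c->lock);
    }

    int main(void)
    {
            struct bchan c = { PTHREAD_MUTEX_INITIALIZER, NULL, NULL, NULL };
            struct skbuf s1 = { NULL }, s2 = { NULL };
            ph_data_req(&c, &s1);  /* goes straight to tx_skb */
            ph_data_req(&c, &s2);  /* queued behind s1 */
            return 0;
    }
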
509 close_2bs0(struct BCState *bcs)
511 mode_2bs0(bcs, 0, bcs->channel);
512 if (test_and_clear_bit(BC_FLG_INIT, &bcs->Flag)) {
513 skb_queue_purge(&bcs->rqueue);
514 skb_queue_purge(&bcs->squeue);
515 if (bcs->tx_skb) {
516 dev_kfree_skb_any(bcs->tx_skb);
517 bcs->tx_skb = NULL;
518 test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
524 open_hfcstate(struct IsdnCardState *cs, struct BCState *bcs)
526 if (!test_and_set_bit(BC_FLG_INIT, &bcs->Flag)) {
527 skb_queue_head_init(&bcs->rqueue);
528 skb_queue_head_init(&bcs->squeue);
530 bcs->tx_skb = NULL;
531 test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
532 bcs->event = 0;
533 bcs->tx_cnt = 0;
538 setstack_2b(struct PStack *st, struct BCState *bcs)
540 bcs->channel = st->l1.bc;
541 if (open_hfcstate(st->l1.hardware, bcs))
543 st->l1.bcs = bcs;
546 bcs->st = st;
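
open_hfcstate and close_2bs0 bracket the channel's lifetime with an INIT flag: queues and counters are set up only on the first open, and purged on close only if the channel was actually initialised. The flag discipline, modelled with a C11 atomic exchange:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_bool ch_initialised;

    static void open_channel(void)
    {
            /* like !test_and_set_bit(BC_FLG_INIT, ...): first open wins */
            if (!atomic_exchange(&ch_initialised, true))
                    puts("init rqueue/squeue, tx_skb = NULL, tx_cnt = 0");
    }

    static void close_channel(void)
    {
            /* like test_and_clear_bit(BC_FLG_INIT, ...): only tear down once */
            if (atomic_exchange(&ch_initialised, false))
                    puts("purge queues, free pending tx_skb, clear BUSY");
    }

    int main(void)
    {
            open_channel();
            open_channel();   /* second open is a no-op */
            close_channel();
            return 0;
    }
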
754 if (cs->bcs[0].mode && (cs->bcs[0].channel == channel))
755 return(&cs->bcs[0]);
756 else if (cs->bcs[1].mode && (cs->bcs[1].channel == channel))
757 return(&cs->bcs[1]);
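
The lookup at lines 754-757 resolves an interrupt's channel number to one of the two B-channel states, returning NULL when neither is active on that channel; the callers below treat NULL as a spurious event. The same selection in isolation:

    #include <stddef.h>

    struct bcstate { int mode; int channel; };

    static struct bcstate *sel_bcs(struct bcstate bcs[2], int channel)
    {
            if (bcs[0].mode && bcs[0].channel == channel)
                    return &bcs[0];
            if (bcs[1].mode && bcs[1].channel == channel)
                    return &bcs[1];
            return NULL;    /* no active B channel bound to this number */
    }
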
766 struct BCState *bcs;
794 if (!(bcs=Sel_BCS(cs, 0))) {
798 main_rec_2bds0(bcs);
801 if (!(bcs=Sel_BCS(cs, 1))) {
805 main_rec_2bds0(bcs);
808 if (!(bcs=Sel_BCS(cs, 0))) {
812 if (bcs->tx_skb) {
814 hfc_fill_fifo(bcs);
817 debugl1(cs,"fill_data %d blocked", bcs->channel);
819 if ((bcs->tx_skb = skb_dequeue(&bcs->squeue))) {
821 hfc_fill_fifo(bcs);
824 debugl1(cs,"fill_data %d blocked", bcs->channel);
826 schedule_event(bcs, B_XMTBUFREADY);
832 if (!(bcs=Sel_BCS(cs, 1))) {
836 if (bcs->tx_skb) {
838 hfc_fill_fifo(bcs);
841 debugl1(cs,"fill_data %d blocked", bcs->channel);
843 if ((bcs->tx_skb = skb_dequeue(&bcs->squeue))) {
845 hfc_fill_fifo(bcs);
848 debugl1(cs,"fill_data %d blocked", bcs->channel);
850 schedule_event(bcs, B_XMTBUFREADY);
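
The interrupt fragments above run the same transmit continuation for both channels: keep filling while tx_skb is set, otherwise dequeue the next frame from squeue, and signal B_XMTBUFREADY once the queue drains. Condensed into one function, with stubs replacing hfc_fill_fifo and schedule_event:

    #include <stdio.h>
    #include <stddef.h>

    struct skbuf { struct skbuf *next; };
    struct bchan { struct skbuf *tx_skb, *squeue; int channel; };

    static void fill_fifo_hw(struct bchan *c)   /* stands in for hfc_fill_fifo */
    {
            printf("fill channel %d\n", c->channel);
            c->tx_skb = NULL;                   /* pretend the frame fit */
    }

    static struct skbuf *dequeue(struct skbuf **q)
    {
            struct skbuf *s = *q;

            if (s)
                    *q = s->next;
            return s;
    }

    static void tx_irq(struct bchan *c)
    {
            if (c->tx_skb) {                    /* frame still in flight */
                    fill_fifo_hw(c);
                    return;
            }
            if ((c->tx_skb = dequeue(&c->squeue)))  /* next queued frame */
                    fill_fifo_hw(c);
            else
                    printf("B_XMTBUFREADY ch %d\n", c->channel); /* wake layer 2 */
    }

    int main(void)
    {
            struct skbuf s = { NULL };
            struct bchan c = { NULL, &s, 1 };
            tx_irq(&c);   /* dequeues and fills */
            tx_irq(&c);   /* queue empty: signals XMTBUFREADY */
            return 0;
    }
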
1040 if (!cs->bcs[0].hw.hfc.send)
1041 cs->bcs[0].hw.hfc.send = init_send_hfcd(32);
1042 if (!cs->bcs[1].hw.hfc.send)
1043 cs->bcs[1].hw.hfc.send = init_send_hfcd(32);
1045 cs->bcs[0].BC_SetStack = setstack_2b;
1046 cs->bcs[1].BC_SetStack = setstack_2b;
1047 cs->bcs[0].BC_Close = close_2bs0;
1048 cs->bcs[1].BC_Close = close_2bs0;
1049 mode_2bs0(cs->bcs, 0, 0);
1050 mode_2bs0(cs->bcs + 1, 0, 1);
1056 kfree(cs->bcs[0].hw.hfc.send);
1057 cs->bcs[0].hw.hfc.send = NULL;
1058 kfree(cs->bcs[1].hw.hfc.send);
1059 cs->bcs[1].hw.hfc.send = NULL;
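
Init and release keep a 32-entry send[] shadow per B channel: allocated once if not already present (init_send_hfcd(32) at lines 1040-1043), then freed and reset to NULL on release so a later re-init can allocate again. A minimal model of that lifecycle with the C library allocator:

    #include <stdlib.h>

    static unsigned int *init_send_shadow(int cnt)
    {
            /* driver equivalent: init_send_hfcd(32) filling hfc.send */
            return calloc(cnt, sizeof(unsigned int));
    }

    static void release_send_shadow(unsigned int **send)
    {
            free(*send);
            *send = NULL;   /* mirrors kfree() followed by the NULL reset */
    }

    int main(void)
    {
            unsigned int *send[2] = { NULL, NULL };

            for (int i = 0; i < 2; i++)
                    if (!send[i])                     /* allocate only once */
                            send[i] = init_send_shadow(32);
            release_send_shadow(&send[0]);
            release_send_shadow(&send[1]);
            return 0;
    }
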