Lines matching refs: chan (QMC channel code, drivers/soc/fsl/qe/qmc.c)

266 int qmc_chan_get_info(struct qmc_chan *chan, struct qmc_chan_info *info)
273 ret = tsa_serial_get_info(chan->qmc->tsa_serial, &tsa_info);
277 spin_lock_irqsave(&chan->ts_lock, flags);
279 info->mode = chan->mode;
282 info->nb_tx_ts = hweight64(chan->tx_ts_mask);
285 info->nb_rx_ts = hweight64(chan->rx_ts_mask);
287 spin_unlock_irqrestore(&chan->ts_lock, flags);
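
A minimal consumer-side sketch of qmc_chan_get_info(), assuming the public API from include/soc/fsl/qe/qmc.h; the qmc_chan pointer would come from one of the qmc_chan_get_*() helpers (qmc_chan_put() at the end of this listing is the release side).

    #include <soc/fsl/qe/qmc.h>

    static int dump_chan_info(struct qmc_chan *chan)
    {
            struct qmc_chan_info info;
            int ret;

            ret = qmc_chan_get_info(chan, &info);
            if (ret)
                    return ret;

            /* nb_tx_ts/nb_rx_ts are the hweight64() popcounts at 282/285 */
            pr_info("mode=%s nb_tx_ts=%u nb_rx_ts=%u\n",
                    info.mode == QMC_HDLC ? "hdlc" : "transparent",
                    info.nb_tx_ts, info.nb_rx_ts);
            return 0;
    }
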
293 int qmc_chan_get_ts_info(struct qmc_chan *chan, struct qmc_chan_ts_info *ts_info)
297 spin_lock_irqsave(&chan->ts_lock, flags);
299 ts_info->rx_ts_mask_avail = chan->rx_ts_mask_avail;
300 ts_info->tx_ts_mask_avail = chan->tx_ts_mask_avail;
301 ts_info->rx_ts_mask = chan->rx_ts_mask;
302 ts_info->tx_ts_mask = chan->tx_ts_mask;
304 spin_unlock_irqrestore(&chan->ts_lock, flags);
310 int qmc_chan_set_ts_info(struct qmc_chan *chan, const struct qmc_chan_ts_info *ts_info)
316 if ((ts_info->rx_ts_mask & chan->rx_ts_mask_avail) != ts_info->rx_ts_mask)
318 if ((ts_info->tx_ts_mask & chan->tx_ts_mask_avail) != ts_info->tx_ts_mask)
322 if (chan->qmc->is_tsa_64rxtx) {
327 spin_lock_irqsave(&chan->ts_lock, flags);
329 if ((chan->tx_ts_mask != ts_info->tx_ts_mask && !chan->is_tx_stopped) ||
330 (chan->rx_ts_mask != ts_info->rx_ts_mask && !chan->is_rx_stopped)) {
331 dev_err(chan->qmc->dev, "Channel rx and/or tx not stopped\n");
334 chan->tx_ts_mask = ts_info->tx_ts_mask;
335 chan->rx_ts_mask = ts_info->rx_ts_mask;
338 spin_unlock_irqrestore(&chan->ts_lock, flags);
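
The getter/setter pair above lets timeslots be renegotiated at runtime. A sketch under two constraints the listing confirms: the affected direction must be stopped (the check at 329-331), and with a common 64-entry Rx/Tx table (is_tsa_64rxtx, 322) the two masks must be identical, which picking the same timeslot for both directions satisfies.

    #include <linux/bits.h>
    #include <linux/errno.h>
    #include <soc/fsl/qe/qmc.h>

    static int use_one_ts(struct qmc_chan *chan, unsigned int ts)
    {
            struct qmc_chan_ts_info ts_info;
            int ret;

            ret = qmc_chan_get_ts_info(chan, &ts_info);
            if (ret)
                    return ret;

            /* requested masks must be subsets of the *_avail masks (316/318) */
            if (!(ts_info.rx_ts_mask_avail & ts_info.tx_ts_mask_avail & BIT_ULL(ts)))
                    return -EINVAL;

            ts_info.rx_ts_mask = BIT_ULL(ts);
            ts_info.tx_ts_mask = BIT_ULL(ts);
            return qmc_chan_set_ts_info(chan, &ts_info);
    }
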
344 int qmc_chan_set_param(struct qmc_chan *chan, const struct qmc_chan_param *param)
346 if (param->mode != chan->mode)
355 qmc_write16(chan->qmc->scc_pram + QMC_GBL_MRBLR,
357 qmc_write16(chan->s_param + QMC_SPE_MFLR,
360 qmc_setbits16(chan->s_param + QMC_SPE_CHAMR,
363 qmc_clrbits16(chan->s_param + QMC_SPE_CHAMR,
369 qmc_write16(chan->s_param + QMC_SPE_TMRBLR,
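
A configuration sketch for qmc_chan_set_param(). The struct layout (a mode tag plus an hdlc/transp union) and its field names follow my reading of include/soc/fsl/qe/qmc.h, so treat them as assumptions; what the listing itself confirms is that the mode cannot be changed (346) and that the sizes land in MRBLR/MFLR for HDLC or TMRBLR for transparent mode.

    static int set_hdlc_params(struct qmc_chan *chan)
    {
            struct qmc_chan_param param = {
                    .mode = QMC_HDLC,       /* must match the channel's mode */
                    .hdlc = {
                            .max_rx_buf_size   = 1536,  /* -> QMC_GBL_MRBLR */
                            .max_rx_frame_size = 1600,  /* -> QMC_SPE_MFLR */
                            .is_crc32          = true,  /* -> CHAMR CRC bit */
                    },
            };

            return qmc_chan_set_param(chan, &param);
    }
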
381 int qmc_chan_write_submit(struct qmc_chan *chan, dma_addr_t addr, size_t length,
398 spin_lock_irqsave(&chan->tx_lock, flags);
399 bd = chan->txbd_free;
411 xfer_desc = &chan->tx_desc[bd - chan->txbds];
420 if (!chan->is_tx_stopped)
421 qmc_setbits16(chan->s_param + QMC_SPE_CHAMR, QMC_SPE_CHAMR_POL);
424 chan->txbd_free = chan->txbds;
426 chan->txbd_free++;
431 spin_unlock_irqrestore(&chan->tx_lock, flags);
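
A TX submission sketch. qmc_chan_write_submit() takes a DMA address, so the caller owns the mapping; the completion callback, void (*)(void *) as far as the public header goes, is invoked from qmc_chan_write_done() below, i.e. from interrupt context.

    #include <linux/completion.h>
    #include <linux/dma-mapping.h>
    #include <soc/fsl/qe/qmc.h>

    static void tx_complete(void *context)
    {
            complete(context);      /* IRQ context: just signal the waiter */
    }

    static int send_buf(struct device *dev, struct qmc_chan *chan,
                        void *buf, size_t len)
    {
            DECLARE_COMPLETION_ONSTACK(done);
            dma_addr_t addr;
            int ret;

            addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
            if (dma_mapping_error(dev, addr))
                    return -ENOMEM;

            ret = qmc_chan_write_submit(chan, addr, len, tx_complete, &done);
            if (!ret)
                    wait_for_completion(&done);

            dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);
            return ret;
    }
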
436 static void qmc_chan_write_done(struct qmc_chan *chan)
453 spin_lock_irqsave(&chan->tx_lock, flags);
454 bd = chan->txbd_done;
461 xfer_desc = &chan->tx_desc[bd - chan->txbds];
470 chan->txbd_done = chan->txbds;
472 chan->txbd_done++;
475 spin_unlock_irqrestore(&chan->tx_lock, flags);
477 spin_lock_irqsave(&chan->tx_lock, flags);
480 bd = chan->txbd_done;
485 spin_unlock_irqrestore(&chan->tx_lock, flags);
488 int qmc_chan_read_submit(struct qmc_chan *chan, dma_addr_t addr, size_t length,
506 spin_lock_irqsave(&chan->rx_lock, flags);
507 bd = chan->rxbd_free;
519 xfer_desc = &chan->rx_desc[bd - chan->rxbds];
533 if (chan->is_rx_halted && !chan->is_rx_stopped) {
535 if (chan->mode == QMC_TRANSPARENT)
536 qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x18000080);
538 qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x00000080);
539 qmc_write32(chan->s_param + QMC_SPE_RSTATE, 0x31000000);
540 chan->is_rx_halted = false;
542 chan->rx_pending++;
545 chan->rxbd_free = chan->rxbds;
547 chan->rxbd_free++;
551 spin_unlock_irqrestore(&chan->rx_lock, flags);
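
The RX analogue. Note 533-540: submitting into a halted receiver kicks ZDSTATE/RSTATE to restart it, so keeping buffers queued matters. The completion signature used here, (context, length), is the original form of the API; recent kernels also pass a flags word carrying HDLC Rx status, so check the qmc.h you build against.

    #include <linux/dma-mapping.h>
    #include <soc/fsl/qe/qmc.h>

    static void rx_complete(void *context, size_t length)
    {
            /* 'length' is the received byte count; a real consumer would
             * unmap the buffer, hand it upstream and requeue a fresh one. */
    }

    static int queue_rx_buf(struct device *dev, struct qmc_chan *chan,
                            void *buf, size_t size)
    {
            dma_addr_t addr = dma_map_single(dev, buf, size, DMA_FROM_DEVICE);

            if (dma_mapping_error(dev, addr))
                    return -ENOMEM;
            return qmc_chan_read_submit(chan, addr, size, rx_complete, NULL);
    }
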
556 static void qmc_chan_read_done(struct qmc_chan *chan)
574 spin_lock_irqsave(&chan->rx_lock, flags);
575 bd = chan->rxbd_done;
582 xfer_desc = &chan->rx_desc[bd - chan->rxbds];
592 chan->rxbd_done = chan->rxbds;
594 chan->rxbd_done++;
596 chan->rx_pending--;
599 spin_unlock_irqrestore(&chan->rx_lock, flags);
617 spin_lock_irqsave(&chan->rx_lock, flags);
620 bd = chan->rxbd_done;
625 spin_unlock_irqrestore(&chan->rx_lock, flags);
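
Both *_done() helpers follow the same reaping pattern: advance the done pointer under the lock, but drop the lock around the user callback, because the callback may resubmit and would otherwise deadlock on the same lock (hence the unlock/lock pairs at 475/477 and 599/617). A self-contained sketch with illustrative names, not the driver's:

    #include <linux/spinlock.h>
    #include <linux/types.h>

    struct demo_desc {
            void (*complete)(void *ctx);
            void *ctx;
            bool done;
    };

    struct demo_ring {
            spinlock_t lock;
            struct demo_desc *cur;
    };

    static void demo_ring_reap(struct demo_ring *r)
    {
            unsigned long flags;

            spin_lock_irqsave(&r->lock, flags);
            while (r->cur && r->cur->done) {
                    struct demo_desc *d = r->cur;

                    r->cur = NULL;  /* a real ring advances a done pointer */
                    spin_unlock_irqrestore(&r->lock, flags);
                    d->complete(d->ctx);            /* may resubmit */
                    spin_lock_irqsave(&r->lock, flags);
            }
            spin_unlock_irqrestore(&r->lock, flags);
    }
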
628 static int qmc_chan_setup_tsa_64rxtx(struct qmc_chan *chan, const struct tsa_serial_info *info,
639 if (chan->tx_ts_mask != chan->rx_ts_mask) {
640 dev_err(chan->qmc->dev, "chan %u uses different Rx and Tx TS\n", chan->id);
644 val = QMC_TSA_VALID | QMC_TSA_MASK | QMC_TSA_CHANNEL(chan->id);
648 if (!(chan->rx_ts_mask & (((u64)1) << i)))
651 curr = qmc_read16(chan->qmc->scc_pram + QMC_GBL_TSATRX + (i * 2));
653 dev_err(chan->qmc->dev, "chan %u TxRx entry %d already used\n",
654 chan->id, i);
661 if (!(chan->rx_ts_mask & (((u64)1) << i)))
664 qmc_clrsetbits16(chan->qmc->scc_pram + QMC_GBL_TSATRX + (i * 2),
671 static int qmc_chan_setup_tsa_32rx(struct qmc_chan *chan, const struct tsa_serial_info *info,
680 val = QMC_TSA_VALID | QMC_TSA_MASK | QMC_TSA_CHANNEL(chan->id);
684 if (!(chan->rx_ts_mask & (((u64)1) << i)))
687 curr = qmc_read16(chan->qmc->scc_pram + QMC_GBL_TSATRX + (i * 2));
689 dev_err(chan->qmc->dev, "chan %u Rx entry %d already used\n",
690 chan->id, i);
697 if (!(chan->rx_ts_mask & (((u64)1) << i)))
700 qmc_clrsetbits16(chan->qmc->scc_pram + QMC_GBL_TSATRX + (i * 2),
707 static int qmc_chan_setup_tsa_32tx(struct qmc_chan *chan, const struct tsa_serial_info *info,
716 val = QMC_TSA_VALID | QMC_TSA_MASK | QMC_TSA_CHANNEL(chan->id);
720 if (!(chan->tx_ts_mask & (((u64)1) << i)))
723 curr = qmc_read16(chan->qmc->scc_pram + QMC_GBL_TSATTX + (i * 2));
725 dev_err(chan->qmc->dev, "chan %u Tx entry %d already used\n",
726 chan->id, i);
733 if (!(chan->tx_ts_mask & (((u64)1) << i)))
736 qmc_clrsetbits16(chan->qmc->scc_pram + QMC_GBL_TSATTX + (i * 2),
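
The three setup variants share one recipe: build a 16-bit TSA entry naming the channel, make a first pass over the timeslot mask to reject entries already owned by another channel, then a second pass to program them. A sketch of the enable path over a plain array; the bit positions are illustrative, not the real QMC_TSA_* layout.

    #include <linux/bits.h>
    #include <linux/errno.h>
    #include <linux/types.h>

    #define DEMO_TSA_VALID          BIT(15)         /* illustrative */
    #define DEMO_TSA_CHANNEL(x)     ((x) << 6)      /* illustrative */

    static int demo_tsa_program(u16 *table, unsigned int nb_ts,
                                u64 ts_mask, unsigned int chan_id)
    {
            u16 val = DEMO_TSA_VALID | DEMO_TSA_CHANNEL(chan_id);
            unsigned int i;

            for (i = 0; i < nb_ts; i++) {           /* pass 1: conflicts */
                    if (!(ts_mask & BIT_ULL(i)))
                            continue;
                    if (table[i] & DEMO_TSA_VALID)
                            return -EBUSY;          /* entry already used */
            }
            for (i = 0; i < nb_ts; i++) {           /* pass 2: program */
                    if (ts_mask & BIT_ULL(i))
                            table[i] = val;
            }
            return 0;
    }
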
743 static int qmc_chan_setup_tsa_tx(struct qmc_chan *chan, bool enable)
749 ret = tsa_serial_get_info(chan->qmc->tsa_serial, &info);
754 if (chan->qmc->is_tsa_64rxtx)
755 return qmc_chan_setup_tsa_64rxtx(chan, &info, enable);
757 return qmc_chan_setup_tsa_32tx(chan, &info, enable);
760 static int qmc_chan_setup_tsa_rx(struct qmc_chan *chan, bool enable)
766 ret = tsa_serial_get_info(chan->qmc->tsa_serial, &info);
771 if (chan->qmc->is_tsa_64rxtx)
772 return qmc_chan_setup_tsa_64rxtx(chan, &info, enable);
774 return qmc_chan_setup_tsa_32rx(chan, &info, enable);
777 static int qmc_chan_command(struct qmc_chan *chan, u8 qmc_opcode)
779 return cpm_command(chan->id << 2, (qmc_opcode << 4) | 0x0E);
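
qmc_chan_command() packs the channel number into the CPCR channel field and combines the opcode with what looks like a fixed QMC sub-block nibble (0x0E). Worked example for channel 5:

    /*
     * STOP RECEIVE  (opcode 0x0): cpm_command(5 << 2, (0x0 << 4) | 0x0E)
     *                             == cpm_command(0x14, 0x0e)
     * STOP TRANSMIT (opcode 0x1): cpm_command(5 << 2, (0x1 << 4) | 0x0E)
     *                             == cpm_command(0x14, 0x1e)
     * 0x0 and 0x1 are the only opcodes this file issues (796 and 833).
     */
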
782 static int qmc_chan_stop_rx(struct qmc_chan *chan)
787 spin_lock_irqsave(&chan->rx_lock, flags);
789 if (chan->is_rx_stopped) {
796 ret = qmc_chan_command(chan, 0x0);
798 dev_err(chan->qmc->dev, "chan %u: Send STOP RECEIVE failed (%d)\n",
799 chan->id, ret);
803 chan->is_rx_stopped = true;
805 if (!chan->qmc->is_tsa_64rxtx || chan->is_tx_stopped) {
806 ret = qmc_chan_setup_tsa_rx(chan, false);
808 dev_err(chan->qmc->dev, "chan %u: Disable tsa entries failed (%d)\n",
809 chan->id, ret);
815 spin_unlock_irqrestore(&chan->rx_lock, flags);
819 static int qmc_chan_stop_tx(struct qmc_chan *chan)
824 spin_lock_irqsave(&chan->tx_lock, flags);
826 if (chan->is_tx_stopped) {
833 ret = qmc_chan_command(chan, 0x1);
835 dev_err(chan->qmc->dev, "chan %u: Send STOP TRANSMIT failed (%d)\n",
836 chan->id, ret);
840 chan->is_tx_stopped = true;
842 if (!chan->qmc->is_tsa_64rxtx || chan->is_rx_stopped) {
843 ret = qmc_chan_setup_tsa_tx(chan, false);
845 dev_err(chan->qmc->dev, "chan %u: Disable tsa entries failed (%d)\n",
846 chan->id, ret);
852 spin_unlock_irqrestore(&chan->tx_lock, flags);
856 static int qmc_chan_start_rx(struct qmc_chan *chan);
858 int qmc_chan_stop(struct qmc_chan *chan, int direction)
864 spin_lock_irqsave(&chan->ts_lock, flags);
867 is_rx_rollback_needed = !chan->is_rx_stopped;
868 ret = qmc_chan_stop_rx(chan);
874 ret = qmc_chan_stop_tx(chan);
878 qmc_chan_start_rx(chan);
884 spin_unlock_irqrestore(&chan->ts_lock, flags);
889 static int qmc_setup_chan_trnsync(struct qmc *qmc, struct qmc_chan *chan)
897 ret = tsa_serial_get_info(chan->qmc->tsa_serial, &info);
902 first_rx = chan->rx_ts_mask ? __ffs64(chan->rx_ts_mask) + 1 : 0;
905 last_tx = fls64(chan->tx_ts_mask);
913 qmc_write16(chan->s_param + QMC_SPE_TRNSYNC, trnsync);
915 dev_dbg(qmc->dev, "chan %u: trnsync=0x%04x, rx %u/%u 0x%llx, tx %u/%u 0x%llx\n",
916 chan->id, trnsync,
917 first_rx, info.nb_rx_ts, chan->rx_ts_mask,
918 last_tx, info.nb_tx_ts, chan->tx_ts_mask);
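
The TRNSYNC computation works on 1-based timeslot indexes derived from the masks. A worked example from the two lines above: with rx_ts_mask = tx_ts_mask = 0x0c (timeslots 2 and 3),

    /*
     * first_rx = __ffs64(0x0c) + 1 = 3   first assigned Rx TS, 1-based
     * last_tx  = fls64(0x0c)      = 4    last assigned Tx TS, 1-based
     *
     * These two indexes are then folded into the Rx/Tx byte fields of
     * QMC_SPE_TRNSYNC; the exact shift/modulo step is elided from this
     * listing, so it is not reproduced here.
     */
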
923 static int qmc_chan_start_rx(struct qmc_chan *chan)
928 spin_lock_irqsave(&chan->rx_lock, flags);
930 if (!chan->is_rx_stopped) {
936 ret = qmc_chan_setup_tsa_rx(chan, true);
938 dev_err(chan->qmc->dev, "chan %u: Enable tsa entries failed (%d)\n",
939 chan->id, ret);
943 ret = qmc_setup_chan_trnsync(chan->qmc, chan);
945 dev_err(chan->qmc->dev, "chan %u: setup TRNSYNC failed (%d)\n",
946 chan->id, ret);
951 if (chan->mode == QMC_TRANSPARENT)
952 qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x18000080);
954 qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x00000080);
955 qmc_write32(chan->s_param + QMC_SPE_RSTATE, 0x31000000);
956 chan->is_rx_halted = false;
958 chan->is_rx_stopped = false;
961 spin_unlock_irqrestore(&chan->rx_lock, flags);
965 static int qmc_chan_start_tx(struct qmc_chan *chan)
970 spin_lock_irqsave(&chan->tx_lock, flags);
972 if (!chan->is_tx_stopped) {
978 ret = qmc_chan_setup_tsa_tx(chan, true);
980 dev_err(chan->qmc->dev, "chan %u: Enable tsa entries failed (%d)\n",
981 chan->id, ret);
985 ret = qmc_setup_chan_trnsync(chan->qmc, chan);
987 dev_err(chan->qmc->dev, "chan %u: setup TRNSYNC failed (%d)\n",
988 chan->id, ret);
996 qmc_setbits16(chan->s_param + QMC_SPE_CHAMR, QMC_SPE_CHAMR_ENT);
999 qmc_setbits16(chan->s_param + QMC_SPE_CHAMR, QMC_SPE_CHAMR_POL);
1001 chan->is_tx_stopped = false;
1004 spin_unlock_irqrestore(&chan->tx_lock, flags);
1008 int qmc_chan_start(struct qmc_chan *chan, int direction)
1014 spin_lock_irqsave(&chan->ts_lock, flags);
1017 is_rx_rollback_needed = chan->is_rx_stopped;
1018 ret = qmc_chan_start_rx(chan);
1024 ret = qmc_chan_start_tx(chan);
1028 qmc_chan_stop_rx(chan);
1034 spin_unlock_irqrestore(&chan->ts_lock, flags);
1039 static void qmc_chan_reset_rx(struct qmc_chan *chan)
1046 spin_lock_irqsave(&chan->rx_lock, flags);
1047 bd = chan->rxbds;
1052 xfer_desc = &chan->rx_desc[bd - chan->rxbds];
1059 chan->rxbd_free = chan->rxbds;
1060 chan->rxbd_done = chan->rxbds;
1061 qmc_write16(chan->s_param + QMC_SPE_RBPTR,
1062 qmc_read16(chan->s_param + QMC_SPE_RBASE));
1064 chan->rx_pending = 0;
1066 spin_unlock_irqrestore(&chan->rx_lock, flags);
1069 static void qmc_chan_reset_tx(struct qmc_chan *chan)
1076 spin_lock_irqsave(&chan->tx_lock, flags);
1079 qmc_clrbits16(chan->s_param + QMC_SPE_CHAMR, QMC_SPE_CHAMR_ENT);
1081 bd = chan->txbds;
1086 xfer_desc = &chan->tx_desc[bd - chan->txbds];
1093 chan->txbd_free = chan->txbds;
1094 chan->txbd_done = chan->txbds;
1095 qmc_write16(chan->s_param + QMC_SPE_TBPTR,
1096 qmc_read16(chan->s_param + QMC_SPE_TBASE));
1099 qmc_write32(chan->s_param + QMC_SPE_TSTATE, 0x30000000);
1100 qmc_write32(chan->s_param + QMC_SPE_ZISTATE, 0x00000100);
1102 spin_unlock_irqrestore(&chan->tx_lock, flags);
1105 int qmc_chan_reset(struct qmc_chan *chan, int direction)
1108 qmc_chan_reset_rx(chan);
1111 qmc_chan_reset_tx(chan);
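
Putting the three entry points together: stop, rewind the BD rings, then restart. A plausible consumer-side sequence; note that qmc_chan_stop() itself rolls Rx back (867/878) if stopping Tx fails when both directions were requested.

    static int restart_chan(struct qmc_chan *chan)
    {
            int ret;

            ret = qmc_chan_stop(chan, QMC_CHAN_ALL);
            if (ret)
                    return ret;

            ret = qmc_chan_reset(chan, QMC_CHAN_ALL);
            if (ret)
                    return ret;

            /* the reset drops queued transfers; resubmit buffers here */
            return qmc_chan_start(chan, QMC_CHAN_ALL);
    }
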
1120 struct qmc_chan *chan;
1149 list_for_each_entry(chan, &qmc->chan_head, list) {
1150 if (chan->tx_ts_mask_avail > tx_ts_assigned_mask) {
1151 dev_err(qmc->dev, "chan %u can use TSA unassigned Tx TS\n", chan->id);
1155 if (chan->rx_ts_mask_avail > rx_ts_assigned_mask) {
1156 dev_err(qmc->dev, "chan %u can use TSA unassigned Rx TS\n", chan->id);
1167 struct qmc_chan *chan;
1169 list_for_each_entry(chan, &qmc->chan_head, list)
1178 struct qmc_chan *chan;
1197 chan = devm_kzalloc(qmc->dev, sizeof(*chan), GFP_KERNEL);
1198 if (!chan) {
1203 chan->id = chan_id;
1204 spin_lock_init(&chan->ts_lock);
1205 spin_lock_init(&chan->rx_lock);
1206 spin_lock_init(&chan->tx_lock);
1215 chan->tx_ts_mask_avail = ts_mask;
1216 chan->tx_ts_mask = chan->tx_ts_mask_avail;
1225 chan->rx_ts_mask_avail = ts_mask;
1226 chan->rx_ts_mask = chan->rx_ts_mask_avail;
1237 chan->mode = QMC_TRANSPARENT;
1239 chan->mode = QMC_HDLC;
1247 chan->is_reverse_data = of_property_read_bool(chan_np,
1250 list_add_tail(&chan->list, &qmc->chan_head);
1251 qmc->chans[chan->id] = chan;
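
The channels allocated above are handed to consumers through phandle lookup. A probe-side sketch; the devm_ variant and the "fsl,qmc-chan" property name follow my reading of the QMC consumer bindings, so verify both against the qmc.h and DT bindings of your tree.

    #include <linux/platform_device.h>
    #include <soc/fsl/qe/qmc.h>

    static int consumer_probe(struct platform_device *pdev)
    {
            struct qmc_chan *chan;

            chan = devm_qmc_chan_get_byphandle(&pdev->dev, pdev->dev.of_node,
                                               "fsl,qmc-chan");
            if (IS_ERR(chan))
                    return dev_err_probe(&pdev->dev, PTR_ERR(chan),
                                         "get QMC channel failed\n");
            return 0;
    }
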
1342 static int qmc_setup_chan(struct qmc *qmc, struct qmc_chan *chan)
1349 chan->qmc = qmc;
1352 chan->s_param = qmc->dpram + (chan->id * 64);
1354 chan->txbds = qmc->bd_table + (chan->id * (QMC_NB_TXBDS + QMC_NB_RXBDS));
1355 chan->rxbds = qmc->bd_table + (chan->id * (QMC_NB_TXBDS + QMC_NB_RXBDS)) + QMC_NB_TXBDS;
1357 chan->txbd_free = chan->txbds;
1358 chan->txbd_done = chan->txbds;
1359 chan->rxbd_free = chan->rxbds;
1360 chan->rxbd_done = chan->rxbds;
1363 val = chan->id * (QMC_NB_TXBDS + QMC_NB_RXBDS) * sizeof(cbd_t);
1364 qmc_write16(chan->s_param + QMC_SPE_TBASE, val);
1365 qmc_write16(chan->s_param + QMC_SPE_TBPTR, val);
1368 val = ((chan->id * (QMC_NB_TXBDS + QMC_NB_RXBDS)) + QMC_NB_TXBDS) * sizeof(cbd_t);
1369 qmc_write16(chan->s_param + QMC_SPE_RBASE, val);
1370 qmc_write16(chan->s_param + QMC_SPE_RBPTR, val);
1371 qmc_write32(chan->s_param + QMC_SPE_TSTATE, 0x30000000);
1372 qmc_write32(chan->s_param + QMC_SPE_RSTATE, 0x31000000);
1373 qmc_write32(chan->s_param + QMC_SPE_ZISTATE, 0x00000100);
1374 if (chan->mode == QMC_TRANSPARENT) {
1375 qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x18000080);
1376 qmc_write16(chan->s_param + QMC_SPE_TMRBLR, 60);
1378 if (chan->is_reverse_data)
1380 qmc_write16(chan->s_param + QMC_SPE_CHAMR, val);
1381 ret = qmc_setup_chan_trnsync(qmc, chan);
1385 qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x00000080);
1386 qmc_write16(chan->s_param + QMC_SPE_MFLR, 60);
1387 qmc_write16(chan->s_param + QMC_SPE_CHAMR,
1392 qmc_write16(chan->s_param + QMC_SPE_INTMSK, 0x0000);
1398 bd = chan->rxbds + i;
1401 bd = chan->rxbds + QMC_NB_RXBDS - 1;
1407 if (chan->mode == QMC_HDLC)
1410 bd = chan->txbds + i;
1413 bd = chan->txbds + QMC_NB_TXBDS - 1;
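
Worked example for the TBASE/RBASE offsets above, assuming QMC_NB_TXBDS and QMC_NB_RXBDS are both 64 and sizeof(cbd_t) is 8 bytes (both assumptions, neither visible in this listing):

    /*
     * Each channel owns 128 contiguous BDs, i.e. 1 KiB of the BD table.
     * For chan->id == 2:
     *   TBASE = 2 * 128 * 8        = 0x800   Tx ring base
     *   RBASE = (2 * 128 + 64) * 8 = 0xa00   Rx ring, after the 64 TxBDs
     */
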
1421 struct qmc_chan *chan;
1424 list_for_each_entry(chan, &qmc->chan_head, list) {
1425 ret = qmc_setup_chan(qmc, chan);
1435 struct qmc_chan *chan;
1438 list_for_each_entry(chan, &qmc->chan_head, list) {
1440 if (chan->mode == QMC_HDLC) {
1441 qmc_write16(chan->s_param + QMC_SPE_INTMSK,
1446 qmc_write16(chan->s_param + QMC_SPE_INTMSK,
1452 ret = qmc_chan_stop(chan, QMC_CHAN_ALL);
1480 struct qmc_chan *chan;
1491 chan = qmc->chans[chan_id];
1492 if (!chan) {
1493 dev_err(qmc->dev, "interrupt on invalid chan %u\n", chan_id);
1498 qmc_chan_write_done(chan);
1501 dev_info(qmc->dev, "intr chan %u, 0x%04x (UN)\n", chan_id,
1503 chan->nb_tx_underrun++;
1507 dev_info(qmc->dev, "intr chan %u, 0x%04x (BSY)\n", chan_id,
1509 chan->nb_rx_busy++;
1511 spin_lock_irqsave(&chan->rx_lock, flags);
1512 if (chan->rx_pending && !chan->is_rx_stopped) {
1513 if (chan->mode == QMC_TRANSPARENT)
1514 qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x18000080);
1516 qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x00000080);
1517 qmc_write32(chan->s_param + QMC_SPE_RSTATE, 0x31000000);
1518 chan->is_rx_halted = false;
1520 chan->is_rx_halted = true;
1522 spin_unlock_irqrestore(&chan->rx_lock, flags);
1526 qmc_chan_read_done(chan);
1817 void qmc_chan_put(struct qmc_chan *chan)
1819 put_device(chan->qmc->dev);
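
qmc_chan_put() drops the reference on the controller device taken at get time, so every non-devm get must be paired with it. A sketch, with the same hedge as above on the phandle property name:

    #include <linux/err.h>
    #include <linux/of.h>
    #include <soc/fsl/qe/qmc.h>

    static int demo_get_put(struct device_node *np)
    {
            struct qmc_chan *chan;

            chan = qmc_chan_get_byphandle(np, "fsl,qmc-chan");
            if (IS_ERR(chan))
                    return PTR_ERR(chan);
            /* ... use the channel ... */
            qmc_chan_put(chan);
            return 0;
    }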