Lines Matching defs:tfd

83 				   struct iwl_tfh_tfd *tfd)
85 return le16_to_cpu(tfd->num_tbs) & 0x1f;
88 int iwl_txq_gen2_set_tb(struct iwl_trans *trans, struct iwl_tfh_tfd *tfd,
91 int idx = iwl_txq_gen2_get_num_tbs(trans, tfd);
106 tb = &tfd->tbs[idx];
109 if (le16_to_cpu(tfd->num_tbs) >= trans->txqs.tfd.max_tbs) {
111 trans->txqs.tfd.max_tbs);
118 tfd->num_tbs = cpu_to_le16(idx + 1);
124 struct iwl_tfh_tfd *tfd)
126 tfd->num_tbs = 0;
128 iwl_txq_gen2_set_tb(trans, tfd, trans->invalid_tx_cmd.dma,
133 struct iwl_tfh_tfd *tfd)
138 num_tbs = iwl_txq_gen2_get_num_tbs(trans, tfd);
140 if (num_tbs > trans->txqs.tfd.max_tbs) {
149 le64_to_cpu(tfd->tbs[i].addr),
150 le16_to_cpu(tfd->tbs[i].tb_len),
154 le64_to_cpu(tfd->tbs[i].addr),
155 le16_to_cpu(tfd->tbs[i].tb_len),
159 iwl_txq_set_tfd_invalid_gen2(trans, tfd);
217 struct iwl_tfh_tfd *tfd,
229 ret = iwl_txq_gen2_set_tb(trans, tfd, phys, len);
267 ret = iwl_txq_gen2_set_tb(trans, tfd, phys, len);
339 struct iwl_tfh_tfd *tfd, int start_len,
353 trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd),
427 iwl_txq_gen2_set_tb(trans, tfd, tb_phys, tb_len);
443 ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd,
474 struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);
486 iwl_txq_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
508 iwl_txq_gen2_set_tb(trans, tfd, tb_phys, len);
510 if (iwl_txq_gen2_build_amsdu(trans, skb, tfd, len + IWL_FIRST_TB_SIZE,
516 return tfd;
519 iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);
525 struct iwl_tfh_tfd *tfd,
541 ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
562 struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);
578 iwl_txq_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
603 iwl_txq_gen2_set_tb(trans, tfd, tb_phys, tb1_len);
604 trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr,
615 ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
622 if (iwl_txq_gen2_tx_add_frags(trans, skb, tfd, out_meta))
630 ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
635 if (iwl_txq_gen2_tx_add_frags(trans, frag, tfd, out_meta))
639 return tfd;
642 iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);
655 struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);
669 memset(tfd, 0, sizeof(*tfd));
730 void *tfd;
778 tfd = iwl_txq_gen2_build_tfd(trans, txq, dev_cmd, skb, out_meta);
779 if (!tfd) {
798 iwl_txq_gen2_get_num_tbs(trans, tfd));
860 trans->txqs.tfd.size * txq->n_window,
951 "Max tfd queue size must be a power of two, but is %d",
1040 struct iwl_tfd *tfd)
1042 tfd->num_tbs = 0;
1044 iwl_pcie_gen1_tfd_set_tb(trans, tfd, 0, trans->invalid_tx_cmd.dma,
1063 tfd_sz = trans->txqs.tfd.size * num_entries;
1104 void *tfd = iwl_txq_get_tfd(trans, txq, i);
1107 iwl_txq_set_tfd_invalid_gen2(trans, tfd);
1109 iwl_txq_set_tfd_invalid_gen1(trans, tfd);
1371 struct iwl_tfd *tfd, u8 idx)
1373 struct iwl_tfd_tb *tb = &tfd->tbs[idx];
1397 struct iwl_tfd *tfd = iwl_txq_get_tfd(trans, txq, index);
1400 num_tbs = iwl_txq_gen1_tfd_get_num_tbs(trans, tfd);
1402 if (num_tbs > trans->txqs.tfd.max_tbs) {
1414 tfd, i),
1416 tfd, i),
1421 tfd, i),
1423 tfd, i),
1429 iwl_txq_set_tfd_invalid_gen1(trans, tfd);
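
The matches above cluster around the gen2 TB handling helpers (iwl_txq_gen2_get_num_tbs, iwl_txq_gen2_set_tb, iwl_txq_set_tfd_invalid_gen2, iwl_txq_gen2_tfd_unmap at source lines 83-159) and their gen1 counterparts near the end of the listing. As a rough illustration of that append/unmap pattern only, here is a minimal standalone C sketch. Everything in it is a simplified stand-in: the struct layout, the TFD_MAX_TBS and TFD_NUM_TBS_MASK constants, all sketch_* names and the printf-based "unmap" are assumptions made for the example, not the driver's definitions; the real code uses __le16/__le64 fields with cpu_to_le16()/le16_to_cpu(), dma_addr_t addresses, trans->txqs.tfd.max_tbs and dma_unmap calls.

/*
 * Standalone sketch of the TB-append pattern suggested by the matches
 * around source lines 83-159 (gen2 path).  Simplified, host-endian,
 * no DMA: illustration only.
 */
#include <stdint.h>
#include <stdio.h>

#define TFD_MAX_TBS      25   /* assumed limit; driver reads trans->txqs.tfd.max_tbs */
#define TFD_NUM_TBS_MASK 0x1f /* mask visible in the match at line 85 */

struct sketch_tb {
	uint64_t addr;    /* DMA address of the buffer chunk */
	uint16_t tb_len;  /* length of the chunk in bytes */
};

struct sketch_tfd {
	uint16_t num_tbs; /* low 5 bits: number of valid TBs */
	struct sketch_tb tbs[TFD_MAX_TBS];
};

/* Mirrors the accessor at line 85: only the low five bits count TBs. */
static int sketch_get_num_tbs(const struct sketch_tfd *tfd)
{
	return tfd->num_tbs & TFD_NUM_TBS_MASK;
}

/* Append one TB, following the shape of the set_tb matches at lines 88-118. */
static int sketch_set_tb(struct sketch_tfd *tfd, uint64_t addr, uint16_t len)
{
	int idx = sketch_get_num_tbs(tfd);

	if (idx >= TFD_MAX_TBS) {
		fprintf(stderr, "cannot send more than %d chunks\n", TFD_MAX_TBS);
		return -1;
	}

	tfd->tbs[idx].addr = addr;
	tfd->tbs[idx].tb_len = len;
	tfd->num_tbs = (uint16_t)(idx + 1);
	return idx;
}

/*
 * Reset the descriptor and point its single TB at a known dummy buffer,
 * loosely following lines 124-128 (the driver uses trans->invalid_tx_cmd.dma;
 * the constant here is a placeholder).
 */
static void sketch_set_tfd_invalid(struct sketch_tfd *tfd)
{
	tfd->num_tbs = 0;
	sketch_set_tb(tfd, 0xdeadbeefULL, 0);
}

/* Walk the TBs as the unmap loop at lines 133-159 does, then invalidate. */
static void sketch_tfd_unmap(struct sketch_tfd *tfd)
{
	int i, num_tbs = sketch_get_num_tbs(tfd);

	for (i = 0; i < num_tbs; i++)
		printf("unmap tb %d: addr=0x%llx len=%u\n", i,
		       (unsigned long long)tfd->tbs[i].addr, tfd->tbs[i].tb_len);

	sketch_set_tfd_invalid(tfd);
}

int main(void)
{
	struct sketch_tfd tfd = { 0 };

	sketch_set_tb(&tfd, 0x1000, 64);   /* e.g. first TB / header chunk */
	sketch_set_tb(&tfd, 0x2000, 1500); /* e.g. payload fragment */
	sketch_tfd_unmap(&tfd);
	return 0;
}

Reading the current num_tbs count as the insertion index and bumping it after each append appears to be what lets the build paths in the listing (iwl_txq_gen2_build_tx / build_amsdu around source lines 474-642) chain the first TB, the command header and the payload fragments into a single descriptor before it is handed to hardware.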