Lines matching defs:xc — from the UniPhier external DMA controller driver (drivers/dma/uniphier-xdmac.c in the Linux kernel). The number on each match is its line number in that source file; a short annotated sketch follows each group of matches.

115 /* xc->vc.lock must be held by caller */
117 uniphier_xdmac_next_desc(struct uniphier_xdmac_chan *xc)
121 vd = vchan_next_desc(&xc->vc);
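
The three matches above sit in uniphier_xdmac_next_desc() (lines 115-121), which pops the next issued descriptor off the virt-dma queue; the "lock must be held" comment refers to the virt_dma_chan spinlock guarding that queue. A plausible reconstruction of the whole helper, following the usual virt-dma pattern: vchan_next_desc() returns the head of the issued list without unlinking it, so the caller does the list_del(). to_uniphier_xdmac_desc() is assumed to be the driver's container_of() wrapper.

/* xc->vc.lock must be held by caller */
static struct uniphier_xdmac_desc *
uniphier_xdmac_next_desc(struct uniphier_xdmac_chan *xc)
{
	struct virt_dma_desc *vd;

	vd = vchan_next_desc(&xc->vc);	/* head of the issued list, or NULL */
	if (!vd)
		return NULL;

	list_del(&vd->node);	/* unlink: the channel owns it from here on */

	return to_uniphier_xdmac_desc(vd);	/* assumed container_of() wrapper */
}
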
130 /* xc->vc.lock must be held by caller */
131 static void uniphier_xdmac_chan_start(struct uniphier_xdmac_chan *xc,
151 buswidth = xc->sconfig.src_addr_width;
160 buswidth = xc->sconfig.dst_addr_width;
169 val |= FIELD_PREP(XDMAC_TFA_MASK, xc->req_factor);
170 writel(val, xc->reg_ch_base + XDMAC_TFA);
173 writel(lower_32_bits(src_addr), xc->reg_ch_base + XDMAC_SAD);
174 writel(upper_32_bits(src_addr), xc->reg_ch_base + XDMAC_EXSAD);
176 writel(lower_32_bits(dst_addr), xc->reg_ch_base + XDMAC_DAD);
177 writel(upper_32_bits(dst_addr), xc->reg_ch_base + XDMAC_EXDAD);
181 writel(src_mode, xc->reg_ch_base + XDMAC_SADM);
182 writel(dst_mode, xc->reg_ch_base + XDMAC_DADM);
184 writel(its, xc->reg_ch_base + XDMAC_ITS);
185 writel(tnum, xc->reg_ch_base + XDMAC_TNUM);
189 xc->reg_ch_base + XDMAC_IEN);
192 val = readl(xc->reg_ch_base + XDMAC_TSS);
194 writel(val, xc->reg_ch_base + XDMAC_TSS);
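
Lines 131-194 are the hardware setup path, uniphier_xdmac_chan_start(): pick the bus width for the device side of the transfer, select the request factor, load the 64-bit source and destination addresses across low/extended register pairs, set the per-direction address modes, program the beat size and count, unmask interrupts, and finally set the software-start bit. A condensed sketch of that sequence; the node member names, the XDMAC_SADM_SAM_*/XDMAC_DADM_DAM_*, XDMAC_IEN_* and XDMAC_TSS_REQ constants, and the STW/DTW width fields are assumptions not visible in the matches.

static void uniphier_xdmac_chan_start(struct uniphier_xdmac_chan *xc,
				      struct uniphier_xdmac_desc *xd)
{
	/* per-node transfer parameters; member names are assumed */
	dma_addr_t src_addr = xd->nodes[xd->cur_node].src;
	dma_addr_t dst_addr = xd->nodes[xd->cur_node].dst;
	u32 its = xd->nodes[xd->cur_node].burst_size;
	u32 tnum = xd->nodes[xd->cur_node].nr_burst;
	enum dma_slave_buswidth buswidth;
	u32 src_mode, dst_mode, val;

	/* the device side keeps a fixed FIFO address, memory increments */
	if (xd->dir == DMA_DEV_TO_MEM) {
		buswidth = xc->sconfig.src_addr_width;
		src_mode = XDMAC_SADM_SAM_FIXED;	/* assumed names */
		dst_mode = XDMAC_DADM_DAM_INC;
	} else {
		buswidth = xc->sconfig.dst_addr_width;
		src_mode = XDMAC_SADM_SAM_INC;
		dst_mode = XDMAC_DADM_DAM_FIXED;
	}

	/* the bus width is presumably folded into the mode registers;
	 * these field names and the log2 encoding are assumptions */
	src_mode |= FIELD_PREP(XDMAC_SADM_STW_MASK, __ffs(buswidth));
	dst_mode |= FIELD_PREP(XDMAC_DADM_DTW_MASK, __ffs(buswidth));

	/* request factor: which peripheral request line drives the channel */
	val = FIELD_PREP(XDMAC_TFA_MASK, xc->req_factor);
	writel(val, xc->reg_ch_base + XDMAC_TFA);

	/* 64-bit addresses split across a low and an "extended" register */
	writel(lower_32_bits(src_addr), xc->reg_ch_base + XDMAC_SAD);
	writel(upper_32_bits(src_addr), xc->reg_ch_base + XDMAC_EXSAD);
	writel(lower_32_bits(dst_addr), xc->reg_ch_base + XDMAC_DAD);
	writel(upper_32_bits(dst_addr), xc->reg_ch_base + XDMAC_EXDAD);

	writel(src_mode, xc->reg_ch_base + XDMAC_SADM);
	writel(dst_mode, xc->reg_ch_base + XDMAC_DADM);

	writel(its, xc->reg_ch_base + XDMAC_ITS);	/* beat size */
	writel(tnum, xc->reg_ch_base + XDMAC_TNUM);	/* beat count */

	/* unmask end-of-transfer and error interrupts (line 189) */
	writel(XDMAC_IEN_ENDIEN | XDMAC_IEN_ERRIEN,
	       xc->reg_ch_base + XDMAC_IEN);

	/* software start: set the trigger bit last (lines 192-194) */
	val = readl(xc->reg_ch_base + XDMAC_TSS);
	val |= XDMAC_TSS_REQ;
	writel(val, xc->reg_ch_base + XDMAC_TSS);
}
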
197 /* xc->vc.lock must be held by caller */
198 static int uniphier_xdmac_chan_stop(struct uniphier_xdmac_chan *xc)
203 val = readl(xc->reg_ch_base + XDMAC_IEN);
205 writel(val, xc->reg_ch_base + XDMAC_IEN);
208 val = readl(xc->reg_ch_base + XDMAC_TSS);
210 writel(0, xc->reg_ch_base + XDMAC_TSS);
213 return readl_poll_timeout_atomic(xc->reg_ch_base + XDMAC_STAT, val,
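
uniphier_xdmac_chan_stop() (lines 197-213) is the mirror image: mask the channel's interrupts, zero the software trigger register, then busy-wait until the status register reports the channel idle. The _atomic poll variant from <linux/iopoll.h> is required because this runs under a spinlock, including from the IRQ handler (line 240). A sketch; XDMAC_STAT_TENF as the "transfer enabled" flag, the XDMAC_IEN_* bit names, and the 100 us poll interval / 1000 us timeout are assumptions.

/* xc->vc.lock must be held by caller */
static int uniphier_xdmac_chan_stop(struct uniphier_xdmac_chan *xc)
{
	u32 val;

	/* mask this channel's end/error interrupts (lines 203-205) */
	val = readl(xc->reg_ch_base + XDMAC_IEN);
	val &= ~(XDMAC_IEN_ENDIEN | XDMAC_IEN_ERRIEN);	/* assumed bits */
	writel(val, xc->reg_ch_base + XDMAC_IEN);

	/* withdraw the software transfer request (lines 208-210) */
	val = readl(xc->reg_ch_base + XDMAC_TSS);
	writel(0, xc->reg_ch_base + XDMAC_TSS);

	/* poll until the hardware confirms the transfer has stopped */
	return readl_poll_timeout_atomic(xc->reg_ch_base + XDMAC_STAT, val,
					 !(val & XDMAC_STAT_TENF), 100, 1000);
}
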
217 /* xc->vc.lock must be held by caller */
218 static void uniphier_xdmac_start(struct uniphier_xdmac_chan *xc)
222 xd = uniphier_xdmac_next_desc(xc);
224 uniphier_xdmac_chan_start(xc, xd);
227 xc->xd = xd;
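
uniphier_xdmac_start() (lines 217-227) is the small dispatcher tying the two together: pop the next issued descriptor, program the hardware if one exists, and record it in xc->xd either way, so a NULL xc->xd marks the channel idle. A plausible reconstruction:

/* xc->vc.lock must be held by caller */
static void uniphier_xdmac_start(struct uniphier_xdmac_chan *xc)
{
	struct uniphier_xdmac_desc *xd;

	xd = uniphier_xdmac_next_desc(xc);
	if (xd)
		uniphier_xdmac_chan_start(xc, xd);

	/* remember the in-flight descriptor; NULL means the channel is idle */
	xc->xd = xd;
}
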
230 static void uniphier_xdmac_chan_irq(struct uniphier_xdmac_chan *xc)
235 spin_lock(&xc->vc.lock);
237 stat = readl(xc->reg_ch_base + XDMAC_ID);
240 ret = uniphier_xdmac_chan_stop(xc);
242 dev_err(xc->xdev->ddev.dev,
245 dev_err(xc->xdev->ddev.dev,
248 } else if ((stat & XDMAC_ID_ENDIDF) && xc->xd) {
249 xc->xd->cur_node++;
250 if (xc->xd->cur_node >= xc->xd->nr_node) {
251 vchan_cookie_complete(&xc->xd->vd);
252 uniphier_xdmac_start(xc);
254 uniphier_xdmac_chan_start(xc, xc->xd);
259 writel(stat, xc->reg_ch_base + XDMAC_IR);
261 spin_unlock(&xc->vc.lock);
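
The interrupt path (lines 230-261) takes the plain spin_lock because it runs in hardirq context, reads the interrupt detail register, then branches: on error it force-stops the channel and logs; on end-of-transfer it advances to the descriptor's next scatter-gather node, either completing the cookie and starting the next descriptor or reprogramming the hardware for the remaining nodes. The observed status bits are written back to acknowledge them. A sketch; XDMAC_ID_ERRIDF is assumed as the error flag, and the dev_err strings are illustrative since the real messages at lines 243/246 are not shown in the matches.

static void uniphier_xdmac_chan_irq(struct uniphier_xdmac_chan *xc)
{
	u32 stat;
	int ret;

	spin_lock(&xc->vc.lock);

	stat = readl(xc->reg_ch_base + XDMAC_ID);

	if (stat & XDMAC_ID_ERRIDF) {			/* assumed flag name */
		ret = uniphier_xdmac_chan_stop(xc);
		if (ret)
			dev_err(xc->xdev->ddev.dev,
				"transfer error (channel failed to stop)\n");
		else
			dev_err(xc->xdev->ddev.dev,
				"transfer error\n");
	} else if ((stat & XDMAC_ID_ENDIDF) && xc->xd) {
		xc->xd->cur_node++;
		if (xc->xd->cur_node >= xc->xd->nr_node) {
			/* last node done: complete, then start the next desc */
			vchan_cookie_complete(&xc->xd->vd);
			uniphier_xdmac_start(xc);
		} else {
			/* more nodes: reprogram the hardware for the next one */
			uniphier_xdmac_chan_start(xc, xc->xd);
		}
	}

	/* acknowledge the observed status bits (write-to-clear, line 259) */
	writel(stat, xc->reg_ch_base + XDMAC_IR);

	spin_unlock(&xc->vc.lock);
}
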
325 struct uniphier_xdmac_chan *xc = to_uniphier_xdmac_chan(vc);
336 buswidth = xc->sconfig.src_addr_width;
337 maxburst = xc->sconfig.src_maxburst;
339 buswidth = xc->sconfig.dst_addr_width;
340 maxburst = xc->sconfig.dst_maxburst;
345 if (maxburst > xc->xdev->ddev.max_burst) {
346 dev_err(xc->xdev->ddev.dev,
358 ? xc->sconfig.src_addr : sg_dma_address(sg);
360 ? xc->sconfig.dst_addr : sg_dma_address(sg);
374 dev_err(xc->xdev->ddev.dev,
381 dev_err(xc->xdev->ddev.dev,
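
Lines 325-381 fall inside the slave-sg preparation callback: it reads bus width and burst length from the cached dma_slave_config for the requested direction, rejects bursts beyond the controller's advertised max_burst, and builds one transfer node per scatterlist entry, with the device side pinned to the configured FIFO address and the memory side taken from the mapped scatterlist. A condensed sketch; the descriptor's nodes[]/src/dst/burst_size/nr_burst members and the dev_err wording are assumptions, and the two validation errors matched at lines 374 and 381 (presumably alignment checks) are omitted.

static struct dma_async_tx_descriptor *
uniphier_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
			     unsigned int sg_len,
			     enum dma_transfer_direction direction,
			     unsigned long flags, void *context)
{
	struct virt_dma_chan *vc = to_virt_chan(chan);
	struct uniphier_xdmac_chan *xc = to_uniphier_xdmac_chan(vc);
	struct uniphier_xdmac_desc *xd;
	struct scatterlist *sg;
	enum dma_slave_buswidth buswidth;
	u32 maxburst;
	int i;

	/* device-side parameters depend on the direction (lines 336-340) */
	if (direction == DMA_DEV_TO_MEM) {
		buswidth = xc->sconfig.src_addr_width;
		maxburst = xc->sconfig.src_maxburst;
	} else {
		buswidth = xc->sconfig.dst_addr_width;
		maxburst = xc->sconfig.dst_maxburst;
	}

	if (!maxburst)
		maxburst = 1;
	if (maxburst > xc->xdev->ddev.max_burst) {
		dev_err(xc->xdev->ddev.dev,
			"burst length %u exceeds the controller limit\n",
			maxburst);
		return NULL;
	}

	/* GFP_NOWAIT: prep callbacks may run in atomic context */
	xd = kzalloc(struct_size(xd, nodes, sg_len), GFP_NOWAIT);
	if (!xd)
		return NULL;

	for_each_sg(sgl, sg, sg_len, i) {
		/* fixed FIFO address on the device side, scatterlist on
		 * the memory side (lines 358-360) */
		xd->nodes[i].src = (direction == DMA_DEV_TO_MEM)
			? xc->sconfig.src_addr : sg_dma_address(sg);
		xd->nodes[i].dst = (direction == DMA_MEM_TO_DEV)
			? xc->sconfig.dst_addr : sg_dma_address(sg);
		xd->nodes[i].burst_size = maxburst * buswidth;
		xd->nodes[i].nr_burst = sg_dma_len(sg)
				      / xd->nodes[i].burst_size;
	}

	xd->dir = direction;
	xd->nr_node = sg_len;
	xd->cur_node = 0;

	return vchan_tx_prep(&xc->vc, &xd->vd, flags);
}
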
398 struct uniphier_xdmac_chan *xc = to_uniphier_xdmac_chan(vc);
400 memcpy(&xc->sconfig, config, sizeof(*config));
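
The device_config callback (around line 400) is the simplest of the set: it caches the caller's dma_slave_config verbatim for the prep and start paths to consume later. A likely reconstruction:

static int uniphier_xdmac_slave_config(struct dma_chan *chan,
				       struct dma_slave_config *config)
{
	struct virt_dma_chan *vc = to_virt_chan(chan);
	struct uniphier_xdmac_chan *xc = to_uniphier_xdmac_chan(vc);

	memcpy(&xc->sconfig, config, sizeof(*config));

	return 0;
}
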
408 struct uniphier_xdmac_chan *xc = to_uniphier_xdmac_chan(vc);
415 if (xc->xd) {
416 vchan_terminate_vdesc(&xc->xd->vd);
417 xc->xd = NULL;
418 ret = uniphier_xdmac_chan_stop(xc);
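
Lines 408-418 belong to the terminate_all callback: if a descriptor is in flight it is detached via vchan_terminate_vdesc() and the hardware is stopped; the remaining queued descriptors are then drained and freed outside the lock, per the standard virt-dma teardown sequence. A sketch along those lines:

static int uniphier_xdmac_terminate_all(struct dma_chan *chan)
{
	struct virt_dma_chan *vc = to_virt_chan(chan);
	struct uniphier_xdmac_chan *xc = to_uniphier_xdmac_chan(vc);
	unsigned long flags;
	LIST_HEAD(head);
	int ret = 0;

	spin_lock_irqsave(&vc->lock, flags);

	if (xc->xd) {
		/* detach the in-flight descriptor, then halt the channel */
		vchan_terminate_vdesc(&xc->xd->vd);
		xc->xd = NULL;
		ret = uniphier_xdmac_chan_stop(xc);
	}

	/* collect everything still queued so it can be freed unlocked */
	vchan_get_all_descriptors(vc, &head);

	spin_unlock_irqrestore(&vc->lock, flags);

	vchan_dma_desc_free_list(vc, &head);

	return ret;
}
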
438 struct uniphier_xdmac_chan *xc = to_uniphier_xdmac_chan(vc);
443 if (vchan_issue_pending(vc) && !xc->xd)
444 uniphier_xdmac_start(xc);
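
issue_pending (lines 438-444) moves submitted descriptors onto the issued list and kicks the hardware only when the channel is idle (xc->xd == NULL); otherwise the IRQ handler chains into the next descriptor on completion. A plausible reconstruction:

static void uniphier_xdmac_issue_pending(struct dma_chan *chan)
{
	struct virt_dma_chan *vc = to_virt_chan(chan);
	struct uniphier_xdmac_chan *xc = to_uniphier_xdmac_chan(vc);
	unsigned long flags;

	spin_lock_irqsave(&vc->lock, flags);

	/* start only if nothing is in flight; the IRQ path chains otherwise */
	if (vchan_issue_pending(vc) && !xc->xd)
		uniphier_xdmac_start(xc);

	spin_unlock_irqrestore(&vc->lock, flags);
}
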
457 struct uniphier_xdmac_chan *xc = &xdev->channels[ch];
459 xc->xdev = xdev;
460 xc->reg_ch_base = xdev->reg_base + XDMAC_CH_WIDTH * ch;
461 xc->vc.desc_free = uniphier_xdmac_desc_free;
463 vchan_init(&xc->vc, &xdev->ddev);
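
The last group (lines 457-463) is per-channel initialization from the probe path: each channel stores a pointer back to the parent device, computes its register window from a fixed per-channel stride (XDMAC_CH_WIDTH), installs the descriptor destructor, and registers with the virt-dma framework. A sketch of the enclosing helper; its name and signature are assumptions.

static void uniphier_xdmac_chan_init(struct uniphier_xdmac_device *xdev,
				     int ch)
{
	struct uniphier_xdmac_chan *xc = &xdev->channels[ch];

	xc->xdev = xdev;
	xc->reg_ch_base = xdev->reg_base + XDMAC_CH_WIDTH * ch;	/* stride */
	xc->vc.desc_free = uniphier_xdmac_desc_free;

	/* hook this channel into the virt-dma / dmaengine core */
	vchan_init(&xc->vc, &xdev->ddev);
}
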