Lines Matching defs:sub

29 struct uniphier_aio_sub *sub = &aio->sub[compr->direction];
37 sub->compr_area = kzalloc(size, GFP_KERNEL);
38 if (!sub->compr_area)
41 if (sub->swm->dir == PORT_DIR_OUTPUT)
44 sub->compr_addr = dma_map_single(dev, sub->compr_area, size, dma_dir);
45 if (dma_mapping_error(dev, sub->compr_addr)) {
46 kfree(sub->compr_area);
47 sub->compr_area = NULL;
52 sub->compr_bytes = size;
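
The lines above (source lines 29-52) appear to come from the compressed-stream buffer setup in the Linux kernel's UniPhier AIO driver (sound/soc/uniphier/aio-compress.c): a CPU buffer is allocated with kzalloc() and streaming-mapped for DMA, with the map direction chosen from the sub-channel's port direction. Below is a minimal sketch of how those fragments fit together; the function name, the error codes, the type of "compr", and the origin of dev/size are assumptions, while the field accesses and kernel calls are taken from the listing. This and the later sketches assume the usual kernel headers plus the driver's private definitions (struct uniphier_aio, struct uniphier_aio_sub, the aio_*/aiodma_* helpers, PORT_DIR_OUTPUT, IEC61937_PC_AAC).

#include <linux/dma-mapping.h>
#include <linux/slab.h>

/* Sketch only: the helper name and signature are illustrative. */
static int uniphier_aio_compr_alloc_buffer(struct uniphier_aio *aio,
					   struct snd_compr_stream *compr,
					   struct device *dev, size_t size)
{
	struct uniphier_aio_sub *sub = &aio->sub[compr->direction];
	enum dma_data_direction dma_dir = DMA_FROM_DEVICE;

	sub->compr_area = kzalloc(size, GFP_KERNEL);
	if (!sub->compr_area)
		return -ENOMEM;

	/* Playback buffers are read by the device, capture buffers are
	 * written by it, so pick the streaming-DMA direction accordingly. */
	if (sub->swm->dir == PORT_DIR_OUTPUT)
		dma_dir = DMA_TO_DEVICE;

	sub->compr_addr = dma_map_single(dev, sub->compr_area, size, dma_dir);
	if (dma_mapping_error(dev, sub->compr_addr)) {
		kfree(sub->compr_area);
		sub->compr_area = NULL;
		return -ENOMEM;
	}

	sub->compr_bytes = size;

	return 0;
}
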
62 struct uniphier_aio_sub *sub = &aio->sub[compr->direction];
65 if (sub->swm->dir == PORT_DIR_OUTPUT)
68 dma_unmap_single(dev, sub->compr_addr, sub->compr_bytes, dma_dir);
69 kfree(sub->compr_area);
70 sub->compr_area = NULL;
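
Source lines 62-70 are the matching teardown: the buffer is unmapped with the same direction that was used at map time and then freed. A sketch under the same assumptions as above:

/* Sketch only: the helper name and signature are illustrative. */
static void uniphier_aio_compr_free_buffer(struct uniphier_aio *aio,
					   struct snd_compr_stream *compr,
					   struct device *dev)
{
	struct uniphier_aio_sub *sub = &aio->sub[compr->direction];
	enum dma_data_direction dma_dir = DMA_FROM_DEVICE;

	if (sub->swm->dir == PORT_DIR_OUTPUT)
		dma_dir = DMA_TO_DEVICE;

	/* Unmap before freeing so the device can no longer touch the area. */
	dma_unmap_single(dev, sub->compr_addr, sub->compr_bytes, dma_dir);
	kfree(sub->compr_area);
	sub->compr_area = NULL;
}
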
80 struct uniphier_aio_sub *sub = &aio->sub[cstream->direction];
83 if (sub->cstream)
86 sub->cstream = cstream;
87 sub->pass_through = 1;
88 sub->use_mmap = false;
94 ret = aio_init(sub);
106 struct uniphier_aio_sub *sub = &aio->sub[cstream->direction];
116 sub->cstream = NULL;
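
Source lines 80-116 show the compress open/free pair: open claims the sub-channel for the stream's direction, marks it as pass-through (the compressed bitstream is not handled as PCM), disables mmap, and runs aio_init(); free simply releases the slot. In this sketch the callback names, the way the uniphier_aio pointer is obtained, and the -EBUSY return are assumptions; the bookkeeping on sub is from the listing.

/* Sketch only: names and signatures are illustrative. */
static int uniphier_aio_compr_open(struct uniphier_aio *aio,
				   struct snd_compr_stream *cstream)
{
	struct uniphier_aio_sub *sub = &aio->sub[cstream->direction];

	if (sub->cstream)
		return -EBUSY;		/* sub-channel already in use */

	sub->cstream = cstream;
	sub->pass_through = 1;		/* raw compressed data, no PCM handling */
	sub->use_mmap = false;

	return aio_init(sub);		/* (re)initialize the AIO sub-channel */
}

static int uniphier_aio_compr_free(struct uniphier_aio *aio,
				   struct snd_compr_stream *cstream)
{
	struct uniphier_aio_sub *sub = &aio->sub[cstream->direction];

	sub->cstream = NULL;		/* release the slot (source line 116) */
	return 0;
}
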
127 struct uniphier_aio_sub *sub = &aio->sub[cstream->direction];
129 *params = sub->cparams.codec;
140 struct uniphier_aio_sub *sub = &aio->sub[cstream->direction];
155 sub->iec_pc = IEC61937_PC_AAC;
157 sub->cparams = *params;
158 sub->setting = 1;
160 aio_port_reset(sub);
161 aio_src_reset(sub);
171 struct uniphier_aio_sub *sub = &aio->sub[cstream->direction];
173 sub->setting = 0;
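
Source lines 127-173 cover parameter handling: get_params hands back the cached codec settings, while set_params latches the IEC 61937 burst type (AAC initially), stores the compress parameters, flags the sub-channel as configured (setting = 1), and resets the port and sample-rate converter. The fragment at lines 171-173 clears that flag again, presumably when the parameters are released. A sketch with illustrative names/signatures and assumed return codes:

/* Sketch only: names and signatures are illustrative; the assignments and
 * the reset calls follow the listing. */
static int uniphier_aio_compr_get_params(struct uniphier_aio_sub *sub,
					 struct snd_codec *params)
{
	*params = sub->cparams.codec;	/* report the cached codec settings */
	return 0;
}

static int uniphier_aio_compr_set_params(struct uniphier_aio_sub *sub,
					 struct snd_compr_params *params)
{
	sub->iec_pc = IEC61937_PC_AAC;	/* initial IEC 61937 burst type */
	sub->cparams = *params;
	sub->setting = 1;		/* sub-channel is now configured */

	aio_port_reset(sub);		/* restart the I/O port ... */
	aio_src_reset(sub);		/* ... and the sample-rate converter */

	return 0;
}
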
184 struct uniphier_aio_sub *sub = &aio->sub[cstream->direction];
189 ret = aiodma_ch_set_param(sub);
193 spin_lock_irqsave(&sub->lock, flags);
194 ret = aiodma_rb_set_buffer(sub, sub->compr_addr,
195 sub->compr_addr + sub->compr_bytes,
197 spin_unlock_irqrestore(&sub->lock, flags);
201 ret = aio_port_set_param(sub, sub->pass_through, &sub->params);
204 ret = aio_oport_set_stream_type(sub, sub->iec_pc);
207 aio_port_set_enable(sub, 1);
209 ret = aio_if_set_param(sub, sub->pass_through);
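
Source lines 184-209 are the prepare step: program the DMA channel, install the ring-buffer boundaries under sub->lock, then configure the port for pass-through, set the IEC 61937 stream type, enable the port, and finally configure the interface. The sketch below keeps that call order; the function name/signature, the abbreviated error handling, and the final aiodma_rb_set_buffer() argument (its line is not in the listing) are assumptions.

/* Sketch only: name/signature illustrative; "bytes" stands in for the
 * ring-buffer argument whose line is not shown in the listing. */
static int uniphier_aio_compr_prepare(struct uniphier_aio_sub *sub, int bytes)
{
	unsigned long flags;
	int ret;

	ret = aiodma_ch_set_param(sub);		/* program the DMA channel */
	if (ret)
		return ret;

	spin_lock_irqsave(&sub->lock, flags);
	ret = aiodma_rb_set_buffer(sub, sub->compr_addr,
				   sub->compr_addr + sub->compr_bytes,
				   bytes);
	spin_unlock_irqrestore(&sub->lock, flags);
	if (ret)
		return ret;

	ret = aio_port_set_param(sub, sub->pass_through, &sub->params);
	if (ret)
		return ret;

	ret = aio_oport_set_stream_type(sub, sub->iec_pc);
	if (ret)
		return ret;

	aio_port_set_enable(sub, 1);		/* turn the port on */

	return aio_if_set_param(sub, sub->pass_through);
}
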
223 struct uniphier_aio_sub *sub = &aio->sub[cstream->direction];
228 spin_lock_irqsave(&sub->lock, flags);
231 aiodma_rb_sync(sub, sub->compr_addr, sub->compr_bytes, bytes);
232 aiodma_ch_set_enable(sub, 1);
233 sub->running = 1;
237 sub->running = 0;
238 aiodma_ch_set_enable(sub, 0);
245 spin_unlock_irqrestore(&sub->lock, flags);
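
Source lines 223-245 are the trigger handler: on start it resynchronizes the ring buffer, enables the DMA channel, and marks the sub-channel running; on stop it does the reverse, all under sub->lock. The SNDRV_PCM_TRIGGER_* dispatch and the "bytes" argument are assumptions of this sketch; the locked sync/enable/running sequence is from the listing.

/* Sketch only: name/signature illustrative. */
static int uniphier_aio_compr_trigger(struct uniphier_aio_sub *sub, int cmd,
				      int bytes)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&sub->lock, flags);
	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
		aiodma_rb_sync(sub, sub->compr_addr, sub->compr_bytes, bytes);
		aiodma_ch_set_enable(sub, 1);
		sub->running = 1;
		break;
	case SNDRV_PCM_TRIGGER_STOP:
		sub->running = 0;
		aiodma_ch_set_enable(sub, 0);
		break;
	default:
		ret = -EINVAL;
	}
	spin_unlock_irqrestore(&sub->lock, flags);

	return ret;
}
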
257 struct uniphier_aio_sub *sub = &aio->sub[cstream->direction];
262 spin_lock_irqsave(&sub->lock, flags);
264 aiodma_rb_sync(sub, sub->compr_addr, sub->compr_bytes, bytes);
266 if (sub->swm->dir == PORT_DIR_OUTPUT) {
267 pos = sub->rd_offs;
269 tstamp->copied_total = sub->rd_total / 2;
271 pos = sub->wr_offs;
272 tstamp->copied_total = sub->rd_total;
276 spin_unlock_irqrestore(&sub->lock, flags);
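
Source lines 257-276 are the pointer/timestamp callback: after resynchronizing the ring buffer it reports the hardware position (the read offset for playback, the write offset for capture) and the copied byte count. For playback the total is halved, presumably because playback data is expanded when it is written into the ring (note the /2 on the ring space in the copy path below). In this sketch the tstamp field used for the position and the "bytes" argument are assumptions; struct snd_compr_tstamp is the ALSA compress-offload timestamp type.

/* Sketch only: name/signature illustrative. */
static int uniphier_aio_compr_pointer(struct uniphier_aio_sub *sub,
				      struct snd_compr_tstamp *tstamp,
				      int bytes)
{
	unsigned long flags;
	u32 pos;

	spin_lock_irqsave(&sub->lock, flags);

	aiodma_rb_sync(sub, sub->compr_addr, sub->compr_bytes, bytes);

	if (sub->swm->dir == PORT_DIR_OUTPUT) {
		pos = sub->rd_offs;
		/* data was doubled on the way in; report user-visible bytes */
		tstamp->copied_total = sub->rd_total / 2;
	} else {
		pos = sub->wr_offs;
		tstamp->copied_total = sub->rd_total;
	}
	tstamp->byte_offset = pos;	/* assumption: how pos is reported */

	spin_unlock_irqrestore(&sub->lock, flags);

	return 0;
}
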
281 static int aio_compr_send_to_hw(struct uniphier_aio_sub *sub,
285 u32 *dstbuf = (u32 *)(sub->compr_area + sub->wr_offs);
302 sub->iec_header = true;
306 if (sub->iec_header && sub->iec_pc != pc) {
308 sub->iec_pc = pc;
309 ret = aio_oport_set_stream_type(sub, pc);
313 sub->iec_header = false;
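
Source lines 281-313 belong to aio_compr_send_to_hw(), which copies playback data into the ring buffer (as 32-bit words, hence the u32 *dstbuf at the current write offset) while scanning it for IEC 61937 preambles; when the burst-info word Pc changes, the output port's stream type is reprogrammed via aio_oport_set_stream_type(). The standalone helper below only illustrates the preamble scan itself (Pa = 0xf872, Pb = 0x4e1f, then Pc); it is not the driver's code.

/* Illustrative only: scan 16-bit words for the IEC 61937 preamble and
 * return the burst-info word Pc that follows it, or -1 if none is found. */
static int iec61937_find_pc(const u16 *words, size_t nwords)
{
	size_t i;

	for (i = 0; i + 2 < nwords; i++) {
		if (words[i] == 0xf872 && words[i + 1] == 0x4e1f)
			return words[i + 2];	/* Pc: data type + error flag */
	}

	return -1;	/* no sync pattern in this chunk */
}
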
332 struct uniphier_aio_sub *sub = &aio->sub[cstream->direction];
333 size_t cnt = min_t(size_t, count, aio_rb_space_to_end(sub) / 2);
342 if (sub->swm->dir == PORT_DIR_OUTPUT) {
343 dma_addr_t dmapos = sub->compr_addr + sub->wr_offs;
349 ret = aio_compr_send_to_hw(sub, buf, s);
352 dma_addr_t dmapos = sub->compr_addr + sub->rd_offs;
357 ret = copy_to_user(buf, sub->compr_area + sub->rd_offs, s);
363 spin_lock_irqsave(&sub->lock, flags);
365 sub->threshold = 2 * bytes;
366 aiodma_rb_set_threshold(sub, sub->compr_bytes, 2 * bytes);
368 if (sub->swm->dir == PORT_DIR_OUTPUT) {
369 sub->wr_offs += s;
370 if (sub->wr_offs >= sub->compr_bytes)
371 sub->wr_offs -= sub->compr_bytes;
373 sub->rd_offs += s;
374 if (sub->rd_offs >= sub->compr_bytes)
375 sub->rd_offs -= sub->compr_bytes;
377 aiodma_rb_sync(sub, sub->compr_addr, sub->compr_bytes, bytes);
379 spin_unlock_irqrestore(&sub->lock, flags);
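
Source lines 332-379 are the copy callback. It clamps the request to the space left up to the end of the ring (halved, since each user byte appears to occupy two ring bytes once widened), then either feeds playback data through aio_compr_send_to_hw() at the write offset or copies captured data to userspace from the read offset, and finally, under sub->lock, updates the DMA threshold, advances the offset with wraparound, and resynchronizes the ring buffer. In the sketch below the function name/signature, the dma_sync_single_*() calls (the listing omits the lines where the dmapos values are consumed), the derivation of the chunk size s, and the return convention are all assumptions.

/* Sketch only: name/signature illustrative; "bytes" is the threshold unit
 * whose origin is not visible in the listing. */
static int uniphier_aio_compr_copy(struct uniphier_aio_sub *sub,
				   struct device *dev,
				   char __user *buf, size_t count, int bytes)
{
	size_t cnt = min_t(size_t, count, aio_rb_space_to_end(sub) / 2);
	size_t s = cnt;			/* assumed: chunk size handed to the copy */
	unsigned long flags;
	int ret;

	if (sub->swm->dir == PORT_DIR_OUTPUT) {
		dma_addr_t dmapos = sub->compr_addr + sub->wr_offs;

		/* Playback: widen user data into the ring at wr_offs,
		 * watching for IEC 61937 preambles on the way. */
		dma_sync_single_for_cpu(dev, dmapos, s, DMA_TO_DEVICE);
		ret = aio_compr_send_to_hw(sub, buf, s);
		dma_sync_single_for_device(dev, dmapos, s, DMA_TO_DEVICE);
		if (ret)
			return ret;
	} else {
		dma_addr_t dmapos = sub->compr_addr + sub->rd_offs;

		/* Capture: hand the ring contents at rd_offs to userspace. */
		dma_sync_single_for_cpu(dev, dmapos, s, DMA_FROM_DEVICE);
		if (copy_to_user(buf, sub->compr_area + sub->rd_offs, s))
			return -EFAULT;
	}

	spin_lock_irqsave(&sub->lock, flags);

	sub->threshold = 2 * bytes;
	aiodma_rb_set_threshold(sub, sub->compr_bytes, 2 * bytes);

	if (sub->swm->dir == PORT_DIR_OUTPUT) {
		sub->wr_offs += s;
		if (sub->wr_offs >= sub->compr_bytes)
			sub->wr_offs -= sub->compr_bytes;	/* wrap around */
	} else {
		sub->rd_offs += s;
		if (sub->rd_offs >= sub->compr_bytes)
			sub->rd_offs -= sub->compr_bytes;
	}
	aiodma_rb_sync(sub, sub->compr_addr, sub->compr_bytes, bytes);

	spin_unlock_irqrestore(&sub->lock, flags);

	return cnt;	/* assumed: report bytes consumed from userspace */
}
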