Lines Matching refs:chan (references to the identifier chan)

24 #include "chan.h"
41 nvkm_chan_cctx_bind(struct nvkm_chan *chan, struct nvkm_engn *engn, struct nvkm_cctx *cctx)
43 struct nvkm_cgrp *cgrp = chan->cgrp;
50 CHAN_TRACE(chan, "%sbind cctx %d[%s]", cctx ? "" : "un", engn->id, engine->subdev.name);
58 nvkm_chan_block(chan);
59 nvkm_chan_preempt(chan, true);
62 engn->func->bind(engn, cctx, chan);
68 nvkm_chan_allow(chan);
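
The nvkm_chan_cctx_bind() fragments above follow a quiesce, mutate, resume sequence: block the channel, preempt it off the engine, rewrite the engine context pointer, then allow it to run again. Below is a minimal userspace sketch of that shape only; every name in it (fake_chan, fake_chan_block, ...) is an invented stand-in for the nvkm helpers, not the driver's API.

#include <stdio.h>

struct fake_chan {
	int blocked;            /* nesting block count, like chan->blocked   */
	void *engine_ctx;       /* stands in for the per-engine context ptr  */
};

static void fake_chan_block(struct fake_chan *chan)   { chan->blocked++; }
static void fake_chan_allow(struct fake_chan *chan)   { chan->blocked--; }
static void fake_chan_preempt(struct fake_chan *chan) { (void)chan; /* wait for idle */ }

static void
fake_chan_ctx_bind(struct fake_chan *chan, void *ctx)
{
	fake_chan_block(chan);        /* 1. stop the channel being rescheduled */
	fake_chan_preempt(chan);      /* 2. kick it off host and engine        */
	chan->engine_ctx = ctx;       /* 3. now the context pointer can change */
	fake_chan_allow(chan);        /* 4. resume normal operation            */
}

int main(void)
{
	struct fake_chan chan = { 0, NULL };
	int ctx;

	fake_chan_ctx_bind(&chan, &ctx);
	printf("blocked=%d ctx bound=%d\n", chan.blocked, chan.engine_ctx == &ctx);
	return 0;
}
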
72 nvkm_chan_cctx_put(struct nvkm_chan *chan, struct nvkm_cctx **pcctx)
79 if (refcount_dec_and_mutex_lock(&cctx->refs, &chan->cgrp->mutex)) {
80 CHAN_TRACE(chan, "dtor cctx %d[%s]", engn->id, engn->engine->subdev.name);
81 nvkm_cgrp_vctx_put(chan->cgrp, &cctx->vctx);
84 mutex_unlock(&chan->cgrp->mutex);
92 nvkm_chan_cctx_get(struct nvkm_chan *chan, struct nvkm_engn *engn, struct nvkm_cctx **pcctx,
95 struct nvkm_cgrp *cgrp = chan->cgrp;
102 cctx = nvkm_list_find(cctx, &chan->cctxs, head,
103 cctx->vctx->ectx->engn == engn && cctx->vctx->vmm == chan->vmm);
107 mutex_unlock(&chan->cgrp->mutex);
112 ret = nvkm_cgrp_vctx_get(cgrp, engn, chan, &vctx, client);
114 CHAN_ERROR(chan, "vctx %d[%s]: %d", engn->id, engn->engine->subdev.name, ret);
119 CHAN_TRACE(chan, "ctor cctx %d[%s]", engn->id, engn->engine->subdev.name);
129 list_add_tail(&cctx->head, &chan->cctxs);
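
Taken together, the nvkm_chan_cctx_put() and nvkm_chan_cctx_get() fragments above describe a refcounted lookup cache guarded by the channel group's mutex: find an existing context for the engine and take a reference, otherwise construct a fresh one and add it to chan->cctxs; the put side drops the reference and destroys the context when it was the last user. The following is a hedged userspace sketch of that pattern using a plain mutex and an integer refcount instead of the kernel's nvkm_list_find()/refcount_dec_and_mutex_lock(); ctx_cache, ctx_get and ctx_put are invented names.

#include <pthread.h>
#include <stdlib.h>

struct ctx {
	struct ctx *next;
	int key;                               /* stands in for the (engine, vmm) pair */
	int refs;
};

struct ctx_cache {
	pthread_mutex_t mutex;
	struct ctx *head;
};

static struct ctx *
ctx_get(struct ctx_cache *cache, int key)
{
	struct ctx *ctx;

	pthread_mutex_lock(&cache->mutex);
	for (ctx = cache->head; ctx; ctx = ctx->next) {
		if (ctx->key == key) {         /* existing context: just take a reference */
			ctx->refs++;
			goto done;
		}
	}

	ctx = calloc(1, sizeof(*ctx));         /* none found: construct a fresh one */
	if (ctx) {
		ctx->key = key;
		ctx->refs = 1;
		ctx->next = cache->head;
		cache->head = ctx;
	}
done:
	pthread_mutex_unlock(&cache->mutex);
	return ctx;
}

static void
ctx_put(struct ctx_cache *cache, struct ctx **pctx)
{
	struct ctx *ctx = *pctx, **link;

	if (!ctx)
		return;

	pthread_mutex_lock(&cache->mutex);
	if (--ctx->refs == 0) {                /* last user: unlink and destroy */
		for (link = &cache->head; *link != ctx; link = &(*link)->next)
			;
		*link = ctx->next;
		free(ctx);
	}
	pthread_mutex_unlock(&cache->mutex);
	*pctx = NULL;
}

int main(void)
{
	struct ctx_cache cache = { PTHREAD_MUTEX_INITIALIZER, NULL };
	struct ctx *a = ctx_get(&cache, 3);    /* constructed, refs == 1 */
	struct ctx *b = ctx_get(&cache, 3);    /* found again, refs == 2 */

	ctx_put(&cache, &a);                   /* still cached for other users */
	ctx_put(&cache, &b);                   /* last reference: freed        */
	return (a == NULL && b == NULL) ? 0 : 1;
}
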
136 nvkm_chan_preempt_locked(struct nvkm_chan *chan, bool wait)
138 struct nvkm_runl *runl = chan->cgrp->runl;
140 CHAN_TRACE(chan, "preempt");
141 chan->func->preempt(chan);
149 nvkm_chan_preempt(struct nvkm_chan *chan, bool wait)
153 if (!chan->func->preempt)
156 mutex_lock(&chan->cgrp->runl->mutex);
157 ret = nvkm_chan_preempt_locked(chan, wait);
158 mutex_unlock(&chan->cgrp->runl->mutex);
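
The preempt fragments show the usual _locked/wrapper split: nvkm_chan_preempt_locked() assumes the runlist mutex is already held, while nvkm_chan_preempt() checks whether the hardware provides a preempt hook at all, takes the mutex, and delegates. A small sketch of that convention follows; all names are invented and the wait handling is a stub rather than the real runlist poll.

#include <pthread.h>
#include <stdbool.h>

struct runlist { pthread_mutex_t mutex; };

struct channel {
	struct runlist *runl;
	void (*preempt)(struct channel *);   /* optional per-chipset hook */
};

static int channel_wait_idle(struct channel *chan)
{
	(void)chan;
	return 0;                            /* stub; the driver polls the runlist here */
}

/* Caller must already hold chan->runl->mutex. */
static int channel_preempt_locked(struct channel *chan, bool wait)
{
	chan->preempt(chan);
	return wait ? channel_wait_idle(chan) : 0;
}

static int channel_preempt(struct channel *chan, bool wait)
{
	int ret;

	if (!chan->preempt)                  /* hardware without preemption */
		return 0;

	pthread_mutex_lock(&chan->runl->mutex);
	ret = channel_preempt_locked(chan, wait);
	pthread_mutex_unlock(&chan->runl->mutex);
	return ret;
}

int main(void)
{
	struct runlist runl = { PTHREAD_MUTEX_INITIALIZER };
	struct channel chan = { .runl = &runl, .preempt = NULL };

	return channel_preempt(&chan, true); /* no hook: returns 0 immediately */
}
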
163 nvkm_chan_remove_locked(struct nvkm_chan *chan)
165 struct nvkm_cgrp *cgrp = chan->cgrp;
168 if (list_empty(&chan->head))
171 CHAN_TRACE(chan, "remove");
177 list_del_init(&chan->head);
182 nvkm_chan_remove(struct nvkm_chan *chan, bool preempt)
184 struct nvkm_runl *runl = chan->cgrp->runl;
187 if (preempt && chan->func->preempt)
188 nvkm_chan_preempt_locked(chan, true);
189 nvkm_chan_remove_locked(chan);
195 nvkm_chan_insert(struct nvkm_chan *chan)
197 struct nvkm_cgrp *cgrp = chan->cgrp;
201 if (WARN_ON(!list_empty(&chan->head))) {
206 CHAN_TRACE(chan, "insert");
207 list_add_tail(&chan->head, &cgrp->chans);
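
The insert/remove fragments rely on the intrusive-list idiom: chan->head points at itself while the channel is not on a runlist, so list_empty() doubles as the membership test, list_del_init() makes removal idempotent, and the WARN_ON in nvkm_chan_insert() catches double insertion. Here is a minimal userspace re-implementation of just that idiom, not the kernel's <linux/list.h>.

#include <stdbool.h>
#include <stdio.h>

struct list_node { struct list_node *prev, *next; };

static void list_init(struct list_node *n)        { n->prev = n->next = n; }
static bool list_empty(const struct list_node *n) { return n->next == n; }

static void list_add_tail(struct list_node *n, struct list_node *head)
{
	n->prev = head->prev;
	n->next = head;
	head->prev->next = n;
	head->prev = n;
}

static void list_del_init(struct list_node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	list_init(n);                  /* back to the "not on any list" state */
}

struct chan { struct list_node head; };

int main(void)
{
	struct list_node runlist;
	struct chan chan;

	list_init(&runlist);
	list_init(&chan.head);

	printf("queued: %d\n", !list_empty(&chan.head));   /* 0: not on the runlist */
	list_add_tail(&chan.head, &runlist);
	printf("queued: %d\n", !list_empty(&chan.head));   /* 1: on the runlist     */
	list_del_init(&chan.head);
	list_del_init(&chan.head);     /* a second removal is a harmless no-op */
	printf("queued: %d\n", !list_empty(&chan.head));   /* 0 again               */
	return 0;
}
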
219 nvkm_chan_block_locked(struct nvkm_chan *chan)
221 CHAN_TRACE(chan, "block %d", atomic_read(&chan->blocked));
222 if (atomic_inc_return(&chan->blocked) == 1)
223 chan->func->stop(chan);
227 nvkm_chan_error(struct nvkm_chan *chan, bool preempt)
231 spin_lock_irqsave(&chan->lock, flags);
232 if (atomic_inc_return(&chan->errored) == 1) {
233 CHAN_ERROR(chan, "errored - disabling channel");
234 nvkm_chan_block_locked(chan);
236 chan->func->preempt(chan);
237 nvkm_event_ntfy(&chan->cgrp->runl->chid->event, chan->id, NVKM_CHAN_EVENT_ERRORED);
239 spin_unlock_irqrestore(&chan->lock, flags);
243 nvkm_chan_block(struct nvkm_chan *chan)
245 spin_lock_irq(&chan->lock);
246 nvkm_chan_block_locked(chan);
247 spin_unlock_irq(&chan->lock);
251 nvkm_chan_allow(struct nvkm_chan *chan)
253 spin_lock_irq(&chan->lock);
254 CHAN_TRACE(chan, "allow %d", atomic_read(&chan->blocked));
255 if (atomic_dec_and_test(&chan->blocked))
256 chan->func->start(chan);
257 spin_unlock_irq(&chan->lock);
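
nvkm_chan_block_locked(), nvkm_chan_block() and nvkm_chan_allow() above implement a nesting stop/start gate: only the first blocker stops the channel (atomic_inc_return(...) == 1) and only the last release restarts it (atomic_dec_and_test(...)). The listing also shows the counter initialised to 1 at creation time, so a new channel stays stopped until it is explicitly allowed. Below is a hedged C11 sketch of the same gate; chan_stop()/chan_start() are invented callbacks standing in for chan->func->stop/start, and the locking around the gate is omitted.

#include <stdatomic.h>
#include <stdio.h>

struct chan {
	atomic_int blocked;
};

static void chan_stop(struct chan *chan)  { (void)chan; printf("stop\n");  }
static void chan_start(struct chan *chan) { (void)chan; printf("start\n"); }

static void chan_block(struct chan *chan)
{
	if (atomic_fetch_add(&chan->blocked, 1) + 1 == 1)  /* first blocker halts HW */
		chan_stop(chan);
}

static void chan_allow(struct chan *chan)
{
	if (atomic_fetch_sub(&chan->blocked, 1) - 1 == 0)  /* last blocker restarts  */
		chan_start(chan);
}

int main(void)
{
	struct chan chan = { .blocked = 1 };   /* created blocked, as in the listing */

	chan_allow(&chan);   /* "start": channel runs for the first time */
	chan_block(&chan);   /* "stop":  first blocker halts it          */
	chan_block(&chan);   /* nested blocker: no hardware access       */
	chan_allow(&chan);   /* one blocker still left: no-op            */
	chan_allow(&chan);   /* "start": last blocker gone               */
	return 0;
}
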
263 struct nvkm_chan *chan = *pchan;
265 if (!chan)
268 if (chan->func->ramfc->clear)
269 chan->func->ramfc->clear(chan);
271 nvkm_ramht_del(&chan->ramht);
272 nvkm_gpuobj_del(&chan->pgd);
273 nvkm_gpuobj_del(&chan->eng);
274 nvkm_gpuobj_del(&chan->cache);
275 nvkm_gpuobj_del(&chan->ramfc);
277 if (chan->cgrp) {
278 if (!chan->func->id_put)
279 nvkm_chid_put(chan->cgrp->runl->chid, chan->id, &chan->cgrp->lock);
281 chan->func->id_put(chan);
283 nvkm_cgrp_unref(&chan->cgrp);
286 nvkm_memory_unref(&chan->userd.mem);
288 if (chan->vmm) {
289 nvkm_vmm_part(chan->vmm, chan->inst->memory);
290 nvkm_vmm_unref(&chan->vmm);
293 nvkm_gpuobj_del(&chan->push);
294 nvkm_gpuobj_del(&chan->inst);
295 kfree(chan);
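
nvkm_chan_del() above shows the usual nvkm destructor shape: accept a pointer-to-pointer, tolerate a NULL channel, then release resources roughly in the reverse of the order nvkm_chan_new_() created them (RAMFC and gpuobjs, channel ID and cgrp, USERD, VMM, push buffer, instance block, finally the struct itself). The toy sketch below only illustrates that shape; the struct members, the free() calls and the final *pchan = NULL are inventions of the sketch, not taken from the listing.

#include <stdlib.h>

struct chan {
	void *ramfc;
	void *inst;
};

static void chan_del(struct chan **pchan)
{
	struct chan *chan = *pchan;

	if (!chan)
		return;                /* safe on channels that were never created */

	free(chan->ramfc);             /* teardown mirrors construction, reversed  */
	free(chan->inst);
	free(chan);
	*pchan = NULL;                 /* caller's pointer cannot be reused stale  */
}

int main(void)
{
	struct chan *chan = calloc(1, sizeof(*chan));

	if (!chan)
		return 1;

	chan->inst = malloc(16);
	chan->ramfc = malloc(16);
	chan_del(&chan);
	chan_del(&chan);               /* second call is a harmless no-op */
	return 0;
}
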
301 struct nvkm_chan *chan = *pchan;
303 if (!chan)
307 spin_unlock_irqrestore(&chan->cgrp->lock, irqflags);
316 struct nvkm_chan *chan;
321 chan = nvkm_runl_chan_get_inst(runl, inst, pirqflags);
322 if (chan || engn->engine == engine)
323 return chan;
356 struct nvkm_chan *chan;
374 if (!(chan = *pchan = kzalloc(sizeof(*chan), GFP_KERNEL)))
377 chan->func = func;
378 strscpy(chan->name, name, sizeof(chan->name));
379 chan->runq = runq;
380 chan->id = -1;
381 spin_lock_init(&chan->lock);
382 atomic_set(&chan->blocked, 1);
383 atomic_set(&chan->errored, 0);
384 INIT_LIST_HEAD(&chan->cctxs);
385 INIT_LIST_HEAD(&chan->head);
396 ret = nvkm_cgrp_new(runl, chan->name, vmm, fifo->func->cgrp.force, &chan->cgrp);
402 cgrp = chan->cgrp;
409 chan->cgrp = nvkm_cgrp_ref(cgrp);
414 &chan->inst);
425 ret = nvkm_vmm_join(vmm, chan->inst->memory);
431 chan->vmm = nvkm_vmm_ref(vmm);
436 ret = nvkm_object_bind(&dmaobj->object, chan->inst, -16, &chan->push);
444 if (!chan->func->id_get) {
445 chan->id = nvkm_chid_get(runl->chid, chan);
446 if (chan->id >= 0) {
448 if (ouserd + chan->func->userd->size >=
454 ret = nvkm_memory_kmap(userd, &chan->userd.mem);
460 chan->userd.base = ouserd;
462 chan->userd.mem = nvkm_memory_ref(fifo->userd.mem);
463 chan->userd.base = chan->id * chan->func->userd->size;
467 chan->id = chan->func->id_get(chan, userd, ouserd);
470 if (chan->id < 0) {
476 cgrp->id = chan->id;
479 if (chan->func->userd->clear)
480 chan->func->userd->clear(chan);
483 ret = chan->func->ramfc->write(chan, offset, length, devm, priv);
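
The channel-ID fragments near the end show an optional chipset hook with a common fallback: when the function table has no id_get, the shared allocator (nvkm_chid_get()) hands out the ID and, in the shared-USERD case, the channel's USERD slice is simply id * userd size; otherwise the chipset's id_get() decides. The sketch below reduces this to the fallback-plus-override pattern; every name is invented, the allocator is a bare counter, and the USERD handling keeps only the id * size case.

#include <stdio.h>

struct chan;

struct chan_func {
	int userd_size;
	int (*id_get)(struct chan *);        /* optional chipset override */
};

struct chan {
	const struct chan_func *func;
	int id;
	int userd_base;
};

static int next_id;                           /* stand-in for the shared chid allocator */

static int chan_assign_id(struct chan *chan)
{
	if (chan->func->id_get)
		chan->id = chan->func->id_get(chan);    /* chipset-specific path */
	else
		chan->id = next_id++;                   /* common allocator path */

	if (chan->id < 0)
		return -1;                              /* out of channel IDs */

	chan->userd_base = chan->id * chan->func->userd_size;
	return 0;
}

int main(void)
{
	static const struct chan_func func = { .userd_size = 0x200 };
	struct chan chan = { .func = &func, .id = -1, .userd_base = 0 };

	if (chan_assign_id(&chan) == 0)
		printf("id=%d userd_base=0x%x\n", chan.id, chan.userd_base);
	return 0;
}
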