Lines Matching refs:intr (nouveau NVKM core interrupt code, nvkm/core/intr.c; the leading number on each line is its line number in that source file)

22 #include <core/intr.h>
29 nvkm_intr_xlat(struct nvkm_subdev *subdev, struct nvkm_intr *intr,
36 const struct nvkm_intr_data *data = intr->data;
42 if (tdev->intr >= 0 &&
45 if (data->mask & BIT(tdev->intr)) {
47 *mask = BIT(tdev->intr);
65 if (type < intr->leaves * sizeof(*intr->stat) * 8) {
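
The fragment above (source lines 29-65) is the translation step: an abstract interrupt type is resolved to a leaf index plus a 32-bit mask inside that leaf, either by walking the nvkm_intr_data table (including the tdev->intr bit checks against TOP) or, for raw vector numbers, arithmetically (line 65 bounds the vector by leaves * 32 bits). A standalone sketch of the same split follows; every name in it is invented for illustration and is not the NVKM API.

/* Illustrative sketch only: hypothetical types, not the NVKM code. */
#include <stdint.h>

#define XBIT(n) (1u << (n))

struct xlat_entry {          /* plays the role of an intr data table row */
	int      type;       /* symbolic unit identifier (negative here)  */
	int      leaf;       /* which 32-bit status word it lives in      */
	uint32_t mask;       /* bit(s) within that word; 0 terminates     */
};

/*
 * Resolve "type" to a (leaf, mask) pair.  Non-negative values are raw
 * vector numbers and map arithmetically; symbolic (negative) values are
 * looked up in a table terminated by a zero mask.
 */
static int
xlat(const struct xlat_entry *tbl, int nleaves, int type,
     int *leaf, uint32_t *mask)
{
	if (type >= 0) {
		if (type >= nleaves * 32)
			return -1;
		*leaf = type / 32;
		*mask = XBIT(type % 32);
		return 0;
	}

	for (; tbl && tbl->mask; tbl++) {
		if (tbl->type == type) {
			*leaf = tbl->leaf;
			*mask = tbl->mask;
			return 0;
		}
	}
	return -1;
}

In the real driver the table walk is more involved (it can defer to the TOP unit to learn which bit a subdevice owns, as the tdev->intr checks above show); the sketch keeps only the table-versus-direct-vector split.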
78 struct nvkm_intr *intr;
81 list_for_each_entry(intr, &subdev->device->intr.intr, head) {
82 ret = nvkm_intr_xlat(subdev, intr, type, leaf, mask);
84 return intr;
91 nvkm_intr_allow_locked(struct nvkm_intr *intr, int leaf, u32 mask)
93 intr->mask[leaf] |= mask;
94 if (intr->func->allow) {
95 if (intr->func->reset)
96 intr->func->reset(intr, leaf, mask);
97 intr->func->allow(intr, leaf, mask);
105 struct nvkm_intr *intr;
110 intr = nvkm_intr_find(subdev, type, &leaf, &mask);
111 if (intr) {
112 nvkm_debug(intr->subdev, "intr %d/%08x allowed by %s\n", leaf, mask, subdev->name);
113 spin_lock_irqsave(&device->intr.lock, flags);
114 nvkm_intr_allow_locked(intr, leaf, mask);
115 spin_unlock_irqrestore(&device->intr.lock, flags);
120 nvkm_intr_block_locked(struct nvkm_intr *intr, int leaf, u32 mask)
122 intr->mask[leaf] &= ~mask;
123 if (intr->func->block)
124 intr->func->block(intr, leaf, mask);
131 struct nvkm_intr *intr;
136 intr = nvkm_intr_find(subdev, type, &leaf, &mask);
137 if (intr) {
138 nvkm_debug(intr->subdev, "intr %d/%08x blocked by %s\n", leaf, mask, subdev->name);
139 spin_lock_irqsave(&device->intr.lock, flags);
140 nvkm_intr_block_locked(intr, leaf, mask);
141 spin_unlock_irqrestore(&device->intr.lock, flags);
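
The allow/block paths (source lines 105-115 and 131-141) and their _locked helpers keep a software copy of the enable mask per leaf and mirror changes to hardware through the allow/block/reset callbacks, all under the device-wide interrupt lock. A userspace sketch of that bookkeeping, with hypothetical names and a mutex standing in for the spinlock:

/* Illustrative sketch only: hypothetical API, not NVKM's. */
#include <stdint.h>
#include <pthread.h>

struct irq_tree {
	pthread_mutex_t lock;       /* stands in for the IRQ spinlock   */
	uint32_t mask[4];           /* software copy of enabled bits    */
	void (*hw_allow)(int leaf, uint32_t bits);  /* unmask in HW     */
	void (*hw_block)(int leaf, uint32_t bits);  /* mask in HW       */
	void (*hw_reset)(int leaf, uint32_t bits);  /* ack stale status */
};

/* Enable: record the bits, clear stale status, then unmask. */
static void
tree_allow(struct irq_tree *t, int leaf, uint32_t bits)
{
	pthread_mutex_lock(&t->lock);
	t->mask[leaf] |= bits;
	if (t->hw_reset)
		t->hw_reset(leaf, bits);
	if (t->hw_allow)
		t->hw_allow(leaf, bits);
	pthread_mutex_unlock(&t->lock);
}

/* Disable: drop the bits from the software mask, then mask them in HW. */
static void
tree_block(struct irq_tree *t, int leaf, uint32_t bits)
{
	pthread_mutex_lock(&t->lock);
	t->mask[leaf] &= ~bits;
	if (t->hw_block)
		t->hw_block(leaf, bits);
	pthread_mutex_unlock(&t->lock);
}

The reset-before-allow ordering mirrors lines 95-97: stale status appears to be acknowledged before a bit is unmasked, so an old event does not fire the instant it is enabled.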
148 struct nvkm_intr *intr;
150 list_for_each_entry(intr, &device->intr.intr, head)
151 intr->func->rearm(intr);
157 struct nvkm_intr *intr;
159 list_for_each_entry(intr, &device->intr.intr, head)
160 intr->func->unarm(intr);
167 struct nvkm_intr *intr;
174 spin_lock(&device->intr.lock);
175 if (!device->intr.armed)
182 list_for_each_entry(intr, &device->intr.intr, head) {
183 if (intr->func->pending(intr))
195 for (prio = 0; prio < ARRAY_SIZE(device->intr.prio); prio++) {
196 list_for_each_entry(inth, &device->intr.prio[prio], head) {
197 struct nvkm_intr *intr = inth->intr;
199 if (intr->stat[inth->leaf] & inth->mask) {
201 if (intr->func->reset)
202 intr->func->reset(intr, inth->leaf, inth->mask);
212 list_for_each_entry(intr, &device->intr.intr, head) {
213 for (leaf = 0; leaf < intr->leaves; leaf++) {
214 if (intr->stat[leaf]) {
215 nvkm_debug(intr->subdev, "intr%d: %08x\n",
216 leaf, intr->stat[leaf]);
217 nvkm_intr_block_locked(intr, leaf, intr->stat[leaf]);
227 spin_unlock(&device->intr.lock);
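
Source lines 167-227 are the top-level handler (nvkm_intr(), the routine later passed to request_irq() at line 358): it bails out if the tree is not armed, unarms everything, asks each registered nvkm_intr for its pending bits, walks the priority lists handing matching stat bits to their handlers, and, when bits are left unhandled, logs and blocks them before rearming. A simplified, self-contained sketch of the dispatch loop only (hypothetical types; locking, unarm/rearm and the pending() poll are omitted):

/* Illustrative sketch only: hypothetical dispatch structures. */
#include <stdint.h>
#include <stddef.h>

#define NLEAF 4

struct handler {
	int      leaf;              /* which status word to watch    */
	uint32_t mask;              /* which bits belong to us       */
	void   (*fn)(void *arg);    /* work to run when they fire    */
	void    *arg;
};

/*
 * One dispatch pass: hand each recognised bit to its handler in
 * priority order, then block whatever nobody claimed so a stuck
 * source cannot storm.
 */
static void
dispatch(uint32_t stat[NLEAF],
         struct handler *prio[], const size_t nprio[], int nlevels,
         void (*block)(int leaf, uint32_t bits))
{
	for (int p = 0; p < nlevels; p++) {
		for (size_t i = 0; i < nprio[p]; i++) {
			struct handler *h = &prio[p][i];

			if (stat[h->leaf] & h->mask) {
				h->fn(h->arg);              /* service it   */
				stat[h->leaf] &= ~h->mask;  /* mark handled */
			}
		}
	}

	for (int leaf = 0; leaf < NLEAF; leaf++) {
		if (stat[leaf])                     /* unclaimed bits */
			block(leaf, stat[leaf]);
	}
}

The leftover-blocking pass corresponds to the storm guard at lines 212-217: a pending bit with no handler to clear it could otherwise re-fire indefinitely, so it gets logged and masked.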
233 struct nvkm_subdev *subdev, int leaves, struct nvkm_intr *intr)
238 intr->func = func;
239 intr->data = data;
240 intr->subdev = subdev;
241 intr->leaves = leaves;
242 intr->stat = kcalloc(leaves, sizeof(*intr->stat), GFP_KERNEL);
243 intr->mask = kcalloc(leaves, sizeof(*intr->mask), GFP_KERNEL);
244 if (!intr->stat || !intr->mask) {
245 kfree(intr->stat);
249 if (intr->subdev->debug >= NV_DBG_DEBUG) {
250 for (i = 0; i < intr->leaves; i++)
251 intr->mask[i] = ~0;
254 spin_lock_irq(&device->intr.lock);
255 list_add_tail(&intr->head, &device->intr.intr);
256 spin_unlock_irq(&device->intr.lock);
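
The constructor path at source lines 233-256 wires a new interrupt tree into the device: it records the func/data/subdev/leaves tuple, allocates one stat word and one mask word per leaf, and links the tree onto device->intr.intr under the lock. The allocation-and-cleanup shape, reduced to a standalone sketch (calloc standing in for kcalloc; hypothetical names):

/* Illustrative sketch only. */
#include <stdint.h>
#include <stdlib.h>

struct leaf_state {
	int       leaves;
	uint32_t *stat;   /* latched pending bits, one word per leaf */
	uint32_t *mask;   /* software enable mask, one word per leaf */
};

/* Allocate both per-leaf arrays; on failure release whatever succeeded. */
static int
leaf_state_init(struct leaf_state *s, int leaves)
{
	s->leaves = leaves;
	s->stat = calloc(leaves, sizeof(*s->stat));
	s->mask = calloc(leaves, sizeof(*s->mask));
	if (!s->stat || !s->mask) {
		free(s->stat);
		free(s->mask);
		s->stat = s->mask = NULL;
		return -1;
	}
	return 0;
}

Lines 249-251 also show a debug aid: when the owning subdev's debug level is high enough, every mask word starts out as ~0, presumably so no source is filtered out before anything has been explicitly allowed.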
270 nvkm_intr_subdev_add_dev(struct nvkm_intr *intr, enum nvkm_subdev_type type, int inst)
276 subdev = nvkm_device_subdev(intr->subdev->device, type, inst);
277 if (!subdev || !subdev->func->intr)
285 ret = nvkm_inth_add(intr, NVKM_INTR_SUBDEV, prio, subdev, nvkm_intr_subdev, &subdev->inth);
293 nvkm_intr_subdev_add(struct nvkm_intr *intr)
296 struct nvkm_device *device = intr->subdev->device;
299 for (data = intr->data; data && data->mask; data++) {
303 if (tdev->intr < 0 || !(data->mask & BIT(tdev->intr)))
306 nvkm_intr_subdev_add_dev(intr, tdev->type, tdev->inst);
309 nvkm_intr_subdev_add_dev(intr, data->type, data->inst);
318 struct nvkm_intr *intr;
321 if (unlikely(!device->intr.legacy_done)) {
322 list_for_each_entry(intr, &device->intr.intr, head)
323 nvkm_intr_subdev_add(intr);
324 device->intr.legacy_done = true;
327 spin_lock_irq(&device->intr.lock);
328 list_for_each_entry(intr, &device->intr.intr, head) {
329 for (i = 0; intr->func->block && i < intr->leaves; i++) {
330 intr->func->block(intr, i, ~0);
331 intr->func->allow(intr, i, intr->mask[i]);
336 device->intr.armed = true;
337 spin_unlock_irq(&device->intr.lock);
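
The arming path at source lines 318-337 resynchronises hardware with the software state: on the first pass it also registers the legacy per-subdev handlers (lines 321-324), then for every tree it blocks all bits and re-allows exactly intr->mask[i] before flagging the device as armed. The block-then-allow sweep, as a standalone sketch with hypothetical names:

/* Illustrative sketch only. */
#include <stdint.h>

#define NLEAF 4

/*
 * Bring hardware in line with the software mask: mask everything
 * first, then unmask exactly what the software copy says is enabled,
 * and only then report the controller as armed.
 */
static void
rearm(const uint32_t mask[NLEAF],
      void (*hw_block)(int leaf, uint32_t bits),
      void (*hw_allow)(int leaf, uint32_t bits),
      int *armed)
{
	for (int leaf = 0; leaf < NLEAF; leaf++) {
		hw_block(leaf, ~0u);
		hw_allow(leaf, mask[leaf]);
	}
	*armed = 1;
}

Note that line 329 only runs the sweep when a block callback exists, so trees without fine-grained masking are simply left alone.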
343 spin_lock_irq(&device->intr.lock);
345 device->intr.armed = false;
346 spin_unlock_irq(&device->intr.lock);
354 device->intr.irq = device->func->irq(device);
355 if (device->intr.irq < 0)
356 return device->intr.irq;
358 ret = request_irq(device->intr.irq, nvkm_intr, IRQF_SHARED, "nvkm", device);
362 device->intr.alloc = true;
369 struct nvkm_intr *intr, *intt;
371 list_for_each_entry_safe(intr, intt, &device->intr.intr, head) {
372 list_del(&intr->head);
373 kfree(intr->mask);
374 kfree(intr->stat);
377 if (device->intr.alloc)
378 free_irq(device->intr.irq, device);
386 INIT_LIST_HEAD(&device->intr.intr);
387 for (i = 0; i < ARRAY_SIZE(device->intr.prio); i++)
388 INIT_LIST_HEAD(&device->intr.prio[i]);
390 spin_lock_init(&device->intr.lock);
391 device->intr.armed = false;
397 if (unlikely(!inth->intr))
406 struct nvkm_intr *intr = inth->intr;
409 if (unlikely(!inth->intr))
412 spin_lock_irqsave(&intr->subdev->device->intr.lock, flags);
414 if ((intr->mask[inth->leaf] & inth->mask) != inth->mask)
415 nvkm_intr_allow_locked(intr, inth->leaf, inth->mask);
417 spin_unlock_irqrestore(&intr->subdev->device->intr.lock, flags);
421 nvkm_inth_add(struct nvkm_intr *intr, enum nvkm_intr_type type, enum nvkm_intr_prio prio,
430 ret = nvkm_intr_xlat(subdev, intr, type, &inth->leaf, &inth->mask);
434 nvkm_debug(intr->subdev, "intr %d/%08x requested by %s\n",
437 inth->intr = intr;
440 list_add_tail(&inth->head, &device->intr.prio[prio]);
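
nvkm_inth_add() (source lines 421-440) registers an individual handler: the caller's abstract type is translated to a (leaf, mask) pair via nvkm_intr_xlat(), the handler records which tree it belongs to (inth->intr), and it is appended to the requested priority list that the top-level dispatch walks. The registration step as a standalone sketch, with a fixed-size array instead of kernel lists and hypothetical names throughout:

/* Illustrative sketch only. */
#include <stdint.h>
#include <stddef.h>

#define MAX_HANDLERS 16

struct handler {
	int      leaf;
	uint32_t mask;
	void   (*fn)(void *arg);
	void    *arg;
};

struct prio_level {
	struct handler list[MAX_HANDLERS];
	size_t         count;
};

/*
 * Register a handler at a priority level.  The (leaf, mask) pair is
 * assumed to have been resolved already (see the xlat sketch above).
 */
static int
add_handler(struct prio_level *levels, int prio,
            int leaf, uint32_t mask, void (*fn)(void *), void *arg)
{
	struct prio_level *lvl = &levels[prio];

	if (lvl->count == MAX_HANDLERS)
		return -1;

	lvl->list[lvl->count++] = (struct handler){
		.leaf = leaf, .mask = mask, .fn = fn, .arg = arg,
	};
	return 0;
}

The array backing is only for the sketch; the fragment shows the driver appending with list_add_tail() to device->intr.prio[prio].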