Lines matching refs:disp (cross-reference hits; the leading number on each hit is the line number within the source file)

40 struct nvkm_device *device = ior->disp->engine.subdev.device;
56 struct nvkm_device *device = sor->disp->engine.subdev.device;
65 struct nvkm_device *device = sor->disp->engine.subdev.device;
75 struct nvkm_device *device = sor->disp->engine.subdev.device;
102 struct nvkm_device *device = ior->disp->engine.subdev.device;
126 struct nvkm_device *device = ior->disp->engine.subdev.device;
148 struct nvkm_device *device = ior->disp->engine.subdev.device;
186 struct nvkm_device *device = sor->disp->engine.subdev.device;
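Every hit so far is the same expression: an output resource (ior/sor) holds a backpointer to its nvkm_disp, and the chip-level nvkm_device is reached through disp->engine.subdev.device. Below is a self-contained model of that containment chain, with struct layouts reduced to the fields the chain actually touches; the real definitions live in nvkm's core headers.

/*
 * Standalone model of the ownership chain the hits above repeat:
 * ior/sor/head/chan -> disp -> engine -> subdev -> device.
 */
#include <stdio.h>

struct nvkm_device { const char *name; };
struct nvkm_subdev { struct nvkm_device *device; };
struct nvkm_engine { struct nvkm_subdev subdev; };
struct nvkm_disp   { struct nvkm_engine engine; };
struct nvkm_ior    { struct nvkm_disp *disp; };

int main(void)
{
	struct nvkm_device dev = { .name = "gv100" };
	struct nvkm_disp disp = { .engine.subdev.device = &dev };
	struct nvkm_ior ior = { .disp = &disp };

	/* The exact expression from lines 40-186 above. */
	struct nvkm_device *device = ior.disp->engine.subdev.device;

	printf("%s\n", device->name);
	return 0;
}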
222 gv100_sor_new(struct nvkm_disp *disp, int id)
224 struct nvkm_device *device = disp->engine.subdev.device;
230 return nvkm_ior_new_(&gv100_sor, disp, SOR, id, hda & BIT(id));
234 gv100_sor_cnt(struct nvkm_disp *disp, unsigned long *pmask)
236 struct nvkm_device *device = disp->engine.subdev.device;
245 struct nvkm_device *device = head->disp->engine.subdev.device;
252 struct nvkm_device *device = head->disp->engine.subdev.device;
259 struct nvkm_device *device = head->disp->engine.subdev.device;
269 struct nvkm_device *device = head->disp->engine.subdev.device;
309 gv100_head_new(struct nvkm_disp *disp, int id)
311 struct nvkm_device *device = disp->engine.subdev.device;
316 return nvkm_head_new_(&gv100_head, disp, id);
320 gv100_head_cnt(struct nvkm_disp *disp, unsigned long *pmask)
322 struct nvkm_device *device = disp->engine.subdev.device;
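Lines 222-236 and 309-322 show the probe pattern used for both SORs and heads: a *_cnt() helper reads the hardware to report which units exist (filling *pmask), and a *_new() constructor instantiates one unit by index. A hedged sketch of the head-side pair follows; HEAD_PRESENT_REG, the masks, and the guard in the constructor are placeholders, not real gv100 values, and only the call shapes (nvkm_rd32, nvkm_head_new_) come from the listing.

/* Sketch only: HEAD_PRESENT_REG and the masks are placeholders. */
#define HEAD_PRESENT_REG 0x000000

int
gv100_head_cnt(struct nvkm_disp *disp, unsigned long *pmask)
{
	struct nvkm_device *device = disp->engine.subdev.device;

	/* Report which heads exist as a bitmask to the caller. */
	*pmask = nvkm_rd32(device, HEAD_PRESENT_REG) & 0x0f;
	return hweight_long(*pmask);
}

int
gv100_head_new(struct nvkm_disp *disp, int id)
{
	struct nvkm_device *device = disp->engine.subdev.device;

	/* Assumed guard: line 311 only shows that the device is
	 * consulted before construction, presumably to skip heads
	 * that are absent or fused off. */
	if (!(nvkm_rd32(device, HEAD_PRESENT_REG) & BIT(id)))
		return 0;

	return nvkm_head_new_(&gv100_head, disp, id);
}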
342 struct nvkm_device *device = chan->disp->engine.subdev.device;
356 return nvkm_ramht_insert(chan->disp->ramht, object, chan->chid.user, -9, handle,
363 struct nvkm_device *device = chan->disp->engine.subdev.device;
375 struct nvkm_subdev *subdev = &chan->disp->engine.subdev;
395 struct nvkm_device *device = chan->disp->engine.subdev.device;
521 struct nvkm_device *device = chan->disp->engine.subdev.device;
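Line 356 is the channel's object-bind hook: handles written by the client are entered into the display's RAMHT (hash table) under the channel's user ID. The listing truncates the final context argument, so it stays a placeholder in this sketch; the -9 argument is copied verbatim and its meaning is internal to nvkm_ramht_insert(). The channel type name is assumed from the chan->disp and chan->chid.user fields the listing shows.

static int
gv100_disp_chan_bind(struct nvkm_disp_chan *chan,
		     struct nvkm_object *object, u32 handle)
{
	u32 ctx = 0; /* placeholder: the real value is cut off at line 356 */

	return nvkm_ramht_insert(chan->disp->ramht, object,
				 chan->chid.user, -9, handle, ctx);
}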
546 gv100_disp_wndw_cnt(struct nvkm_disp *disp, unsigned long *pmask)
548 struct nvkm_device *device = disp->engine.subdev.device;
557 struct nvkm_device *device = chan->disp->engine.subdev.device;
570 struct nvkm_device *device = chan->disp->engine.subdev.device;
579 struct nvkm_device *device = chan->disp->engine.subdev.device;
589 struct nvkm_subdev *subdev = &chan->disp->engine.subdev;
724 struct nvkm_device *device = chan->disp->engine.subdev.device;
743 struct nvkm_device *device = chan->disp->engine.subdev.device;
752 struct nvkm_device *device = chan->disp->engine.subdev.device;
762 struct nvkm_subdev *subdev = &chan->disp->engine.subdev;
798 struct nvkm_disp *disp;
806 struct nvkm_device *device = caps->disp->engine.subdev.device;
822 struct nvkm_disp *disp = nvkm_udisp(oclass->parent);
830 caps->disp = disp;
837 struct nvkm_disp *disp = container_of(work, struct nvkm_disp, super.work);
838 struct nvkm_subdev *subdev = &disp->engine.subdev;
843 mutex_lock(&disp->super.mutex);
846 nvkm_debug(subdev, "supervisor %d: %08x\n", ffs(disp->super.pending), stat);
847 list_for_each_entry(head, &disp->heads, head) {
852 if (disp->super.pending & 0x00000001) {
853 nv50_disp_chan_mthd(disp->chan[0], NV_DBG_DEBUG);
854 nv50_disp_super_1(disp);
855 list_for_each_entry(head, &disp->heads, head) {
858 nv50_disp_super_1_0(disp, head);
861 if (disp->super.pending & 0x00000002) {
862 list_for_each_entry(head, &disp->heads, head) {
865 nv50_disp_super_2_0(disp, head);
867 list_for_each_entry(head, &disp->heads, head) {
870 nv50_disp_super_2_1(disp, head);
872 list_for_each_entry(head, &disp->heads, head) {
875 nv50_disp_super_2_2(disp, head);
878 if (disp->super.pending & 0x00000004) {
879 list_for_each_entry(head, &disp->heads, head) {
882 nv50_disp_super_3_0(disp, head);
886 list_for_each_entry(head, &disp->heads, head)
890 mutex_unlock(&disp->super.mutex);
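Lines 837-890 are the supervisor bottom half: modeset state transitions raise supervisor interrupts in three stages, and the worker runs the matching nv50_disp_super_* helpers over every head while holding disp->super.mutex. A condensed sketch follows; the status read, the per-head guard conditions, and the final acknowledge pass do not appear in the listing and are marked as elided.

static void
gv100_disp_super(struct work_struct *work)
{
	struct nvkm_disp *disp = container_of(work, struct nvkm_disp, super.work);
	struct nvkm_subdev *subdev = &disp->engine.subdev;
	struct nvkm_head *head;
	u32 stat = 0; /* supervisor status read elided in the listing */

	mutex_lock(&disp->super.mutex);
	nvkm_debug(subdev, "supervisor %d: %08x\n", ffs(disp->super.pending), stat);

	if (disp->super.pending & 0x00000001) {
		/* Stage 1: dump core-channel methods, then start teardown. */
		nv50_disp_chan_mthd(disp->chan[0], NV_DBG_DEBUG);
		nv50_disp_super_1(disp);
		list_for_each_entry(head, &disp->heads, head) {
			/* per-head guard elided (lines 855-857) */
			nv50_disp_super_1_0(disp, head);
		}
	} else if (disp->super.pending & 0x00000002) {
		/* Stage 2: three ordered passes over the heads;
		 * per-head guards elided as above. */
		list_for_each_entry(head, &disp->heads, head)
			nv50_disp_super_2_0(disp, head);
		list_for_each_entry(head, &disp->heads, head)
			nv50_disp_super_2_1(disp, head);
		list_for_each_entry(head, &disp->heads, head)
			nv50_disp_super_2_2(disp, head);
	} else if (disp->super.pending & 0x00000004) {
		/* Stage 3: bring the new state live. */
		list_for_each_entry(head, &disp->heads, head)
			nv50_disp_super_3_0(disp, head);
	}

	/* Final per-head pass at line 886 elided: its body is not shown. */
	mutex_unlock(&disp->super.mutex);
}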
894 gv100_disp_exception(struct nvkm_disp *disp, int chid)
896 struct nvkm_subdev *subdev = &disp->engine.subdev;
922 if (chid < ARRAY_SIZE(disp->chan) && disp->chan[chid]) {
925 nv50_disp_chan_mthd(disp->chan[chid], NV_DBG_ERROR);
936 gv100_disp_intr_ctrl_disp(struct nvkm_disp *disp)
938 struct nvkm_subdev *subdev = &disp->engine.subdev;
943 disp->super.pending = (stat & 0x00000007);
944 queue_work(disp->super.wq, &disp->super.work);
945 nvkm_wr32(device, 0x611860, disp->super.pending);
971 nv50_disp_chan_uevent_send(disp, 0);
974 for_each_set_bit(wndw, &wndws, disp->wndw.nr) {
975 nv50_disp_chan_uevent_send(disp, 1 + wndw);
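Lines 936-975 are the top half for the control/display interrupt: supervisor stage bits are latched into disp->super.pending and the worker above is queued, then per-channel "awaken" events are forwarded, with channel 0 as the core channel and windows at 1 + wndw. Condensed sketch; the status reads and the condition checks around the event sends are elided, as they are in the listing.

static void
gv100_disp_intr_ctrl_disp(struct nvkm_disp *disp)
{
	struct nvkm_subdev *subdev = &disp->engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 stat = 0;            /* status read elided in the listing */
	unsigned long wndws = 0; /* per-window awaken bits, read elided */
	int wndw;

	if (stat & 0x00000007) {
		/* Latch the stage bits, kick the worker, and
		 * acknowledge via the 0x611860 write (line 945). */
		disp->super.pending = (stat & 0x00000007);
		queue_work(disp->super.wq, &disp->super.work);
		nvkm_wr32(device, 0x611860, disp->super.pending);
	}

	/* Channel "awaken" notifications (lines 971-975); in the
	 * real handler these are guarded by further stat bits. */
	nv50_disp_chan_uevent_send(disp, 0);
	for_each_set_bit(wndw, &wndws, disp->wndw.nr)
		nv50_disp_chan_uevent_send(disp, 1 + wndw);
}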
984 gv100_disp_intr_exc_other(struct nvkm_disp *disp)
986 struct nvkm_subdev *subdev = &disp->engine.subdev;
994 gv100_disp_exception(disp, 0);
999 for_each_set_bit(head, &mask, disp->wndw.nr) {
1001 gv100_disp_exception(disp, 73 + head);
1013 gv100_disp_intr_exc_winim(struct nvkm_disp *disp)
1015 struct nvkm_subdev *subdev = &disp->engine.subdev;
1020 for_each_set_bit(wndw, &stat, disp->wndw.nr) {
1022 gv100_disp_exception(disp, 33 + wndw);
1033 gv100_disp_intr_exc_win(struct nvkm_disp *disp)
1035 struct nvkm_subdev *subdev = &disp->engine.subdev;
1040 for_each_set_bit(wndw, &stat, disp->wndw.nr) {
1042 gv100_disp_exception(disp, 1 + wndw);
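The three exception demultiplexers above (lines 984-1042) all funnel into gv100_disp_exception() with a flat channel ID, which fixes the channel layout: core at 0, window channels from 1, window-immediate channels from 33, and the per-head channels handled by exc_other from 73. The driver hard-codes these offsets at each call site; the enum below only restates the mapping for reference.

enum gv100_disp_chid_base {              /* illustrative only */
	GV100_DISP_CHID_CORE     = 0,    /* line 994:  exception(disp, 0)         */
	GV100_DISP_CHID_WNDW     = 1,    /* line 1042: exception(disp, 1 + wndw)  */
	GV100_DISP_CHID_WINIM    = 33,   /* line 1022: exception(disp, 33 + wndw) */
	GV100_DISP_CHID_PER_HEAD = 73,   /* line 1001: exception(disp, 73 + head) */
};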
1053 gv100_disp_intr_head_timing(struct nvkm_disp *disp, int head)
1055 struct nvkm_subdev *subdev = &disp->engine.subdev;
1066 nvkm_disp_vblank(disp, head);
1078 gv100_disp_intr(struct nvkm_disp *disp)
1080 struct nvkm_subdev *subdev = &disp->engine.subdev;
1088 gv100_disp_intr_head_timing(disp, head);
1094 gv100_disp_intr_exc_win(disp);
1099 gv100_disp_intr_exc_winim(disp);
1104 gv100_disp_intr_exc_other(disp);
1109 gv100_disp_intr_ctrl_disp(disp);
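gv100_disp_intr() (lines 1078-1109) is the engine's single interrupt entry point: it reads one top-level status word and fans out in a fixed order to per-head timing (which ends in nvkm_disp_vblank(), line 1066), window exceptions, window-immediate exceptions, other-channel exceptions, and finally the control/display bits. Skeleton only; the status register offset and every per-source bit mask are absent from the listing, so all of them are placeholders here, while the fan-out order matches the listing.

/* All offsets and masks below are placeholders, not gv100 values. */
#define DISP_INTR_STAT       0x000000
#define DISP_INTR_HEAD(i)    BIT(i)
#define DISP_INTR_EXC_WIN    0x00000100
#define DISP_INTR_EXC_WINIM  0x00000200
#define DISP_INTR_EXC_OTHER  0x00000400
#define DISP_INTR_CTRL_DISP  0x00000800

static void
gv100_disp_intr(struct nvkm_disp *disp)
{
	struct nvkm_subdev *subdev = &disp->engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 stat = nvkm_rd32(device, DISP_INTR_STAT);
	int head;

	for (head = 0; head < disp->head.nr; head++) {
		if (stat & DISP_INTR_HEAD(head))
			gv100_disp_intr_head_timing(disp, head);
	}
	if (stat & DISP_INTR_EXC_WIN)
		gv100_disp_intr_exc_win(disp);
	if (stat & DISP_INTR_EXC_WINIM)
		gv100_disp_intr_exc_winim(disp);
	if (stat & DISP_INTR_EXC_OTHER)
		gv100_disp_intr_exc_other(disp);
	if (stat & DISP_INTR_CTRL_DISP)
		gv100_disp_intr_ctrl_disp(disp);
}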
1118 gv100_disp_fini(struct nvkm_disp *disp, bool suspend)
1120 struct nvkm_device *device = disp->engine.subdev.device;
1125 gv100_disp_init(struct nvkm_disp *disp)
1127 struct nvkm_device *device = disp->engine.subdev.device;
1147 for (i = 0; i < disp->sor.nr; i++) {
1154 list_for_each_entry(head, &disp->heads, head) {
1169 for (i = 0; i < disp->wndw.nr; i++) {
1186 switch (nvkm_memory_target(disp->inst->memory)) {
1194 nvkm_wr32(device, 0x610014, disp->inst->addr >> 16);
1201 nvkm_wr32(device, 0x611cec, disp->head.mask << 16 |
1206 nvkm_wr32(device, 0x611ce8, disp->wndw.mask); /* MSK. */
1210 nvkm_wr32(device, 0x611ce4, disp->wndw.mask); /* MSK. */
1214 list_for_each_entry(head, &disp->heads, head) {
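The init path (lines 1125-1214) follows the usual nvkm display shape: program per-SOR, per-head, and per-window interrupt routing (lines 1147-1169), point the hardware at the core channel's instance block, then unmask the heads and windows that exist via disp->head.mask and disp->wndw.mask (lines 1201-1210). The bind step at lines 1186-1194 is the notable one: the aperture of disp->inst is taken from nvkm_memory_target() and the address is written to 0x610014 in 64KiB units (addr >> 16). A sketch of just that step, with the target encoding left as a placeholder since the listing elides it.

static void
gv100_disp_init_inst(struct nvkm_disp *disp)
{
	struct nvkm_device *device = disp->engine.subdev.device;
	u32 target; /* aperture encoding: all values are placeholders */

	switch (nvkm_memory_target(disp->inst->memory)) {
	case NVKM_MEM_TARGET_VRAM: target = 0; break; /* placeholder */
	case NVKM_MEM_TARGET_NCOH: target = 1; break; /* placeholder */
	case NVKM_MEM_TARGET_HOST: target = 2; break; /* placeholder */
	default:
		WARN_ON(1);
		return;
	}

	/* How `target` is folded into the register write is elided in
	 * the listing, so it is left unused here; only the address
	 * write at line 1194 is reproduced. */
	(void)target;
	nvkm_wr32(device, 0x610014, disp->inst->addr >> 16);
}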