Lines Matching defs:obj

107 	struct a6xx_state_memobj *obj =
108 		kvzalloc((nr * objsize) + sizeof(*obj), GFP_KERNEL);
110 if (!obj)
113 list_add_tail(&obj->node, &a6xx_state->objs);
114 return &obj->data;
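
The hits at 107-114 evidently come from the snapshot allocator in the MSM Adreno GPU state code (a6xx_gpu_state.c); lines that do not mention obj are elided by the search. Reconstructed, the helper reads as below; the signature and the early return are assumptions filled in from the call sites later in this listing:

    static void *state_kcalloc(struct a6xx_gpu_state *a6xx_state,
    		int nr, size_t objsize)
    {
    	/* One allocation carries the list node plus the payload. */
    	struct a6xx_state_memobj *obj =
    		kvzalloc((nr * objsize) + sizeof(*obj), GFP_KERNEL);

    	if (!obj)
    		return NULL;

    	/* Track the allocation so teardown can free it (see 1655-1657). */
    	list_add_tail(&obj->node, &a6xx_state->objs);
    	return &obj->data;
    }

Callers get back &obj->data, a pointer just past the header, so every capture buffer is owned by the a6xx_state->objs list rather than by the individual a6xx_gpu_state_obj that points at it.
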
251 struct a6xx_gpu_state_obj *obj)
256 obj->data = state_kcalloc(a6xx_state, VBIF_DEBUGBUS_BLOCK_SIZE,
258 if (!obj->data)
261 obj->handle = NULL;
276 ptr = obj->data;
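
Lines 251-276 are the VBIF debugbus capture. It is the one debugbus path with no block descriptor, so obj->handle is deliberately NULL and the show side (1929-1935) prints it under a fixed label. The shape, with the register programming elided and the function name taken from the upstream driver as an assumption:

    static void a6xx_get_vbif_debugbus_block(struct msm_gpu *gpu,
    		struct a6xx_gpu_state *a6xx_state,
    		struct a6xx_gpu_state_obj *obj)
    {
    	u32 *ptr;

    	obj->data = state_kcalloc(a6xx_state, VBIF_DEBUGBUS_BLOCK_SIZE,
    		sizeof(u32));
    	if (!obj->data)
    		return;

    	/* No descriptor: a NULL handle identifies the VBIF dump. */
    	obj->handle = NULL;

    	/* ... select and enable the VBIF debugbus (elided) ... */

    	ptr = obj->data;
    	/* ... read VBIF_DEBUGBUS_BLOCK_SIZE words through ptr (elided) ... */
    }
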
306 struct a6xx_gpu_state_obj *obj)
311 obj->data = state_kcalloc(a6xx_state, block->count, sizeof(u64));
312 if (!obj->data)
315 obj->handle = block;
317 for (ptr = obj->data, i = 0; i < block->count; i++)
324 struct a6xx_gpu_state_obj *obj)
329 obj->data = state_kcalloc(a6xx_state, block->count, sizeof(u64));
330 if (!obj->data)
333 obj->handle = block;
335 for (ptr = obj->data, i = 0; i < block->count; i++)
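
The hits at 306-317 and 324-335 are the same shape line for line: presumably the GPU-side and CX-side debugbus readers, which differ only in which read helper the loop calls. Each bus index yields two 32-bit words, which is why the allocation is sized in u64s. A sketch of one of the pair; the loop body advancing ptr by the helper's return value is an assumption:

    static void a6xx_get_debugbus_block(struct msm_gpu *gpu,
    		struct a6xx_gpu_state *a6xx_state,
    		const struct a6xx_debugbus_block *block,
    		struct a6xx_gpu_state_obj *obj)
    {
    	u32 *ptr;
    	int i;

    	/* Two dwords per index, hence sizeof(u64) per entry. */
    	obj->data = state_kcalloc(a6xx_state, block->count, sizeof(u64));
    	if (!obj->data)
    		return;

    	obj->handle = block;

    	for (ptr = obj->data, i = 0; i < block->count; i++)
    		ptr += debugbus_read(gpu, block->id, i, ptr);
    }
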
551 struct a6xx_gpu_state_obj *obj,
589 obj->handle = dbgahb;
590 obj->data = state_kmemdup(a6xx_state, dumper->ptr + A6XX_CD_DATA_OFFSET,
597 struct a6xx_gpu_state_obj *obj,
631 obj->handle = dbgahb;
632 obj->data = state_kmemdup(a6xx_state, dumper->ptr + A6XX_CD_DATA_OFFSET,
694 struct a6xx_gpu_state_obj *obj,
746 obj->handle = cluster;
747 obj->data = state_kmemdup(a6xx_state, dumper->ptr + A6XX_CD_DATA_OFFSET,
754 struct a6xx_gpu_state_obj *obj,
791 obj->handle = cluster;
792 obj->data = state_kmemdup(a6xx_state, dumper->ptr + A6XX_CD_DATA_OFFSET,
853 struct a6xx_gpu_state_obj *obj,
879 obj->handle = block;
880 obj->data = state_kmemdup(a6xx_state, dumper->ptr + A6XX_CD_DATA_OFFSET,
887 struct a6xx_gpu_state_obj *obj,
924 obj->handle = block;
925 obj->data = state_kmemdup(a6xx_state, dumper->ptr + A6XX_CD_DATA_OFFSET,
991 struct a6xx_gpu_state_obj *obj,
1020 obj->handle = regs;
1021 obj->data = state_kmemdup(a6xx_state, dumper->ptr + A6XX_CD_DATA_OFFSET,
1029 struct a6xx_gpu_state_obj *obj,
1063 obj->handle = regs;
1064 obj->data = state_kmemdup(a6xx_state, dumper->ptr + A6XX_CD_DATA_OFFSET,
1071 struct a6xx_gpu_state_obj *obj,
1100 obj->handle = regs->regs;
1101 obj->data = state_kmemdup(a6xx_state, dumper->ptr + A6XX_CD_DATA_OFFSET,
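
Every hit from 551 through 1101 repeats a single crashdumper idiom: record which static descriptor was captured in obj->handle, then state_kmemdup() the dumper's output out of the bounce buffer starting at A6XX_CD_DATA_OFFSET (the space below that offset holds the dump script itself). A representative sketch; the script construction, size computation, and helper names around the two listed lines are assumptions:

    static void a6xx_get_cluster(struct msm_gpu *gpu,
    		struct a6xx_gpu_state *a6xx_state,
    		const struct a6xx_cluster *cluster,
    		struct a6xx_gpu_state_obj *obj,
    		struct a6xx_crashdumper *dumper)
    {
    	size_t datasize = 0;

    	/* ... write the crashdump script into dumper->ptr and total
    	 * datasize from the cluster's register ranges (elided) ...
    	 */

    	if (a6xx_crashdumper_run(gpu, dumper))
    		return;

    	/* Tie the blob to its descriptor for the show path ... */
    	obj->handle = cluster;
    	/* ... and copy it out past the script area. */
    	obj->data = state_kmemdup(a6xx_state,
    		dumper->ptr + A6XX_CD_DATA_OFFSET, datasize);
    }

The same two-line tail appears for dbgahb clusters (589-590, 631-632), clusters (746-747, 791-792), shader blocks (879-880, 924-925), and plain register lists (1020-1021, 1063-1064, 1100-1101); only the handle's type changes.
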
1110 struct a6xx_gpu_state_obj *obj)
1122 obj->handle = (const void *) regs;
1123 obj->data = state_kcalloc(a6xx_state, regcount, sizeof(u32));
1124 if (!obj->data)
1132 obj->data[index++] = gpu_read(gpu,
1140 struct a6xx_gpu_state_obj *obj)
1147 obj->handle = (const void *) regs;
1148 obj->data = state_kcalloc(a6xx_state, regcount, sizeof(u32));
1149 if (!obj->data)
1157 obj->data[index++] = gpu_read(gpu, regs[i] + j);
1164 struct a6xx_gpu_state_obj *obj)
1169 a7xx_get_ahb_gpu_registers(gpu, a6xx_state, regs->regs, obj);
1176 struct a6xx_gpu_state_obj *obj,
1187 obj->handle = (const void *) regs;
1188 obj->data = state_kcalloc(a6xx_state, regcount, sizeof(u32));
1189 if (!obj->data)
1205 obj->data[index++] = val;
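
Lines 1110-1205 are the registers read directly over AHB rather than via the crashdumper: size a u32 array from the range table, then walk the ranges with gpu_read(). The a6xx variant (1110-1132) decodes (start, end) pairs from regs->registers, the a7xx variant (1140-1157) reads regs[i] + j from a flat table, 1164-1169 is a one-line wrapper unwrapping regs->regs, and 1176-1205 stages each value in a local val first. A sketch of the a6xx flavor, where RANGE() is assumed to turn a (start, end) pair into a count:

    static void a6xx_get_ahb_gpu_registers(struct msm_gpu *gpu,
    		struct a6xx_gpu_state *a6xx_state,
    		const struct a6xx_registers *regs,
    		struct a6xx_gpu_state_obj *obj)
    {
    	int i, regcount = 0, index = 0;

    	/* First pass: total the slots the ranges will need. */
    	for (i = 0; i < regs->count; i += 2)
    		regcount += RANGE(regs->registers, i);

    	obj->handle = (const void *) regs;
    	obj->data = state_kcalloc(a6xx_state, regcount, sizeof(u32));
    	if (!obj->data)
    		return;

    	/* Second pass: read every register in every range. */
    	for (i = 0; i < regs->count; i += 2) {
    		u32 count = RANGE(regs->registers, i);
    		int j;

    		for (j = 0; j < count; j++)
    			obj->data[index++] = gpu_read(gpu,
    				regs->registers[i] + j);
    	}
    }
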
1442 struct a6xx_gpu_state_obj *obj)
1447 obj->handle = (const void *) indexed;
1451 obj->data = state_kcalloc(a6xx_state, count, sizeof(u32));
1452 obj->count = count;
1453 if (!obj->data)
1461 obj->data[i] = gpu_read(gpu, indexed->data);
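
Lines 1442-1461 capture the indexed register files: write the index register once, then read the data register repeatedly while the hardware auto-increments. This is the only capture path that also fills obj->count (1452), because when the length is probed at runtime the show path (1896-1898) cannot recover it from the descriptor alone. A sketch; the addr/data field names and the index write are assumptions from the upstream driver:

    static void a6xx_get_indexed_regs(struct msm_gpu *gpu,
    		struct a6xx_gpu_state *a6xx_state,
    		struct a6xx_indexed_registers *indexed,
    		struct a6xx_gpu_state_obj *obj)
    {
    	u32 count = indexed->count;
    	int i;

    	obj->handle = (const void *) indexed;

    	/* Reset the index; the data port auto-increments from 0. */
    	gpu_write(gpu, indexed->addr, 0);

    	obj->data = state_kcalloc(a6xx_state, count, sizeof(u32));
    	obj->count = count;
    	if (!obj->data)
    		return;

    	for (i = 0; i < count; i++)
    		obj->data[i] = gpu_read(gpu, indexed->data);
    }
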
1640 struct a6xx_state_memobj *obj, *tmp;
1655 list_for_each_entry_safe(obj, tmp, &a6xx_state->objs, node) {
1656 list_del(&obj->node);
1657 kvfree(obj);
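
Lines 1640-1657 are the payoff of the memobj scheme: because every capture buffer was carved out of an a6xx_state_memobj, teardown is one list walk with no per-obj->data frees. In context (the kref plumbing and the handling of buffers not allocated as memobjs are assumptions):

    static void a6xx_gpu_state_destroy(struct kref *kref)
    {
    	struct a6xx_state_memobj *obj, *tmp;
    	struct msm_gpu_state *state =
    		container_of(kref, struct msm_gpu_state, ref);
    	struct a6xx_gpu_state *a6xx_state =
    		container_of(state, struct a6xx_gpu_state, base);

    	/* ... free any buffers not allocated as memobjs (elided) ... */

    	/* Every snapshot buffer lives inside one of these nodes. */
    	list_for_each_entry_safe(obj, tmp, &a6xx_state->objs, node) {
    		list_del(&obj->node);
    		kvfree(obj);
    	}

    	adreno_gpu_state_destroy(state);
    	kfree(a6xx_state);
    }

kvfree() matches the kvzalloc() in state_kcalloc(), and list_for_each_entry_safe() is required because each node is freed while the list is being walked.
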
1756 static void a6xx_show_shader(struct a6xx_gpu_state_obj *obj,
1759 const struct a6xx_shader_block *block = obj->handle;
1762 if (!obj->handle)
1771 if (!obj->data)
1775 obj->data + (block->size * i));
1779 static void a7xx_show_shader(struct a6xx_gpu_state_obj *obj,
1782 const struct gen7_shader_block *block = obj->handle;
1784 u32 *data = obj->data;
1786 if (!obj->handle)
1799 if (!obj->data)
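
From 1756 on, the show side mirrors the capture side: obj->handle says which static descriptor a blob belongs to, a NULL handle means the block was never captured, and NULL data means the allocation failed, so both are checked before printing. The a6xx shader printer (1756-1775) walks the per-bank copies at block->size strides; a sketch with the label strings and A6XX_NUM_SHADER_BANKS assumed:

    static void a6xx_show_shader(struct a6xx_gpu_state_obj *obj,
    		struct drm_printer *p)
    {
    	const struct a6xx_shader_block *block = obj->handle;
    	int i;

    	if (!obj->handle)
    		return;

    	drm_printf(p, "  - type: %s\n", block->name);

    	for (i = 0; i < A6XX_NUM_SHADER_BANKS; i++) {
    		drm_printf(p, "    - bank: %d\n", i);

    		if (!obj->data)
    			continue;

    		/* Bank i's copy sits block->size dwords into the blob. */
    		print_ascii85(p, block->size << 2,
    			obj->data + (block->size * i));
    	}
    }
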
1835 static void a6xx_show_dbgahb_cluster(struct a6xx_gpu_state_obj *obj,
1838 const struct a6xx_dbgahb_cluster *dbgahb = obj->handle;
1843 obj->data, p);
1847 static void a6xx_show_cluster(struct a6xx_gpu_state_obj *obj,
1850 const struct a6xx_cluster *cluster = obj->handle;
1855 obj->data, p);
1859 static void a7xx_show_dbgahb_cluster(struct a6xx_gpu_state_obj *obj,
1862 const struct gen7_sptp_cluster_registers *dbgahb = obj->handle;
1868 a7xx_show_registers_indented(dbgahb->regs, obj->data, p, 4);
1872 static void a7xx_show_cluster(struct a6xx_gpu_state_obj *obj,
1875 const struct gen7_cluster_registers *cluster = obj->handle;
1883 a7xx_show_registers_indented(cluster->regs, obj->data, p, 4);
1887 static void a6xx_show_indexed_regs(struct a6xx_gpu_state_obj *obj,
1890 const struct a6xx_indexed_registers *indexed = obj->handle;
1896 drm_printf(p, " dwords: %d\n", obj->count);
1898 print_ascii85(p, obj->count << 2, obj->data);
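
1887-1898 print the indexed register dumps, and the shift is the detail to notice: obj->count is in dwords while print_ascii85() takes a byte length, hence count << 2. A sketch with the label strings assumed:

    static void a6xx_show_indexed_regs(struct a6xx_gpu_state_obj *obj,
    		struct drm_printer *p)
    {
    	const struct a6xx_indexed_registers *indexed = obj->handle;

    	if (!indexed)
    		return;

    	drm_printf(p, "  - regs-name: %s\n", indexed->name);
    	drm_printf(p, "    dwords: %d\n", obj->count);

    	/* obj->count is dwords; print_ascii85() wants bytes. */
    	print_ascii85(p, obj->count << 2, obj->data);
    }
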
1923 struct a6xx_gpu_state_obj *obj = &a6xx_state->debugbus[i];
1925 a6xx_show_debugbus_block(obj->handle, obj->data, p);
1929 struct a6xx_gpu_state_obj *obj = a6xx_state->vbif_debugbus;
1935 print_ascii85(p, VBIF_DEBUGBUS_BLOCK_SIZE << 2, obj->data);
1939 struct a6xx_gpu_state_obj *obj = &a6xx_state->cx_debugbus[i];
1941 a6xx_show_debugbus_block(obj->handle, obj->data, p);
2000 struct a6xx_gpu_state_obj *obj = &a6xx_state->registers[i];
2002 if (!obj->handle)
2006 a7xx_show_registers(obj->handle, obj->data, p);
2008 const struct a6xx_registers *regs = obj->handle;
2010 a6xx_show_registers(regs->registers, obj->data, regs->count, p);
2016 struct a6xx_gpu_state_obj *obj = &a6xx_state->gmu_registers[i];
2017 const struct a6xx_registers *regs = obj->handle;
2019 if (!obj->handle)
2022 a6xx_show_registers(regs->registers, obj->data, regs->count, p);
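
The final hits (2000-2022) dispatch the register dumps. A capture that never ran leaves obj->handle NULL, so each loop tests it before dereferencing, and the handle's real type depends on the generation: a7xx tables are printed whole, a6xx tables go through regs->registers and regs->count. A sketch of the main loop, with the loop bound and the generation check assumed:

    	for (i = 0; i < a6xx_state->nr_registers; i++) {
    		struct a6xx_gpu_state_obj *obj = &a6xx_state->registers[i];

    		/* Skipped or failed captures leave the handle NULL. */
    		if (!obj->handle)
    			continue;

    		if (adreno_is_a7xx(adreno_gpu)) {
    			a7xx_show_registers(obj->handle, obj->data, p);
    		} else {
    			const struct a6xx_registers *regs = obj->handle;

    			a6xx_show_registers(regs->registers, obj->data,
    				regs->count, p);
    		}
    	}

The GMU loop at 2016-2022 is the same pattern minus the generation split.
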