Lines Matching refs:buffer

25 static inline void OUT(struct etnaviv_cmdbuf *buffer, u32 data)
27 u32 *vaddr = (u32 *)buffer->vaddr;
29 BUG_ON(buffer->user_size >= buffer->size);
31 vaddr[buffer->user_size / 4] = data;
32 buffer->user_size += 4;
35 static inline void CMD_LOAD_STATE(struct etnaviv_cmdbuf *buffer,
40 buffer->user_size = ALIGN(buffer->user_size, 8);
43 OUT(buffer, VIV_FE_LOAD_STATE_HEADER_OP_LOAD_STATE |
46 OUT(buffer, value);
49 static inline void CMD_END(struct etnaviv_cmdbuf *buffer)
51 buffer->user_size = ALIGN(buffer->user_size, 8);
53 OUT(buffer, VIV_FE_END_HEADER_OP_END);
56 static inline void CMD_WAIT(struct etnaviv_cmdbuf *buffer,
59 buffer->user_size = ALIGN(buffer->user_size, 8);
61 OUT(buffer, VIV_FE_WAIT_HEADER_OP_WAIT | waitcycles);
64 static inline void CMD_LINK(struct etnaviv_cmdbuf *buffer,
67 buffer->user_size = ALIGN(buffer->user_size, 8);
69 OUT(buffer, VIV_FE_LINK_HEADER_OP_LINK |
71 OUT(buffer, address);
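
Two details of the helpers above are worth spelling out: every CMD_* first aligns user_size to 8 bytes, and each OUT() advances it by 4. A short worked example of what that implies for the WAIT/LINK pair used throughout this file (the byte counts are inferred from the ALIGN/OUT calls; link_target is a placeholder):

	/* assume user_size is 8-byte aligned at offset X */
	CMD_WAIT(buffer, gpu->fe_waitcycles);  /* 1 dword at X, user_size = X + 4   */
	CMD_LINK(buffer, 2, link_target);      /* pads to X + 8, 2 dwords, = X + 16 */

	/*
	 * A parked WAIT/LINK pair therefore always occupies 16 bytes, which is
	 * why the queueing code below finds it at buffer->user_size - 16, and
	 * why a link target computed as "get_va(...) + user_size - 4" right
	 * after the WAIT points back at the WAIT itself.
	 */
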
74 static inline void CMD_STALL(struct etnaviv_cmdbuf *buffer,
77 buffer->user_size = ALIGN(buffer->user_size, 8);
79 OUT(buffer, VIV_FE_STALL_HEADER_OP_STALL);
80 OUT(buffer, VIV_FE_STALL_TOKEN_FROM(from) | VIV_FE_STALL_TOKEN_TO(to));
83 static inline void CMD_SEM(struct etnaviv_cmdbuf *buffer, u32 from, u32 to)
85 CMD_LOAD_STATE(buffer, VIVS_GL_SEMAPHORE_TOKEN,
91 struct etnaviv_cmdbuf *buffer, u8 pipe)
108 CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE, flush);
109 CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
110 CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
112 CMD_LOAD_STATE(buffer, VIVS_GL_PIPE_SELECT,
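
The pipe-select sequence at lines 91-112 shows only its buffer-touching lines, but the ordering is the point: flush the caches of the pipe being left, stall the front end until the pixel engine has drained, and only then switch pipes. A sketch of the whole function under that reading (the flush selection, gpu->exec_state, ETNA_PIPE_* and VIVS_GL_PIPE_SELECT_PIPE() are assumptions drawn from the rest of the driver):

static void etnaviv_cmd_select_pipe(struct etnaviv_gpu *gpu,
	struct etnaviv_cmdbuf *buffer, u8 pipe)
{
	u32 flush = 0;

	/* flush whichever pipe we are switching away from */
	if (gpu->exec_state == ETNA_PIPE_2D)
		flush = VIVS_GL_FLUSH_CACHE_PE2D;
	else if (gpu->exec_state == ETNA_PIPE_3D)
		flush = VIVS_GL_FLUSH_CACHE_DEPTH | VIVS_GL_FLUSH_CACHE_COLOR;

	CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE, flush);
	CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
	CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);

	CMD_LOAD_STATE(buffer, VIVS_GL_PIPE_SELECT,
		       VIVS_GL_PIPE_SELECT_PIPE(pipe));
}
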
137 static void etnaviv_buffer_replace_wait(struct etnaviv_cmdbuf *buffer,
140 u32 *lw = buffer->vaddr + wl_offset;
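
Only the signature and the pointer setup of etnaviv_buffer_replace_wait() reference buffer, so its body is missing from the listing. A sketch of how it plausibly completes, assuming the new link target is stored before the WAIT opcode is overwritten, with barriers in between so the front end never follows a half-written LINK:

static void etnaviv_buffer_replace_wait(struct etnaviv_cmdbuf *buffer,
	unsigned int wl_offset, u32 cmd, u32 link_target)
{
	u32 *lw = buffer->vaddr + wl_offset;

	lw[1] = link_target;	/* publish the target first */
	mb();
	lw[0] = cmd;		/* then flip the WAIT into a LINK (or END) */
	mb();
}
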
149 * Ensure that there is space in the command buffer to contiguously write
150 * 'cmd_dwords' 64-bit words into the buffer, wrapping if necessary.
153 struct etnaviv_cmdbuf *buffer, unsigned int cmd_dwords)
155 if (buffer->user_size + cmd_dwords * sizeof(u64) > buffer->size)
156 buffer->user_size = 0;
158 return etnaviv_cmdbuf_get_va(buffer,
160 buffer->user_size;
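
Lines 153-160 are essentially all of etnaviv_buffer_reserve(): if the next cmd_dwords 64-bit words would run past the end of the ring, the write position wraps to the start, and the return value is the GPU address of the reserved space. Joined up (the exact formatting of the return statement is an assumption):

static u32 etnaviv_buffer_reserve(struct etnaviv_gpu *gpu,
	struct etnaviv_cmdbuf *buffer, unsigned int cmd_dwords)
{
	/* wrap if the command would not fit contiguously */
	if (buffer->user_size + cmd_dwords * sizeof(u64) > buffer->size)
		buffer->user_size = 0;

	/* GPU virtual address of the current write position */
	return etnaviv_cmdbuf_get_va(buffer,
				     &gpu->mmu_context->cmdbuf_mapping) +
	       buffer->user_size;
}
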
165 struct etnaviv_cmdbuf *buffer = &gpu->buffer;
169 /* initialize buffer */
170 buffer->user_size = 0;
172 CMD_WAIT(buffer, gpu->fe_waitcycles);
173 CMD_LINK(buffer, 2,
174 etnaviv_cmdbuf_get_va(buffer, &gpu->mmu_context->cmdbuf_mapping)
175 + buffer->user_size - 4);
177 return buffer->user_size / 8;
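
Lines 165-177 cover nearly all of etnaviv_buffer_init(). The effect is to park the front end in a self-loop: a WAIT followed by a LINK whose target, computed while user_size still points just past the WAIT, is the WAIT itself. A sketch assembled from those lines (the u16 return type is an assumption):

u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu)
{
	struct etnaviv_cmdbuf *buffer = &gpu->buffer;

	/* initialize buffer */
	buffer->user_size = 0;

	/* WAIT, then LINK back to the WAIT just emitted: the FE idles here */
	CMD_WAIT(buffer, gpu->fe_waitcycles);
	CMD_LINK(buffer, 2,
		 etnaviv_cmdbuf_get_va(buffer, &gpu->mmu_context->cmdbuf_mapping)
		 + buffer->user_size - 4);

	/* prefetch count for the FE, in 64-bit command words */
	return buffer->user_size / 8;
}
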
182 struct etnaviv_cmdbuf *buffer = &gpu->buffer;
186 buffer->user_size = 0;
189 CMD_LOAD_STATE(buffer, VIVS_GL_PIPE_SELECT,
191 CMD_LOAD_STATE(buffer, VIVS_MMUv2_CONFIGURATION,
193 CMD_LOAD_STATE(buffer, VIVS_MMUv2_SAFE_ADDRESS, safe_addr);
194 CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
195 CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
199 CMD_LOAD_STATE(buffer, VIVS_GL_PIPE_SELECT,
201 CMD_LOAD_STATE(buffer, VIVS_MMUv2_CONFIGURATION,
203 CMD_LOAD_STATE(buffer, VIVS_MMUv2_SAFE_ADDRESS, safe_addr);
204 CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
205 CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
208 CMD_END(buffer);
210 buffer->user_size = ALIGN(buffer->user_size, 8);
212 return buffer->user_size / 8;
217 struct etnaviv_cmdbuf *buffer = &gpu->buffer;
221 buffer->user_size = 0;
223 CMD_LOAD_STATE(buffer, VIVS_MMUv2_PTA_CONFIG,
226 CMD_END(buffer);
228 buffer->user_size = ALIGN(buffer->user_size, 8);
230 return buffer->user_size / 8;
235 struct etnaviv_cmdbuf *buffer = &gpu->buffer;
236 unsigned int waitlink_offset = buffer->user_size - 16;
258 link_target = etnaviv_buffer_reserve(gpu, buffer, dwords);
260 CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
261 CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
263 CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x1);
264 CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_BLT);
265 CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_BLT);
266 CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x0);
268 CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE, flush);
271 CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x1);
272 CMD_LOAD_STATE(buffer, VIVS_BLT_SET_COMMAND, 0x1);
273 CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x0);
275 CMD_LOAD_STATE(buffer, VIVS_TS_FLUSH_CACHE,
279 CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
280 CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
282 CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x1);
283 CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_BLT);
284 CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_BLT);
285 CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x0);
287 CMD_END(buffer);
289 etnaviv_buffer_replace_wait(buffer, waitlink_offset,
295 etnaviv_buffer_replace_wait(buffer, waitlink_offset,
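
etnaviv_buffer_end() (lines 235-295) emits an optional cache-flush epilogue terminated by CMD_END, then retires the previously parked WAIT. Both etnaviv_buffer_replace_wait() calls are truncated in the listing at lines 289 and 295; their likely arguments, based on the opcodes used elsewhere in this file (the VIV_FE_LINK_HEADER_PREFETCH() macro is an assumption), are:

	if (flush) {
		/* ... flush sequence as listed above, ending in CMD_END() ... */

		/* turn the parked WAIT into a LINK into the flush sequence */
		etnaviv_buffer_replace_wait(buffer, waitlink_offset,
					    VIV_FE_LINK_HEADER_OP_LINK |
					    VIV_FE_LINK_HEADER_PREFETCH(dwords),
					    link_target);
	} else {
		/* nothing to flush: turn the parked WAIT directly into an END */
		etnaviv_buffer_replace_wait(buffer, waitlink_offset,
					    VIV_FE_END_HEADER_OP_END, 0);
	}
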
300 /* Append a 'sync point' to the ring buffer. */
303 struct etnaviv_cmdbuf *buffer = &gpu->buffer;
304 unsigned int waitlink_offset = buffer->user_size - 16;
314 target = etnaviv_buffer_reserve(gpu, buffer, dwords);
317 CMD_LOAD_STATE(buffer, VIVS_GL_EVENT, VIVS_GL_EVENT_EVENT_ID(event) |
321 CMD_END(buffer);
324 CMD_WAIT(buffer, gpu->fe_waitcycles);
325 CMD_LINK(buffer, 2,
326 etnaviv_cmdbuf_get_va(buffer, &gpu->mmu_context->cmdbuf_mapping)
327 + buffer->user_size - 4);
331 * WAIT with a link to the address in the ring buffer.
333 etnaviv_buffer_replace_wait(buffer, waitlink_offset,
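
The sync-point path (lines 300-333) signals an event from the pixel engine, stops the front end with CMD_END, parks a fresh WAIT/LINK loop right after it, and only then redirects the old WAIT. A sketch of that sequence with the truncated arguments filled in (VIVS_GL_EVENT_FROM_PE and VIV_FE_LINK_HEADER_PREFETCH(dwords) are assumptions):

	/* signal the sync point event from the PE */
	CMD_LOAD_STATE(buffer, VIVS_GL_EVENT, VIVS_GL_EVENT_EVENT_ID(event) |
		       VIVS_GL_EVENT_FROM_PE);

	/* stop the front end, effectively pausing the GPU */
	CMD_END(buffer);

	/* park a new WAIT/LINK loop to resume from later */
	CMD_WAIT(buffer, gpu->fe_waitcycles);
	CMD_LINK(buffer, 2,
		 etnaviv_cmdbuf_get_va(buffer, &gpu->mmu_context->cmdbuf_mapping)
		 + buffer->user_size - 4);

	/* only now let the FE run the commands above */
	etnaviv_buffer_replace_wait(buffer, waitlink_offset,
				    VIV_FE_LINK_HEADER_OP_LINK |
				    VIV_FE_LINK_HEADER_PREFETCH(dwords),
				    target);
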
339 /* Append a command buffer to the ring buffer. */
344 struct etnaviv_cmdbuf *buffer = &gpu->buffer;
345 unsigned int waitlink_offset = buffer->user_size - 16;
358 etnaviv_buffer_dump(gpu, buffer, 0, 0x50);
365 * If we need maintenance prior to submitting this buffer, we will
367 * link to this buffer - a total of four additional words.
391 target = etnaviv_buffer_reserve(gpu, buffer, extra_dwords);
408 CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_MMU,
422 CMD_LOAD_STATE(buffer,
430 CMD_LOAD_STATE(buffer, VIVS_MMUv2_CONFIGURATION,
432 CMD_SEM(buffer, SYNC_RECIPIENT_FE,
434 CMD_STALL(buffer, SYNC_RECIPIENT_FE,
442 etnaviv_cmd_select_pipe(gpu, buffer, exec_state);
446 /* And the link to the submitted buffer */
449 CMD_LINK(buffer, link_dwords, link_target);
457 * Append a LINK to the submitted command buffer to return to
458 * the ring buffer. return_target is the ring target address.
472 return_target = etnaviv_buffer_reserve(gpu, buffer, return_dwords);
477 * the wait command to the ring buffer.
480 CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE,
483 CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE,
487 CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x1);
488 CMD_LOAD_STATE(buffer, VIVS_BLT_SET_COMMAND, 0x1);
489 CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x0);
491 CMD_LOAD_STATE(buffer, VIVS_TS_FLUSH_CACHE,
495 CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
496 CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
499 CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x1);
500 CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_BLT);
501 CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_BLT);
502 CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x0);
505 CMD_LOAD_STATE(buffer, VIVS_GL_EVENT, VIVS_GL_EVENT_EVENT_ID(event) |
507 CMD_WAIT(buffer, gpu->fe_waitcycles);
508 CMD_LINK(buffer, 2,
509 etnaviv_cmdbuf_get_va(buffer, &gpu->mmu_context->cmdbuf_mapping)
510 + buffer->user_size - 4);
522 pr_info("link op: %p\n", buffer->vaddr + waitlink_offset);
530 * WAIT with a link to the address in the ring buffer.
532 etnaviv_buffer_replace_wait(buffer, waitlink_offset,
538 etnaviv_buffer_dump(gpu, buffer, 0, 0x50);
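
etnaviv_buffer_queue() (lines 339-538) is the largest consumer of these helpers. Pieced together from the matching lines, its flow is roughly the outline below; this is a summary, not the literal code, and it reuses the local names visible in the listing:

	/*
	 * 1. Note where the parked WAIT/LINK pair sits:
	 *        waitlink_offset = buffer->user_size - 16;
	 * 2. Reserve extra_dwords in the ring and emit any maintenance:
	 *    an MMU flush (VIVS_GL_FLUSH_MMU, or a VIVS_MMUv2_CONFIGURATION
	 *    reload plus an FE<->PE stall) and/or a pipe switch via
	 *    etnaviv_cmd_select_pipe().
	 * 3. CMD_LINK(buffer, link_dwords, link_target) jumps into the
	 *    submitted command buffer, which itself is terminated with a
	 *    LINK back to return_target in the ring (lines 457-458).
	 * 4. At return_target: cache/TS/BLT flushes with FE<->PE (and
	 *    FE<->BLT) semaphore stalls, the VIVS_GL_EVENT completion
	 *    signal, then a fresh WAIT/LINK parking loop.
	 * 5. etnaviv_buffer_replace_wait() flips the old WAIT into a LINK;
	 *    that store is the moment the GPU starts fetching the new work.
	 */
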