Searched refs:wptr (Results 1 - 25 of 137) sorted by path

/linux-master/drivers/media/usb/pvrusb2/
pvrusb2-debugifc.c:55 const char *wptr; local
60 wptr = NULL;
68 wptr = buf;
73 *wstrPtr = wptr;
182 const char *wptr; local
186 scnt = debugifc_isolate_word(buf,count,&wptr,&wlen);
189 if (!wptr) return 0;
191 pvr2_trace(PVR2_TRACE_DEBUGIFC,"debugifc cmd: \"%.*s\"",wlen,wptr);
192 if (debugifc_match_keyword(wptr,wlen,"reset")) {
193 scnt = debugifc_isolate_word(buf,count,&wptr,
[all...]
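
The pvrusb2 hits above show the contract of debugifc_isolate_word(): it hands back a pointer into the caller's buffer plus a length, leaving wptr NULL when no token is found (hence the "if (!wptr) return 0;" at line 189). A minimal user-space sketch of that contract, with illustrative names:

    #include <ctype.h>

    static int isolate_word(const char *buf, unsigned int count,
                            const char **wstrPtr, unsigned int *wlenPtr)
    {
        const char *wptr = NULL;
        unsigned int consumed = 0, wlen = 0;

        while (consumed < count && isspace((unsigned char)buf[consumed]))
            consumed++;                      /* skip leading whitespace */
        if (consumed < count) {
            wptr = buf + consumed;           /* token starts here, no copy */
            while (consumed + wlen < count &&
                   !isspace((unsigned char)wptr[wlen]))
                wlen++;
        }
        *wstrPtr = wptr;                     /* stays NULL if no token */
        *wlenPtr = wlen;
        return (int)(consumed + wlen);       /* bytes scanned so far */
    }

A pointer-plus-length pair is what lets the "%.*s" trace at line 191 print the token without NUL-terminating it.
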
/linux-master/drivers/watchdog/
i6300esb.c:98 #define to_esb_dev(wptr) container_of(wptr, struct esb_dev, wdd)
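
The to_esb_dev() hit is the usual container_of() pattern: given a pointer to the embedded struct watchdog_device, recover the enclosing struct esb_dev. A self-contained sketch of the arithmetic (simplified struct layouts, and the classic offsetof form rather than the kernel's type-checked macro):

    #include <stddef.h>
    #include <stdio.h>

    struct watchdog_device { int timeout; };
    struct esb_dev {
        struct watchdog_device wdd;   /* embedded member */
        void *regs;
    };

    /* Classic form: subtract the member's offset from its address. */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    int main(void)
    {
        struct esb_dev edev = { .wdd = { .timeout = 30 } };
        struct watchdog_device *wdd = &edev.wdd;
        struct esb_dev *back = container_of(wdd, struct esb_dev, wdd);

        printf("timeout=%d same=%d\n", back->wdd.timeout, back == &edev);
        return 0;
    }

This is why watchdog callbacks that only receive the wdd pointer can still reach the driver's private state.
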
/linux-master/drivers/crypto/ccp/
tee-dev.c:104 tee->rb_mgr.wptr = 0;
230 (tee->rb_mgr.ring_start + tee->rb_mgr.wptr);
237 if (!(tee->rb_mgr.wptr + sizeof(struct tee_ring_cmd) == rptr ||
241 dev_dbg(tee->dev, "tee: ring buffer full. rptr = %u wptr = %u\n",
242 rptr, tee->rb_mgr.wptr);
252 (tee->rb_mgr.wptr + sizeof(struct tee_ring_cmd) == rptr ||
254 dev_err(tee->dev, "tee: ring buffer full. rptr = %u wptr = %u response flag %u\n",
255 rptr, tee->rb_mgr.wptr, cmd->flag);
278 tee->rb_mgr.wptr += sizeof(struct tee_ring_cmd);
279 if (tee->rb_mgr.wptr >
[all...]
tee-dev.h:43 * @wptr: index to the last written entry in ring buffer
50 u32 wptr; member in struct:ring_buf_manager
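
The tee-dev.c hits detect fullness by asking whether the slot after wptr is still owned by the consumer ("wptr + sizeof(struct tee_ring_cmd) == rptr"). A simplified sketch of that one-slot-empty rule, using modulo arithmetic where the driver handles wraparound explicitly (sizes and names are illustrative):

    #include <stdbool.h>
    #include <stdint.h>

    #define CMD_SIZE   64u                /* stand-in for sizeof(struct tee_ring_cmd) */
    #define RING_SIZE  (CMD_SIZE * 16u)   /* must be a multiple of CMD_SIZE */

    struct ring {
        uint32_t rptr;                    /* consumer position, in bytes */
        uint32_t wptr;                    /* producer position, in bytes */
    };

    /* Full when advancing wptr would land on rptr: one slot is kept
     * empty so full and empty states stay distinguishable. */
    static bool ring_full(const struct ring *r)
    {
        return (r->wptr + CMD_SIZE) % RING_SIZE == r->rptr;
    }

    static bool ring_push(struct ring *r /* , const command */)
    {
        if (ring_full(r))
            return false;                 /* caller retries or errors out */
        /* copy the command to the buffer at r->wptr here */
        r->wptr = (r->wptr + CMD_SIZE) % RING_SIZE;
        return true;
    }
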
/linux-master/drivers/gpu/drm/amd/amdgpu/
amdgpu_amdkfd.h:266 /* Read user wptr from a specified user address space with page fault
272 #define read_user_wptr(mmptr, wptr, dst) \
275 if ((mmptr) && (wptr)) { \
278 valid = !get_user((dst), (wptr)); \
281 valid = !get_user((dst), (wptr)); \
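
read_user_wptr() only dereferences the user pointer while the right address space is active: if current->mm already matches, get_user() runs directly; otherwise the macro temporarily adopts the queue's mm. A user-space mock of that control flow (the stubs stand in for get_user() and kthread_use_mm()/kthread_unuse_mm(); real get_user() can fault and fail):

    #include <stdbool.h>
    #include <stdint.h>

    struct mm_struct { int id; };
    static struct mm_struct *current_mm;      /* stand-in for current->mm */

    static int get_user_u32(uint32_t *dst, const uint32_t *uaddr)
    { *dst = *uaddr; return 0; }               /* mock: never faults */
    static void use_mm(struct mm_struct *mm)   { current_mm = mm; }
    static void unuse_mm(struct mm_struct *mm) { (void)mm; }

    static bool read_user_wptr(struct mm_struct *mm,
                               const uint32_t *wptr, uint32_t *dst)
    {
        bool valid = false;

        if (mm && wptr) {
            if (current_mm == mm) {            /* already in that mm */
                valid = get_user_u32(dst, wptr) == 0;
            } else {                           /* borrow the mm briefly */
                use_mm(mm);
                valid = get_user_u32(dst, wptr) == 0;
                unuse_mm(mm);
            }
        }
        return valid;
    }
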
amdgpu_amdkfd_arcturus.c:125 uint32_t __user *wptr, struct mm_struct *mm)
132 uint64_t __user *wptr64 = (uint64_t __user *)wptr;
124 kgd_arcturus_hqd_sdma_load(struct amdgpu_device *adev, void *mqd, uint32_t __user *wptr, struct mm_struct *mm) argument
amdgpu_amdkfd_arcturus.h:24 uint32_t __user *wptr, struct mm_struct *mm);
amdgpu_amdkfd_gc_9_4_3.c:60 uint32_t __user *wptr, struct mm_struct *mm)
67 uint64_t __user *wptr64 = (uint64_t __user *)wptr;
286 uint32_t __user *wptr, uint32_t wptr_shift,
311 if (wptr) {
312 /* Don't read wptr with get_user because the user
316 * that wptr is GPU-accessible in the queue's VMID via
343 lower_32_bits((uintptr_t)wptr));
345 upper_32_bits((uintptr_t)wptr));
59 kgd_gfx_v9_4_3_hqd_sdma_load(struct amdgpu_device *adev, void *mqd, uint32_t __user *wptr, struct mm_struct *mm) argument
284 kgd_gfx_v9_4_3_hqd_load(struct amdgpu_device *adev, void *mqd, uint32_t pipe_id, uint32_t queue_id, uint32_t __user *wptr, uint32_t wptr_shift, uint32_t wptr_mask, struct mm_struct *mm, uint32_t inst) argument
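
Note the contrast with read_user_wptr(): these hqd_load paths do not dereference the user wptr at all. The comment in the hits explains that the queue's VMID mapping lets the hardware poll the wptr itself, so the driver only programs the address, split across a lo/hi register pair. The split, in the spirit of the kernel's lower_32_bits()/upper_32_bits() helpers (the kernel's upper_32_bits() shifts twice by 16 to stay safe on 32-bit types):

    #include <stdint.h>
    #include <stdio.h>

    #define lower_32_bits(n)  ((uint32_t)((uint64_t)(n) & 0xffffffffu))
    #define upper_32_bits(n)  ((uint32_t)((uint64_t)(n) >> 32))

    int main(void)
    {
        uint32_t queue_wptr = 0;                  /* pretend user wptr */
        uintptr_t addr = (uintptr_t)&queue_wptr;  /* the address, not the value */

        /* These halves would go into the wptr poll-address register pair. */
        printf("lo=0x%08x hi=0x%08x\n",
               lower_32_bits(addr), upper_32_bits(addr));
        return 0;
    }
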
amdgpu_amdkfd_gfx_v10.c:210 uint32_t __user *wptr, uint32_t wptr_shift,
236 if (wptr) {
237 /* Don't read wptr with get_user because the user
241 * that wptr is GPU-accessible in the queue's VMID via
268 lower_32_bits((uint64_t)wptr));
270 upper_32_bits((uint64_t)wptr));
374 uint32_t __user *wptr, struct mm_struct *mm)
381 uint64_t __user *wptr64 = (uint64_t __user *)wptr;
208 kgd_hqd_load(struct amdgpu_device *adev, void *mqd, uint32_t pipe_id, uint32_t queue_id, uint32_t __user *wptr, uint32_t wptr_shift, uint32_t wptr_mask, struct mm_struct *mm, uint32_t inst) argument
373 kgd_hqd_sdma_load(struct amdgpu_device *adev, void *mqd, uint32_t __user *wptr, struct mm_struct *mm) argument
amdgpu_amdkfd_gfx_v10_3.c:181 uint32_t __user *wptr, uint32_t wptr_shift,
222 if (wptr) {
223 /* Don't read wptr with get_user because the user
227 * that wptr is GPU-accessible in the queue's VMID via
254 lower_32_bits((uint64_t)wptr));
256 upper_32_bits((uint64_t)wptr));
360 uint32_t __user *wptr, struct mm_struct *mm)
367 uint64_t __user *wptr64 = (uint64_t __user *)wptr;
179 hqd_load_v10_3(struct amdgpu_device *adev, void *mqd, uint32_t pipe_id, uint32_t queue_id, uint32_t __user *wptr, uint32_t wptr_shift, uint32_t wptr_mask, struct mm_struct *mm, uint32_t inst) argument
359 hqd_sdma_load_v10_3(struct amdgpu_device *adev, void *mqd, uint32_t __user *wptr, struct mm_struct *mm) argument
amdgpu_amdkfd_gfx_v11.c:165 uint32_t queue_id, uint32_t __user *wptr,
207 if (wptr) {
208 /* Don't read wptr with get_user because the user
212 * that wptr is GPU-accessible in the queue's VMID via
239 lower_32_bits((uint64_t)wptr));
241 upper_32_bits((uint64_t)wptr));
345 uint32_t __user *wptr, struct mm_struct *mm)
352 uint64_t __user *wptr64 = (uint64_t __user *)wptr;
164 hqd_load_v11(struct amdgpu_device *adev, void *mqd, uint32_t pipe_id, uint32_t queue_id, uint32_t __user *wptr, uint32_t wptr_shift, uint32_t wptr_mask, struct mm_struct *mm, uint32_t inst) argument
344 hqd_sdma_load_v11(struct amdgpu_device *adev, void *mqd, uint32_t __user *wptr, struct mm_struct *mm) argument
amdgpu_amdkfd_gfx_v7.c:161 uint32_t __user *wptr, uint32_t wptr_shift,
191 valid_wptr = read_user_wptr(mm, wptr, wptr_val);
240 uint32_t __user *wptr, struct mm_struct *mm)
271 if (read_user_wptr(mm, wptr, data))
159 kgd_hqd_load(struct amdgpu_device *adev, void *mqd, uint32_t pipe_id, uint32_t queue_id, uint32_t __user *wptr, uint32_t wptr_shift, uint32_t wptr_mask, struct mm_struct *mm, uint32_t inst) argument
239 kgd_hqd_sdma_load(struct amdgpu_device *adev, void *mqd, uint32_t __user *wptr, struct mm_struct *mm) argument
amdgpu_amdkfd_gfx_v8.c:156 uint32_t __user *wptr, uint32_t wptr_shift,
215 valid_wptr = read_user_wptr(mm, wptr, wptr_val);
264 uint32_t __user *wptr, struct mm_struct *mm)
294 if (read_user_wptr(mm, wptr, data))
154 kgd_hqd_load(struct amdgpu_device *adev, void *mqd, uint32_t pipe_id, uint32_t queue_id, uint32_t __user *wptr, uint32_t wptr_shift, uint32_t wptr_mask, struct mm_struct *mm, uint32_t inst) argument
263 kgd_hqd_sdma_load(struct amdgpu_device *adev, void *mqd, uint32_t __user *wptr, struct mm_struct *mm) argument
amdgpu_amdkfd_gfx_v9.c:224 uint32_t __user *wptr, uint32_t wptr_shift,
250 if (wptr) {
251 /* Don't read wptr with get_user because the user
255 * that wptr is GPU-accessible in the queue's VMID via
282 lower_32_bits((uintptr_t)wptr));
284 upper_32_bits((uintptr_t)wptr));
385 uint32_t __user *wptr, struct mm_struct *mm)
392 uint64_t __user *wptr64 = (uint64_t __user *)wptr;
222 kgd_gfx_v9_hqd_load(struct amdgpu_device *adev, void *mqd, uint32_t pipe_id, uint32_t queue_id, uint32_t __user *wptr, uint32_t wptr_shift, uint32_t wptr_mask, struct mm_struct *mm, uint32_t inst) argument
384 kgd_hqd_sdma_load(struct amdgpu_device *adev, void *mqd, uint32_t __user *wptr, struct mm_struct *mm) argument
amdgpu_amdkfd_gfx_v9.h:32 uint32_t queue_id, uint32_t __user *wptr,
amdgpu_ih.c:61 /* add 8 bytes for the rptr/wptr shadows and
124 /* add 8 bytes for the rptr/wptr shadows and
146 * Writes an IV to the ring buffer using the CPU and increment the wptr.
152 uint32_t wptr = le32_to_cpu(*ih->wptr_cpu) >> 2; local
156 ih->ring[wptr++] = cpu_to_le32(iv[i]);
158 wptr <<= 2;
159 wptr &= ih->ptr_mask;
161 /* Only commit the new wptr if we don't overflow */
162 if (wptr != READ_ONCE(ih->rptr)) {
164 WRITE_ONCE(*ih->wptr_cpu, cpu_to_le32(wptr));
211 u32 wptr; local
[all...]
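
The amdgpu_ih.c fragment is a complete producer in miniature: the wptr shadow is kept in bytes, shifted down to index dwords, and the new value is only published if it has not caught up with rptr, so an overflowing entry is dropped rather than overwriting unread ones. A self-contained sketch of the same scheme (the ring length and the modulo in the store loop are simplifications; the real ring reserves extra space instead):

    #include <stdint.h>

    #define RING_DW   256u                   /* ring length in dwords */
    #define PTR_MASK  ((RING_DW << 2) - 1)   /* byte-offset mask, power of two */

    struct ih_ring {
        uint32_t ring[RING_DW];
        uint32_t rptr;                       /* byte offset */
        uint32_t wptr;                       /* byte offset (the shadow) */
    };

    static int ih_write_iv(struct ih_ring *ih, const uint32_t *iv, unsigned n)
    {
        uint32_t wptr = ih->wptr >> 2;       /* bytes -> dwords */
        unsigned i;

        for (i = 0; i < n; i++)
            ih->ring[wptr++ % RING_DW] = iv[i];

        wptr <<= 2;                          /* dwords -> bytes */
        wptr &= PTR_MASK;

        if (wptr == ih->rptr)                /* would look empty: overflow */
            return -1;                       /* drop; wptr stays unpublished */

        ih->wptr = wptr;                     /* commit the new wptr */
        return 0;
    }
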
amdgpu_ras.c:2106 while (data->rptr != data->wptr) {
2153 memcpy(&data->ring[data->wptr], info->entry,
2157 data->wptr = (data->aligned_element_size +
2158 data->wptr) % data->ring_size;
2211 .wptr = 0,
amdgpu_ras.h:547 unsigned int wptr; member in struct:ras_ih_data
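
The amdgpu_ras.c hits pair this wptr with the consumer loop at line 2106: the ring is empty exactly when rptr == wptr, and both pointers step by the aligned element size modulo the ring size. A sketch of the drain side (sizes and the handler callback are illustrative):

    #define ELEM_SZ  32u
    #define RING_SZ  (ELEM_SZ * 64u)

    struct ras_ring {
        unsigned char ring[RING_SZ];
        unsigned int rptr, wptr;
    };

    static void ras_drain(struct ras_ring *d,
                          void (*handle)(const void *entry))
    {
        while (d->rptr != d->wptr) {          /* empty when they meet */
            handle(&d->ring[d->rptr]);
            d->rptr = (ELEM_SZ + d->rptr) % RING_SZ;
        }
    }
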
amdgpu_ring.c:44 * GPU is currently reading, and a wptr (write pointer)
48 * wptr. The GPU then starts fetching commands and executes
94 ring->wptr_old = ring->wptr;
137 * Update the wptr (write pointer) to tell the GPU to
146 (ring->wptr & ring->funcs->align_mask);
158 * amdgpu_ring_undo - reset the wptr
162 * Reset the driver's copy of the wptr (all asics).
166 ring->wptr = ring->wptr_old;
467 * - wptr
468 * - driver's copy of wptr
[all...]
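
The amdgpu_ring.c fragment documents the producer/consumer contract (rptr marks where the GPU reads, wptr where the driver writes) and shows the two-phase submit: wptr_old is snapshotted when space is allocated, commit pads wptr up to the ring's alignment before publishing it, and undo simply restores the snapshot. A sketch of that begin/commit/undo pattern (field names mirror the hits; everything else is illustrative):

    #include <stdint.h>

    struct ring {
        uint64_t wptr;                        /* driver's working copy */
        uint64_t wptr_old;                    /* snapshot from ring_alloc() */
        uint32_t align_mask;                  /* e.g. 0x3f */
    };

    static void ring_alloc(struct ring *r)
    {
        r->wptr_old = r->wptr;                /* remember the starting point */
    }

    static uint32_t ring_commit(struct ring *r)
    {
        /* dwords of padding needed to reach the alignment boundary */
        uint32_t pad = (r->align_mask + 1 - (r->wptr & r->align_mask))
                        & r->align_mask;

        r->wptr += pad;                       /* NOP packets fill the gap */
        return pad;                           /* then wptr is handed to HW */
    }

    static void ring_undo(struct ring *r)
    {
        r->wptr = r->wptr_old;                /* drop everything since alloc */
    }
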
amdgpu_ring.h:251 u64 wptr; member in struct:amdgpu_ring
379 ring->ring[ring->wptr++ & ring->buf_mask] = v;
380 ring->wptr &= ring->ptr_mask;
393 occupied = ring->wptr & ring->buf_mask;
410 ring->wptr += count_dw;
411 ring->wptr &= ring->ptr_mask;
433 cur = (ring->wptr - 1) & ring->buf_mask;
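
The amdgpu_ring.h helpers rely on power-of-two masks: buf_mask wraps an index into the buffer for stores, while the pointer mask keeps wptr/rptr distances well defined across wraparound. A single-mask sketch of the occupancy math (the real code distinguishes buf_mask from the wider ptr_mask):

    #include <stdint.h>

    #define RING_DW  1024u
    #define PTR_MASK (RING_DW - 1)

    static uint32_t ring_used(uint64_t wptr, uint64_t rptr)
    {
        /* valid even after wraparound, thanks to the power-of-two mask */
        return (uint32_t)((wptr - rptr) & PTR_MASK);
    }

    static uint32_t ring_free(uint64_t wptr, uint64_t rptr)
    {
        return RING_DW - 1 - ring_used(wptr, rptr);   /* one slot kept free */
    }
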
amdgpu_ring_mux.c:212 void amdgpu_ring_mux_set_wptr(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring, u64 wptr) argument
228 /* We could skip this set wptr as preemption in process. */
238 e->sw_wptr = wptr;
239 e->start_ptr_in_hw_ring = mux->real_ring->wptr;
242 if (ring->hw_prio > AMDGPU_RING_PRIO_DEFAULT || mux->wptr_resubmit < wptr) {
243 amdgpu_ring_mux_copy_pkt_from_sw_ring(mux, ring, e->sw_cptr, wptr);
244 e->end_ptr_in_hw_ring = mux->real_ring->wptr;
247 e->end_ptr_in_hw_ring = mux->real_ring->wptr;
339 amdgpu_ring_mux_set_wptr(mux, ring, ring->wptr);
427 offset = ring->wptr
[all...]
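
amdgpu_ring_mux_set_wptr() advances a software ring's wptr and then copies the freshly written span into the single real ring, recording where it lands there (start_ptr_in_hw_ring/end_ptr_in_hw_ring). A stripped-down sketch of the copy step under those assumptions (all types and names illustrative):

    #include <stdint.h>

    #define SW_DW    1024u
    #define SW_MASK  (SW_DW - 1)

    struct sw_ring { uint32_t ring[SW_DW]; };
    struct hw_ring { uint64_t wptr; };

    static void hw_ring_write(struct hw_ring *hw, uint32_t v)
    {
        (void)v;                 /* stand-in for the real ring store */
        hw->wptr++;
    }

    /* Copy packets produced since the last copy, i.e. [sw_cptr, sw_wptr),
     * and return the real ring's wptr for end_ptr_in_hw_ring. */
    static uint64_t mux_copy(struct hw_ring *hw, const struct sw_ring *sw,
                             uint64_t sw_cptr, uint64_t sw_wptr)
    {
        while (sw_cptr != sw_wptr)
            hw_ring_write(hw, sw->ring[sw_cptr++ & SW_MASK]);
        return hw->wptr;
    }
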
amdgpu_ring_mux.h:108 void amdgpu_ring_mux_set_wptr(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring, u64 wptr);
amdgpu_umsch_mm.c:524 WDOORBELL32(ring->doorbell_index, ring->wptr << 2);
526 WREG32(umsch->rb_wptr, ring->wptr << 2);
amdgpu_vcn.c:1106 write_pos = plog->wptr;
1186 log_buf->wptr = log_buf->header_size;
amdgpu_vcn.h:337 uint32_t wptr; member in struct:amdgpu_fw_shared_rb_ptrs_struct
435 uint32_t wptr; member in struct:amdgpu_vcn_fwlog

Completed in 345 milliseconds
