/linux-master/mm/
mmu_gather.c
     20  struct mmu_gather_batch *batch;  [local]
     26  batch = tlb->active;
     27  if (batch->next) {
     28  tlb->active = batch->next;
     35  batch = (void *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
     36  if (!batch)
     40  batch->next = NULL;
     41  batch->nr = 0;
     42  batch->max = MAX_GATHER_BATCH;
     44  tlb->active->next = batch;
     51  tlb_flush_rmap_batch(struct mmu_gather_batch *batch, struct vm_area_struct *vma)  [argument]
    101  __tlb_batch_free_encoded_pages(struct mmu_gather_batch *batch)  [argument]
    146  struct mmu_gather_batch *batch;  [local]
    155  struct mmu_gather_batch *batch, *next;  [local]
    169  struct mmu_gather_batch *batch;  [local]
    222  __tlb_remove_table_free(struct mmu_table_batch *batch)  [argument]
    285  tlb_remove_table_free(struct mmu_table_batch *batch)  [argument]
    292  tlb_remove_table_free(struct mmu_table_batch *batch)  [argument]
    322  struct mmu_table_batch **batch = &tlb->batch;  [local]
    333  struct mmu_table_batch **batch = &tlb->batch;  [local]
    [all...]
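The hits at lines 26-44 sketch how mmu_gather grows a chain of page-sized batches: reuse a spare batch already linked after tlb->active if one exists, otherwise allocate a fresh one and link it in. A minimal, self-contained C sketch of that grow-on-demand chain, with toy types and malloc() standing in for __get_free_page(GFP_NOWAIT | __GFP_NOWARN):

    #include <stdlib.h>

    #define MAX_GATHER_BATCH 496    /* toy capacity; the kernel derives it from PAGE_SIZE */

    struct batch {
        struct batch *next;
        unsigned int nr, max;
        void *pages[MAX_GATHER_BATCH];
    };

    struct gather {
        struct batch *active;
        struct batch local;         /* embedded first batch, never freed */
    };

    /* Reuse a spare batch if one is already chained after ->active,
     * otherwise allocate a new one and link it in. The kernel falls
     * back to flushing early when the allocation fails. */
    static struct batch *next_batch(struct gather *tlb)
    {
        struct batch *batch = tlb->active;

        if (batch->next) {
            tlb->active = batch->next;
            return tlb->active;
        }

        batch = malloc(sizeof(*batch));
        if (!batch)
            return NULL;            /* caller must flush now instead */

        batch->next = NULL;
        batch->nr = 0;
        batch->max = MAX_GATHER_BATCH;
        tlb->active->next = batch;
        tlb->active = batch;
        return batch;
    }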
/linux-master/include/trace/events/
intel_ifs.h
     13  TP_PROTO(int batch, int start, int stop, u64 status),
     15  TP_ARGS(batch, start, stop, status),
     18  __field( int, batch )
     25  __entry->batch = batch;
     31  TP_printk("batch: %.2d, start: %.4x, stop: %.4x, status: %.16llx",
     32  __entry->batch,
/linux-master/arch/powerpc/include/asm/book3s/64/
tlbflush-hash.h
     25  extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);
     31  struct ppc64_tlb_batch *batch;  [local]
     40  batch = this_cpu_ptr(&ppc64_tlb_batch);
     41  batch->active = 1;
     46  struct ppc64_tlb_batch *batch;  [local]
     50  batch = this_cpu_ptr(&ppc64_tlb_batch);
     52  if (batch->index)
     53  __flush_tlb_pending(batch);
     54  batch->active = 0;
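Lines 40-54 sit inside the powerpc lazy-MMU enter/leave hooks: entering marks the per-CPU batch active, and leaving flushes whatever is still queued before clearing the flag. A hedged userspace sketch of that lifecycle; the function names follow the arch_*_lazy_mmu_mode convention and _Thread_local stands in for the per-CPU variable:

    struct tlb_batch {
        int active;
        unsigned int index;         /* number of queued invalidations */
    };

    /* Toy stand-in for the per-CPU variable reached via this_cpu_ptr(). */
    static _Thread_local struct tlb_batch ppc64_tlb_batch;

    static void __flush_tlb_pending(struct tlb_batch *batch)
    {
        /* ... issue the queued hash-table invalidations ... */
        batch->index = 0;
    }

    static void arch_enter_lazy_mmu_mode(void)
    {
        ppc64_tlb_batch.active = 1; /* PTE updates are batched from here on */
    }

    static void arch_leave_lazy_mmu_mode(void)
    {
        struct tlb_batch *batch = &ppc64_tlb_batch;

        if (batch->index)           /* flush whatever is still queued */
            __flush_tlb_pending(batch);
        batch->active = 0;
    }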
/linux-master/arch/powerpc/mm/book3s64/
hash_tlb.c
     37  * immediately or will batch it up if the current CPU has an active
     38  * batch on it.
     44  struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);  [local]
     51  i = batch->index;
    100  * Check if we have an active batch on this CPU. If not, just
    103  if (!batch->active) {
    110  * This can happen when we are in the middle of a TLB batch and
    113  * up scanning and resetting referenced bits then our batch context
    117  * batch
    119  if (i != 0 && (mm != batch ...
    144  __flush_tlb_pending(struct ppc64_tlb_batch *batch)  [argument]
    [all...]
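The comments and the checks at lines 103 and 119 describe the flush policy: with no active batch, flush the entry immediately; with a batch that already holds entries for a different mm, drain it before queueing. A toy version of that decision logic, under the assumption of illustrative sizes and types rather than the real PPC64 ones:

    #define TLB_BATCH_NR 192        /* toy size; the real PPC64_TLB_BATCH_NR differs */

    struct tlb_batch {
        int active;
        void *mm;                   /* address space the queued entries belong to */
        unsigned long vpn[TLB_BATCH_NR];
        unsigned int index;
    };

    static void flush_one(void *mm, unsigned long vpn)
    {
        /* ... invalidate a single translation ... */
        (void)mm; (void)vpn;
    }

    static void flush_pending(struct tlb_batch *batch)
    {
        /* ... invalidate vpn[0..index) for batch->mm ... */
        batch->index = 0;
    }

    /* Queue one invalidation, mirroring the two decisions visible above:
     * no active batch -> flush immediately (line 103); batch already
     * holding entries for a different mm -> drain it first (line 119). */
    static void queue_invalidate(struct tlb_batch *batch, void *mm, unsigned long vpn)
    {
        unsigned int i = batch->index;

        if (!batch->active) {
            flush_one(mm, vpn);
            return;
        }
        if (i != 0 && mm != batch->mm)
            flush_pending(batch);

        batch->mm = mm;
        batch->vpn[batch->index++] = vpn;
        if (batch->index == TLB_BATCH_NR)
            flush_pending(batch);
    }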
/linux-master/drivers/gpu/drm/i915/selftests/
igt_spinner.c
     97  if (!spin->batch) {
    105  spin->batch = vaddr;
    131  u32 *batch;  [local]
    139  if (!spin->batch) {
    160  batch = spin->batch;
    163  *batch++ = MI_STORE_DWORD_IMM_GEN4;
    164  *batch++ = lower_32_bits(hws_address(hws, rq));
    165  *batch++ = upper_32_bits(hws_address(hws, rq));
    167  *batch ...
    [all...]
igt_spinner.h
     25  u32 *batch;  [member in struct igt_spinner]
/linux-master/drivers/iommu/iommufd/
pages.c
    275  static void batch_clear(struct pfn_batch *batch)  [argument]
    277  batch->total_pfns = 0;
    278  batch->end = 0;
    279  batch->pfns[0] = 0;
    280  batch->npfns[0] = 0;
    285  * batch
    287  static void batch_clear_carry(struct pfn_batch *batch, unsigned int keep_pfns)  [argument]
    290  return batch_clear(batch);
    293  WARN_ON(!batch->end ||
    294  batch ...
    303  batch_skip_carry(struct pfn_batch *batch, unsigned int skip_pfns)  [argument]
    315  __batch_init(struct pfn_batch *batch, size_t max_pages, void *backup, size_t backup_len)  [argument]
    332  batch_init(struct pfn_batch *batch, size_t max_pages)  [argument]
    337  batch_init_backup(struct pfn_batch *batch, size_t max_pages, void *backup, size_t backup_len)  [argument]
    343  batch_destroy(struct pfn_batch *batch, void *backup)  [argument]
    350  batch_add_pfn(struct pfn_batch *batch, unsigned long pfn)  [argument]
    375  batch_from_domain(struct pfn_batch *batch, struct iommu_domain *domain, struct iopt_area *area, unsigned long start_index, unsigned long last_index)  [argument]
    426  batch_from_domain_continue(struct pfn_batch *batch, struct iommu_domain *domain, struct iopt_area *area, unsigned long start_index, unsigned long last_index)  [argument]
    475  batch_to_domain(struct pfn_batch *batch, struct iommu_domain *domain, struct iopt_area *area, unsigned long start_index)  [argument]
    519  batch_from_xarray(struct pfn_batch *batch, struct xarray *xa, unsigned long start_index, unsigned long last_index)  [argument]
    540  batch_from_xarray_clear(struct pfn_batch *batch, struct xarray *xa, unsigned long start_index, unsigned long last_index)  [argument]
    615  batch_from_pages(struct pfn_batch *batch, struct page **pages, size_t npages)  [argument]
    625  batch_unpin(struct pfn_batch *batch, struct iopt_pages *pages, unsigned int first_page_off, size_t npages)  [argument]
    666  batch_rw(struct pfn_batch *batch, void *data, unsigned long offset, unsigned long length, unsigned int flags)  [argument]
    939  struct pfn_batch batch;  [member in struct pfn_reader]
   1197  iopt_area_unpin_domain(struct pfn_batch *batch, struct iopt_area *area, struct iopt_pages *pages, struct iommu_domain *domain, unsigned long start_index, unsigned long last_index, unsigned long *unmapped_end_index, unsigned long real_last_index)  [argument]
   1266  struct pfn_batch batch;  [local]
   1519  iopt_pages_unpin_xarray(struct pfn_batch *batch, struct iopt_pages *pages, unsigned long start_index, unsigned long end_index)  [argument]
   1548  struct pfn_batch batch;  [local]
    [all...]
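pfn_batch stores PFNs run-length encoded: pfns[] holds the first PFN of each contiguous run and npfns[] its length, which lets batch_add_pfn (line 350) coalesce physically contiguous pages. A simplified sketch of that coalescing under those assumptions; the real code additionally bounds npfns to a bitfield width and supports a backup buffer:

    #include <stdbool.h>

    #define BATCH_CAP 512

    struct pfn_batch {
        unsigned long pfns[BATCH_CAP];  /* first PFN of each contiguous run */
        unsigned int npfns[BATCH_CAP];  /* length of that run */
        unsigned int end;               /* number of runs in use */
        unsigned int total_pfns;
    };

    static void batch_clear(struct pfn_batch *batch)
    {
        batch->total_pfns = 0;
        batch->end = 0;
        batch->pfns[0] = 0;
        batch->npfns[0] = 0;
    }

    /* Add one PFN, extending the last run when it is physically
     * contiguous. Returns false when the batch is full and must be
     * flushed to its consumer before more PFNs can be added. */
    static bool batch_add_pfn(struct pfn_batch *batch, unsigned long pfn)
    {
        unsigned int end = batch->end;

        if (end && pfn == batch->pfns[end - 1] + batch->npfns[end - 1]) {
            batch->npfns[end - 1]++;    /* contiguous: extend the run */
        } else {
            if (end == BATCH_CAP)
                return false;
            batch->pfns[end] = pfn;
            batch->npfns[end] = 1;
            batch->end = end + 1;
        }
        batch->total_pfns++;
        return true;
    }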
/linux-master/tools/testing/selftests/bpf/progs/
test_bpf_ma.c
     60  static __always_inline void batch_alloc(struct bpf_map *map, unsigned int batch, unsigned int idx)  [argument]
     66  for (i = 0; i < batch; i++) {
     87  static __always_inline void batch_free(struct bpf_map *map, unsigned int batch, unsigned int idx)  [argument]
     93  for (i = 0; i < batch; i++) {
    109  static __always_inline void batch_percpu_alloc(struct bpf_map *map, unsigned int batch,  [argument]
    116  for (i = 0; i < batch; i++) {
    137  static __always_inline void batch_percpu_free(struct bpf_map *map, unsigned int batch,  [argument]
    144  for (i = 0; i < batch; i++) {
    158  #define CALL_BATCH_ALLOC(size, batch, idx) \
    159  batch_alloc((struct bpf_map *)(&array_##size), batch, id ...
    [all...]
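CALL_BATCH_ALLOC (line 158) pastes its size argument into a map variable name with the ## operator. A tiny standalone illustration of the same token-pasting pattern, with toy maps and printf in place of the BPF allocator:

    #include <stdio.h>

    struct toy_map { const char *name; };

    static struct toy_map array_64  = { "array_64"  };
    static struct toy_map array_128 = { "array_128" };

    static void batch_alloc(struct toy_map *map, unsigned int batch, unsigned int idx)
    {
        for (unsigned int i = 0; i < batch; i++)
            printf("alloc #%u of %u from %s (case %u)\n", i, batch, map->name, idx);
    }

    /* Same shape as the selftest macro: ##size selects the map variable. */
    #define CALL_BATCH_ALLOC(size, batch, idx) \
        batch_alloc(&array_##size, batch, idx)

    int main(void)
    {
        CALL_BATCH_ALLOC(64, 2, 0);     /* expands to batch_alloc(&array_64, 2, 0) */
        CALL_BATCH_ALLOC(128, 2, 1);
        return 0;
    }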
/linux-master/drivers/gpu/drm/i915/
i915_cmd_parser.h
     19  struct i915_vma *batch,
/linux-master/drivers/gpu/drm/i915/gt/
gen7_renderclear.c
    235  gen7_emit_state_base_address(struct batch_chunk *batch,  [argument]
    238  u32 *cs = batch_alloc_items(batch, 0, 10);
    242  *cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY;
    244  *cs++ = (batch_addr(batch) + surface_state_base) | BASE_ADDRESS_MODIFY;
    246  *cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY;
    248  *cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY;
    250  *cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY;
    257  batch_advance(batch, cs);
    261  gen7_emit_vfe_state(struct batch_chunk *batch,  [argument]
    267  u32 *cs = batch_alloc_items(batch, 3 ...
    290  gen7_emit_interface_descriptor_load(struct batch_chunk *batch, const u32 interface_descriptor, unsigned int count)  [argument]
    309  gen7_emit_media_object(struct batch_chunk *batch, unsigned int media_object_index)  [argument]
    340  gen7_emit_pipeline_flush(struct batch_chunk *batch)  [argument]
    355  gen7_emit_pipeline_invalidate(struct batch_chunk *batch)  [argument]
    435  u32 *batch;  [local]
    [all...]
gen8_engine_cs.h
     53  __gen8_emit_pipe_control(u32 *batch, u32 bit_group_0,  [argument]
     56  memset(batch, 0, 6 * sizeof(u32));
     58  batch[0] = GFX_OP_PIPE_CONTROL(6) | bit_group_0;
     59  batch[1] = bit_group_1;
     60  batch[2] = offset;
     62  return batch + 6;
     65  static inline u32 *gen8_emit_pipe_control(u32 *batch,  [argument]
     68  return __gen8_emit_pipe_control(batch, 0, bit_group_1, offset);
     71  static inline u32 *gen12_emit_pipe_control(u32 *batch, u32 bit_group_0,  [argument]
     74  return __gen8_emit_pipe_control(batch, bit_group_ ...
    [all...]
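__gen8_emit_pipe_control zeroes a fixed six-dword packet, fills the header and payload, and returns the advanced write pointer so emissions chain back to back. A compilable stand-alone version of that pattern; TOY_PIPE_CONTROL is a made-up opcode macro, the real GFX_OP_PIPE_CONTROL encoding lives in the i915 headers:

    #include <stdint.h>
    #include <string.h>

    /* Toy stand-in for GFX_OP_PIPE_CONTROL(); not the real encoding. */
    #define TOY_PIPE_CONTROL(len) (0xDEAD0000u | (len))

    static inline uint32_t *emit_pipe_control(uint32_t *batch, uint32_t bit_group_0,
                                              uint32_t bit_group_1, uint32_t offset)
    {
        memset(batch, 0, 6 * sizeof(uint32_t));     /* dwords 3..5 stay zero */
        batch[0] = TOY_PIPE_CONTROL(6) | bit_group_0;
        batch[1] = bit_group_1;
        batch[2] = offset;
        return batch + 6;                           /* chainable write pointer */
    }

    int main(void)
    {
        uint32_t ring[32];
        uint32_t *cs = ring;

        /* Returning the advanced pointer lets emissions chain. */
        cs = emit_pipe_control(cs, 0, 0x1, 0x100);
        cs = emit_pipe_control(cs, 0x2, 0x4, 0x200);
        return (int)(cs - ring);                    /* 12 dwords emitted */
    }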
intel_renderstate.h
     19  const u32 *batch;  [member in struct intel_renderstate_rodata]
     26  .batch = gen ## _g ## _null_state_batch, \
intel_lrc.c
     99  /* Close the batch; used mainly by live_lrc_layout() */
    969  * A context is actually a big batch buffer with several
   1366  * batch buffer to ensure the value takes effect properly. All other bits
   1662  * but there is a slight complication as this is applied in WA batch where the
   1668  * it for a short period and this batch is non-preemptible. We can of course
   1676  gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, u32 *batch)  [argument]
   1679  *batch++ = MI_STORE_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT;
   1680  *batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4);
   1681  *batch++ = intel_gt_scratch_offset(engine->gt,
   1683  *batch ...
   1718  gen8_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)  [argument]
   1756  emit_lri(u32 *batch, const struct lri *lri, unsigned int count)  [argument]
   1770  gen9_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)  [argument]
   1883  void *batch, *batch_ptr;  [local]
    [all...]
selftest_hangcheck.c
     38  u32 *batch;  [member in struct hang]
     81  h->batch = vaddr;
    113  u32 *batch;  [local]
    133  h->batch = vaddr;
    171  batch = h->batch;
    173  *batch++ = MI_STORE_DWORD_IMM_GEN4;
    174  *batch++ = lower_32_bits(hws_address(hws, rq));
    175  *batch++ = upper_32_bits(hws_address(hws, rq));
    176  *batch ...
    [all...]
/linux-master/drivers/gpu/drm/i915/gem/selftests/
igt_gem_utils.c
    116  struct i915_vma *batch;  [local]
    123  batch = igt_emit_store_dw(vma, offset, count, val);
    124  if (IS_ERR(batch))
    125  return PTR_ERR(batch);
    133  err = igt_vma_move_to_active_unlocked(batch, rq, 0);
    146  i915_vma_offset(batch),
    147  i915_vma_size(batch),
    155  i915_vma_unpin_and_release(&batch, 0);
i915_gem_client_blt.c
    103  struct i915_vma *batch;  [member in struct tiled_blits]
    139  prepare_blit(const struct tiled_blits *t, struct blit_buffer *dst, struct blit_buffer *src, struct drm_i915_gem_object *batch)  [argument]
    142  struct drm_i915_gem_object *batch)
    144  const int ver = GRAPHICS_VER(to_i915(batch->base.dev));
    149  cs = i915_gem_object_pin_map_unlocked(batch, I915_MAP_WC);
    169  if (GRAPHICS_VER_FULL(to_i915(batch->base.dev)) >= IP_VER(12, 50))
    180  if (GRAPHICS_VER_FULL(to_i915(batch->base.dev)) >= IP_VER(12, 50))
    251  i915_gem_object_flush_map(batch);
    252  i915_gem_object_unpin_map(batch);
    265  i915_vma_put(t->batch);
    304  t->batch ...
    [all...]
/linux-master/drivers/gpu/drm/vmwgfx/
vmwgfx_mob.c
    237  vmw_otable_batch_setup(struct vmw_private *dev_priv, struct vmw_otable_batch *batch)  [argument]
    238  struct vmw_otable_batch *batch)
    242  struct vmw_otable *otables = batch->otables;
    247  for (i = 0; i < batch->num_otables; ++i) {
    257  &batch->otable_bo);
    262  for (i = 0; i < batch->num_otables; ++i) {
    263  if (!batch->otables[i].enabled)
    267  &batch->otable_bo->tbo,
    278  for (i = 0; i < batch->num_otables; ++i) {
    279  if (batch->otables[i].enabled)
    281  &batch ...
    332  vmw_otable_batch_takedown(struct vmw_private *dev_priv, struct vmw_otable_batch *batch)  [argument]
    [all...]
/linux-master/arch/riscv/mm/
tlbflush.c
    221  void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,  [argument]
    225  cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
    233  void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)  [argument]
    235  __flush_tlb_range(&batch->cpumask, FLUSH_TLB_NO_ASID, 0,
    237  cpumask_clear(&batch->cpumask);
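The riscv implementation accumulates the CPUs of every pending unmap into one cpumask, then issues a single ranged flush for the union and clears the mask. A toy analogue using a plain 64-bit mask in place of struct cpumask:

    #include <stdint.h>

    struct unmap_batch {
        uint64_t cpumask;               /* toy: one bit per CPU, up to 64 */
    };

    /* Accumulate the CPUs an mm has run on; nothing is flushed yet. */
    static void tlbbatch_add_pending(struct unmap_batch *batch, uint64_t mm_cpumask)
    {
        batch->cpumask |= mm_cpumask;   /* cpumask_or() in the kernel */
    }

    /* One flush covering every accumulated CPU, then reset. */
    static void tlbbatch_flush(struct unmap_batch *batch)
    {
        /* ... flush the full range on every CPU set in batch->cpumask ... */
        batch->cpumask = 0;             /* cpumask_clear() */
    }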
/linux-master/net/core/
netclassid_cgroup.c
     64  unsigned int batch;  [member in struct update_classid_context]
     76  if (--ctx->batch == 0) {
     77  ctx->batch = UPDATE_CLASSID_BATCH;
     87  .batch = UPDATE_CLASSID_BATCH
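The pattern here is a bounded walk: decrement a per-context batch counter and yield every UPDATE_CLASSID_BATCH items so a long socket scan cannot hog the CPU. A userspace sketch of the same throttling; the batch size below is an assumption, and sched_yield() stands in for dropping the lock and calling cond_resched():

    #include <sched.h>

    #define UPDATE_CLASSID_BATCH 1024   /* assumed value, not copied from source */

    struct update_classid_context {
        unsigned int classid;           /* value to tag each socket with */
        unsigned int batch;             /* items left before we must yield */
    };

    /* Called once per file descriptor; returns nonzero when the walk
     * should pause and restart so other work can run. */
    static int update_classid_sock(struct update_classid_context *ctx, int fd)
    {
        (void)fd;                       /* ... set ctx->classid on fd's socket ... */
        if (--ctx->batch == 0) {
            ctx->batch = UPDATE_CLASSID_BATCH;
            sched_yield();
            return 1;
        }
        return 0;
    }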
/linux-master/tools/testing/selftests/bpf/map_tests/
htab_map_batch_ops.c
     79  __u32 batch, count, total, total_success;  [local]
    109  err = bpf_map_lookup_and_delete_batch(map_fd, NULL, &batch, keys,
    119  err = bpf_map_lookup_and_delete_batch(map_fd, NULL, &batch, keys,
    127  err = bpf_map_lookup_and_delete_batch(map_fd, NULL, &batch, keys,
    153  total ? &batch : NULL,
    154  &batch, keys + total,
    191  CHECK((err && errno != ENOENT), "delete batch",
    216  total ? &batch : NULL,
    217  &batch, keys + total,
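The calls at lines 153-154 and 216-217 show the usual libbpf batch-op loop: pass NULL as in_batch on the first call, then feed the cursor written to out_batch back in until the call fails with ENOENT. A hedged sketch of that loop against the libbpf API, with key/value types (__u32) and error handling simplified for illustration:

    #include <errno.h>
    #include <bpf/bpf.h>

    /* Drain every entry from a hash map in chunks, following the
     * selftest's loop structure. */
    static int drain_map(int map_fd, __u32 *keys, __u32 *values, __u32 max_entries)
    {
        LIBBPF_OPTS(bpf_map_batch_opts, opts);
        __u32 batch, total = 0, count;
        int err;

        do {
            count = max_entries - total;
            err = bpf_map_lookup_and_delete_batch(map_fd,
                                                  total ? &batch : NULL, /* in_batch */
                                                  &batch,                /* out_batch */
                                                  keys + total,
                                                  values + total,
                                                  &count, &opts);
            if (err && errno != ENOENT)
                return -errno;          /* real failure */
            total += count;             /* entries returned this round */
        } while (!err);                 /* ENOENT: map fully drained */

        return (int)total;
    }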
lpm_trie_map_batch_ops.c
     73  __u64 batch = 0;  [local]
     98  batch = 0;
    106  total ? &batch : NULL, &batch,
    129  CHECK((err && errno != ENOENT), "delete batch",
/linux-master/arch/riscv/include/asm/
tlbflush.h
     52  void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
     56  void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);
/linux-master/drivers/xen/
gntdev.c
    799  static int gntdev_get_page(struct gntdev_copy_batch *batch, void __user *virt,  [argument]
    807  ret = pin_user_pages_fast(addr, 1, batch->writeable ? FOLL_WRITE : 0, &page);
    811  batch->pages[batch->nr_pages++] = page;
    819  static void gntdev_put_pages(struct gntdev_copy_batch *batch)  [argument]
    821  unpin_user_pages_dirty_lock(batch->pages, batch->nr_pages, batch->writeable);
    822  batch->nr_pages = 0;
    823  batch ...
    826  gntdev_copy(struct gntdev_copy_batch *batch)  [argument]
    858  gntdev_grant_copy_seg(struct gntdev_copy_batch *batch, struct gntdev_grant_copy_segment *seg, s16 __user *status)  [argument]
    956  struct gntdev_copy_batch batch;  [local]
    [all...]
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/
rx.c
     26  int batch, i;  [local]
     35  batch = xsk_buff_alloc_batch(rq->xsk_pool, xsk_buffs,
     38  /* If batch < pages_per_wqe, either:
     44  for (; batch < rq->mpwqe.pages_per_wqe; batch++) {
     45  xsk_buffs[batch] = xsk_buff_alloc(rq->xsk_pool);
     46  if (unlikely(!xsk_buffs[batch]))
     55  for (i = 0; i < batch; i++) {
     65  for (i = 0; i < batch; i++) {
     78  for (i = 0; i < batch; ...
    [all...]
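Lines 35-46 show the two-step fill: ask the pool for a whole batch, and if it comes back short, top up one buffer at a time, unwinding on failure. A self-contained toy of that bulk-then-single pattern, with malloc-based stand-ins for xsk_buff_alloc_batch() and xsk_buff_alloc():

    #include <stdlib.h>

    /* Toy pool: a bulk request may return fewer buffers than asked for,
     * which is exactly the case the mlx5 code handles. */
    static int toy_alloc_batch(void **bufs, int want)
    {
        int got = want / 2;             /* pretend the pool ran short */

        for (int i = 0; i < got; i++)
            bufs[i] = malloc(64);
        return got;
    }

    static void *toy_alloc_one(void)
    {
        return malloc(64);
    }

    /* Fill exactly `need` buffers: bulk first, then top up one by one,
     * freeing everything if a single allocation fails. */
    static int fill_wqe(void **bufs, int need)
    {
        int batch = toy_alloc_batch(bufs, need);

        for (; batch < need; batch++) {
            bufs[batch] = toy_alloc_one();
            if (!bufs[batch])
                goto err_free;
        }
        return need;

    err_free:
        while (batch--)
            free(bufs[batch]);
        return -1;
    }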
/linux-master/include/linux/mailbox/
brcm-message.h
     45  } batch;  [member of an anonymous union in struct brcm_message]