Searched refs:PAGE_SIZE (Results 276 - 300 of 3549) sorted by last modified time

/linux-master/drivers/net/ethernet/intel/idpf/
idpf_txrx.c
594 .max_len = PAGE_SIZE,
/linux-master/drivers/net/ethernet/intel/i40e/
i40e_txrx.h
130 #if (PAGE_SIZE < 8192)
138 page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
451 #if (PAGE_SIZE < 8192)
452 if (ring->rx_buf_len > (PAGE_SIZE / 2))
458 #define i40e_rx_pg_size(_ring) (PAGE_SIZE << i40e_rx_pg_order(_ring))
i40e_txrx.c
1602 #if (PAGE_SIZE >= 8192)
1969 #if (PAGE_SIZE < 8192)
1977 (SKB_WITH_OVERHEAD(PAGE_SIZE) - I40E_RXBUFFER_2048)
2004 #if (PAGE_SIZE < 8192)
2026 #if (PAGE_SIZE < 8192)
2559 #if (PAGE_SIZE > 4096)
2560 /* At larger PAGE_SIZE, frame_sz depend on len size */
3936 /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
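
Read together, the i40e hits sketch the driver's page-splitting scheme: on systems with pages smaller than 8 KiB, each page is cut into two Rx buffers, and a higher page order is needed only when the configured buffer exceeds half a page (lines 451-452 and 458). A minimal C sketch of that arithmetic, with hypothetical names standing in for the driver's ring-based helpers:

#include <linux/mm.h>   /* PAGE_SIZE */

static inline unsigned int rx_pg_order(unsigned int rx_buf_len)
{
#if (PAGE_SIZE < 8192)
        /* Two buffers per page only works while one fits in half a page. */
        if (rx_buf_len > (PAGE_SIZE / 2))
                return 1;       /* fall back to an order-1 (two-page) allocation */
#endif
        return 0;
}

static inline unsigned int rx_pg_size(unsigned int rx_buf_len)
{
        /* Mirrors line 458: the backing page size scales with the order. */
        return PAGE_SIZE << rx_pg_order(rx_buf_len);
}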
/linux-master/drivers/net/ethernet/intel/e1000e/
netdev.c
186 PAGE_SIZE, true);
750 0, PAGE_SIZE,
864 PAGE_SIZE,
1401 PAGE_SIZE,
1407 PAGE_SIZE,
1427 dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
1434 skb->truesize += PAGE_SIZE;
1494 skb->truesize += PAGE_SIZE;
1547 dma_unmap_page(&pdev->dev, buffer_info->dma, PAGE_SIZE,
1689 PAGE_SIZE, DMA_FROM_DEVIC
[all...]
/linux-master/drivers/net/ethernet/freescale/
fec_main.c
1709 xdp_init_buff(&xdp, PAGE_SIZE, &rxq->xdp_rxq);
1774 skb = build_skb(page_address(page), PAGE_SIZE);
/linux-master/drivers/mtd/devices/
block2mtd.c
70 max = page_address(page) + PAGE_SIZE;
74 memset(page_address(page), 0xff, PAGE_SIZE);
111 int offset = from & (PAGE_SIZE-1);
115 if ((offset + len) > PAGE_SIZE)
116 cpylen = PAGE_SIZE - offset; // multiple pages
149 if ((offset+len) > PAGE_SIZE)
150 cpylen = PAGE_SIZE - offset; // multiple pages
315 dev->mtd.writebufsize = PAGE_SIZE;
412 size_t erase_size = PAGE_SIZE;
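
The offset/cpylen pairs at lines 111-116 and 149-150 are the standard page-chunking idiom: clamp each copy at the end of the current page, then advance. A hedged sketch of the loop those fragments imply; copy_one_page() is a hypothetical stand-in for the per-page copy:

static int walk_pages(loff_t from, size_t len, u8 *buf)
{
        while (len) {
                int offset = from & (PAGE_SIZE - 1);    /* offset inside the page */
                size_t cpylen = len;

                if (offset + len > PAGE_SIZE)           /* request spans pages */
                        cpylen = PAGE_SIZE - offset;    /* clamp to page end */

                copy_one_page(from >> PAGE_SHIFT, offset, buf, cpylen);

                buf += cpylen;
                from += cpylen;
                len -= cpylen;
        }
        return 0;
}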
/linux-master/drivers/gpu/drm/i915/gt/
intel_engine_cs.c
43 #define HSW_CXT_TOTAL_SIZE (17 * PAGE_SIZE)
45 #define DEFAULT_LR_CONTEXT_RENDER_SIZE (22 * PAGE_SIZE)
46 #define GEN8_LR_CONTEXT_RENDER_SIZE (20 * PAGE_SIZE)
47 #define GEN9_LR_CONTEXT_RENDER_SIZE (22 * PAGE_SIZE)
48 #define GEN11_LR_CONTEXT_RENDER_SIZE (14 * PAGE_SIZE)
50 #define GEN8_LR_CONTEXT_OTHER_SIZE (2 * PAGE_SIZE)
278 BUILD_BUG_ON(I915_GTT_PAGE_SIZE != PAGE_SIZE);
301 PAGE_SIZE);
305 PAGE_SIZE);
322 return round_up(cxt_size * 64, PAGE_SIZE);
[all...]
gen8_ppgtt.c
493 drm_clflush_virt_range(vaddr, PAGE_SIZE);
497 drm_clflush_virt_range(vaddr, PAGE_SIZE);
606 drm_clflush_virt_range(vaddr, PAGE_SIZE);
690 drm_clflush_virt_range(vaddr, PAGE_SIZE);
706 drm_clflush_virt_range(vaddr, PAGE_SIZE);
727 drm_clflush_virt_range(vaddr, PAGE_SIZE);
968 obj = i915_gem_object_create_lmem(i915, PAGE_SIZE,
972 obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
/linux-master/drivers/block/null_blk/
main.c
48 #define MAP_SZ ((PAGE_SIZE >> SECTOR_SHIFT) + 2)
270 return snprintf(page, PAGE_SIZE, "%u\n", val);
276 return snprintf(page, PAGE_SIZE, "%lu\n", val);
281 return snprintf(page, PAGE_SIZE, "%u\n", val);
666 return snprintf(page, PAGE_SIZE,
848 nullb->dev->curr_cache -= PAGE_SIZE;
865 nullb->dev->curr_cache += PAGE_SIZE;
1011 nullb->dev->curr_cache -= PAGE_SIZE;
1080 null_make_cache_space(nullb, PAGE_SIZE);
1893 lim.virt_boundary_mask = PAGE_SIZE
[all...]
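
MAP_SZ at line 48 sizes a per-page bitmap: one bit per 512-byte sector held in the page (PAGE_SIZE >> SECTOR_SHIFT, i.e. 8 bits for a 4 KiB page) plus two extra bits, which null_blk appears to reserve as page-state flags. A sketch under that reading, with illustrative struct and helper names:

#define MAP_SZ  ((PAGE_SIZE >> SECTOR_SHIFT) + 2)

struct cache_page {
        struct page *page;
        DECLARE_BITMAP(bitmap, MAP_SZ); /* sector-valid bits + 2 flag bits */
};

static void mark_sector_cached(struct cache_page *cp, sector_t sect)
{
        /* Bit index is the sector's offset within its backing page. */
        __set_bit(sect & ((PAGE_SIZE >> SECTOR_SHIFT) - 1), cp->bitmap);
}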
/linux-master/drivers/base/
core.c
2446 if (ret >= (ssize_t)PAGE_SIZE) {
/linux-master/arch/x86/mm/pat/
memtype.c
418 unsigned long end_pfn = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
674 if (x86_platform.is_untracked_pat_range(paddr, paddr + PAGE_SIZE))
677 if (pat_pagerange_is_ram(paddr, paddr + PAGE_SIZE)) {
811 cursor += PAGE_SIZE;
1038 while (size > PAGE_SIZE) {
1039 size -= PAGE_SIZE;
1040 paddr += PAGE_SIZE;
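
Two PAGE_SIZE idioms recur in memtype.c: rounding a byte address up to a page frame number (line 418) and stepping through a physical range a page at a time (lines 1038-1040, where the final page deliberately falls out of the loop to be handled separately). A compact sketch of both; check_one_page() is hypothetical:

static unsigned long end_to_pfn(u64 end)
{
        /* Round up so a partial trailing page still counts. */
        return (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
}

static void for_each_full_page(u64 paddr, u64 size)
{
        while (size > PAGE_SIZE) {
                check_one_page(paddr);
                size -= PAGE_SIZE;
                paddr += PAGE_SIZE;
        }
        /* The last (possibly partial) page is left for the caller. */
}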
/linux-master/arch/x86/kernel/
callthunks.c
148 dest < (void*)hypercall_page + PAGE_SIZE)
kvm.c
960 nr_pages = DIV_ROUND_UP(entry->size, PAGE_SIZE);
/linux-master/arch/s390/mm/
fault.c
95 return teid.addr * PAGE_SIZE;
473 regs->int_parm_long = (teid.addr * PAGE_SIZE) | (regs->psw.addr & PAGE_MASK);
/linux-master/arch/s390/kernel/
perf_pai_crypto.c
453 memcpy((void *)PAI_SAVE_AREA(event), cpump->page, PAGE_SIZE);
/linux-master/arch/riscv/mm/
tlbflush.c
72 local_flush_tlb_range_asid(start, end - start, PAGE_SIZE, FLUSH_TLB_NO_ASID);
151 0, FLUSH_TLB_MAX_SIZE, PAGE_SIZE);
165 addr, PAGE_SIZE, PAGE_SIZE);
174 stride_size = PAGE_SIZE;
193 stride_size = PAGE_SIZE;
204 start, end - start, PAGE_SIZE);
236 FLUSH_TLB_MAX_SIZE, PAGE_SIZE);
/linux-master/arch/riscv/kernel/
patch.c
69 bool across_pages = (((uintptr_t)addr & ~PAGE_MASK) + len) > PAGE_SIZE;
74 if (len + offset_in_page(addr) > 2 * PAGE_SIZE)
86 patch_map(addr + PAGE_SIZE, FIX_TEXT_POKE1);
106 bool across_pages = (((uintptr_t) addr & ~PAGE_MASK) + len) > PAGE_SIZE;
112 if (len + offset_in_page(addr) > 2 * PAGE_SIZE)
132 patch_map(addr + PAGE_SIZE, FIX_TEXT_POKE1);
172 * loop with len <= 2 * PAGE_SIZE.
175 size = min_t(size_t, PAGE_SIZE * 2 - offset_in_page(addr + patched), len - patched);
207 * because __patch_insn_write() can only handle len <= 2 * PAGE_SIZE.
210 size = min_t(size_t, PAGE_SIZE *
[all...]
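
The riscv patching code maps kernel text through two fixmap slots, so one pass can write at most the bytes remaining in a 2 * PAGE_SIZE window starting at the current address (lines 175 and 210 show the min_t clamp). A sketch of that chunking loop; write_window() is a hypothetical stand-in for the map-write-unmap step:

static int patch_text_chunked(void *addr, const void *insns, size_t len)
{
        size_t patched = 0;

        while (patched < len) {
                void *p = addr + patched;
                /* Bytes left in the current two-page fixmap window. */
                size_t size = min_t(size_t,
                                    PAGE_SIZE * 2 - offset_in_page(p),
                                    len - patched);

                write_window(p, insns + patched, size);
                patched += size;
        }
        return 0;
}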
/linux-master/tools/testing/selftests/kvm/include/x86_64/
processor.h
361 #define PAGE_SIZE (1ULL << PAGE_SHIFT)
362 #define PAGE_MASK (~(PAGE_SIZE-1) & PHYSICAL_PAGE_MASK)
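
These two selftest macros enable the usual alignment helpers. A sketch of the common companions, simplified by dropping the PHYSICAL_PAGE_MASK term from PAGE_MASK; the helper names are illustrative, not part of the header:

#include <stdint.h>

#define PAGE_SHIFT      12
#define PAGE_SIZE       (1ULL << PAGE_SHIFT)
#define PAGE_MASK       (~(PAGE_SIZE - 1))

static inline uint64_t page_align_down(uint64_t addr)
{
        return addr & PAGE_MASK;                   /* start of addr's page */
}

static inline uint64_t page_align_up(uint64_t addr)
{
        return (addr + PAGE_SIZE - 1) & PAGE_MASK; /* next page boundary */
}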
/linux-master/security/selinux/
selinuxfs.c
144 if (count >= PAGE_SIZE)
251 if (vma->vm_pgoff > 0 || size != PAGE_SIZE)
279 if (count >= PAGE_SIZE)
452 if (offset >= roundup(plm->len, PAGE_SIZE))
689 if (count >= PAGE_SIZE)
742 if (count >= PAGE_SIZE)
1223 length = scnprintf(page, PAGE_SIZE, "%d %d", cur_enforcing,
1246 if (count >= PAGE_SIZE)
1301 if (count >= PAGE_SIZE)
1369 len = snprintf(page, PAGE_SIZE, "/
[all...]
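
The repeated count >= PAGE_SIZE checks guard the same pattern throughout selinuxfs: user input is staged in a single page, so anything that would not fit there with its terminating NUL is rejected up front. A minimal sketch of one such handler, not selinuxfs's actual code:

static ssize_t one_page_write(struct file *file, const char __user *buf,
                              size_t count, loff_t *ppos)
{
        char *page;

        if (count >= PAGE_SIZE)                 /* leave room for the NUL */
                return -ENOMEM;

        page = memdup_user_nul(buf, count);     /* copy and NUL-terminate */
        if (IS_ERR(page))
                return PTR_ERR(page);

        /* ... parse the command held in 'page' ... */

        kfree(page);
        return count;
}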
/linux-master/security/
security.c
4044 if (size > PAGE_SIZE)
/linux-master/fs/vboxsf/
file.c
234 u32 nread = PAGE_SIZE;
242 memset(&buf[nread], 0, PAGE_SIZE - nread);
279 u32 nwrite = PAGE_SIZE;
283 if (off + PAGE_SIZE > size)
337 if (!PageUptodate(page) && nwritten == PAGE_SIZE)
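
The vboxsf fragments show page-granular I/O against a host that may return short counts: a page-sized read that comes back short is zero-filled to the end of the page, and a page straddling EOF only writes the bytes up to the file size. A sketch under those assumptions; host_read() is a hypothetical stand-in for the hypervisor call:

static void read_one_page(struct file *file, void *buf, loff_t off)
{
        u32 nread = PAGE_SIZE;

        host_read(file, off, buf, &nread);      /* may shorten nread */
        if (nread < PAGE_SIZE)
                memset(buf + nread, 0, PAGE_SIZE - nread);
}

static u32 bytes_to_write(loff_t off, loff_t isize)
{
        u32 nwrite = PAGE_SIZE;

        if (off + PAGE_SIZE > isize)            /* page straddles EOF */
                nwrite = isize - off;
        return nwrite;
}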
/linux-master/fs/
namei.c
5193 nd_terminate_link(kaddr, inode->i_size, PAGE_SIZE - 1);
/linux-master/arch/arm64/kvm/
mmu.c
87 * of blocks into PAGE_SIZE PTEs. It assumes the range is already
340 phys_addr_t end = addr + PAGE_SIZE * memslot->npages;
536 start = ALIGN_DOWN(__pa(from), PAGE_SIZE);
538 for (cur = start; cur < end; cur += PAGE_SIZE) {
556 start = ALIGN_DOWN(__pa(from), PAGE_SIZE);
558 for (cur = start; cur < end; cur += PAGE_SIZE) {
590 for (virt_addr = start; virt_addr < end; virt_addr += PAGE_SIZE) {
594 err = __create_hyp_mappings(virt_addr, PAGE_SIZE, phys_addr,
647 * The allocated size is always a multiple of PAGE_SIZE.
702 size = PAGE_SIZE *
[all...]
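
Lines 536-558 and 590-594 show the per-page walk used to build hyp mappings: align the start down to a page boundary, then create one PAGE_SIZE mapping per iteration. A sketch of that shape; map_one_hyp_page() is a hypothetical stand-in for __create_hyp_mappings(..., PAGE_SIZE, ...):

static int map_hyp_range(void *from, void *to)
{
        phys_addr_t start = ALIGN_DOWN(__pa(from), PAGE_SIZE);
        phys_addr_t end = PAGE_ALIGN(__pa(to));
        phys_addr_t cur;

        for (cur = start; cur < end; cur += PAGE_SIZE) {
                int err = map_one_hyp_page(cur);

                if (err)
                        return err;
        }
        return 0;
}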
/linux-master/arch/arm64/kvm/hyp/vhe/
tlb.c
164 * the same level, assume the worst case as PAGE_SIZE
166 stride = PAGE_SIZE;
/linux-master/arch/arm64/kvm/hyp/
pgtable.c
328 .start = ALIGN_DOWN(addr, PAGE_SIZE),
329 .addr = ALIGN_DOWN(addr, PAGE_SIZE),
372 ret = kvm_pgtable_walk(pgt, ALIGN_DOWN(addr, PAGE_SIZE),
373 PAGE_SIZE, &walker);
494 .phys = ALIGN_DOWN(phys, PAGE_SIZE),
1085 .phys = ALIGN_DOWN(phys, PAGE_SIZE),
1562 pgd_sz = kvm_pgd_pages(ia_bits, start_level) * PAGE_SIZE;
1585 return kvm_pgd_pages(ia_bits, start_level) * PAGE_SIZE;
1614 pgd_sz = kvm_pgd_pages(pgt->ia_bits, pgt->start_level) * PAGE_SIZE;
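
The pgtable.c hits split into two idioms: walks over a single faulting page (align the address down, then cover exactly PAGE_SIZE, lines 328-329 and 372-373), and top-level table sizes that are always whole pages (kvm_pgd_pages() * PAGE_SIZE). A sketch of the first; do_walk() and the local struct are illustrative stand-ins for the kvm_pgtable walker API:

static int walk_one_page(u64 addr)
{
        struct {
                u64 start;
                u64 addr;
        } data = {
                .start = ALIGN_DOWN(addr, PAGE_SIZE),
                .addr  = ALIGN_DOWN(addr, PAGE_SIZE),
        };

        return do_walk(data.start, PAGE_SIZE, &data);
}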

Completed in 348 milliseconds
