Searched refs:page (Results 126 - 150 of 3144) sorted by relevance

/linux-master/include/xen/
balloon.h
29 int xen_alloc_ballooned_pages(unsigned int nr_pages, struct page **pages);
30 void xen_free_ballooned_pages(unsigned int nr_pages, struct page **pages);
grant_table.h
40 #include <asm/page.h>
48 #include <xen/page.h>
50 #include <linux/page-flags.h>
81 struct page **pages;
102 * access has been ended, free the given page too. Access will be ended
104 * some time later. page may be NULL, in which case no freeing will occur.
105 * Note that the granted page might still be accessed (read or write) by the
106 * other side after gnttab_end_foreign_access() returns, so even if page was
107 * specified as NULL it is not allowed to just reuse the page for other
109 * reference to the granted page i
153 gnttab_page_grant_foreign_access_ref_one( grant_ref_t ref, domid_t domid, struct page *page, int readonly) argument
283 xen_page_foreign(struct page *page) argument
313 gnttab_for_one_grant(struct page *page, unsigned int offset, unsigned len, xen_grant_fn_t fn, void *data) argument
[all...]
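
The excerpted comment spells out the ownership rule around gnttab_end_foreign_access(): even when the page argument is NULL, the caller may not simply reuse the granted page, because the remote domain can still be reading or writing it. A minimal caller-side sketch of that rule, assuming the two-argument gnttab_end_foreign_access(ref, page) form this header documents (demo_drop_grant is a made-up name):

    #include <xen/grant_table.h>
    #include <linux/mm.h>

    /* Hand the page back together with its grant reference and let the
     * grant-table core decide when it can really be freed.  After this call
     * the caller must not touch or re-grant the page, even if NULL was passed. */
    static void demo_drop_grant(grant_ref_t ref, struct page *page)
    {
            gnttab_end_foreign_access(ref, page);   /* may free now or later */
    }
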
/linux-master/arch/powerpc/platforms/pseries/
svm.c
69 static struct page *dtl_page_store[NR_DTL_PAGE];
72 static bool is_dtl_page_shared(struct page *page) argument
77 if (dtl_page_store[i] == page)
86 struct page *page = pfn_to_page(pfn); local
88 if (!is_dtl_page_shared(page)) {
89 dtl_page_store[dtl_nr_pages] = page;
/linux-master/arch/loongarch/include/asm/
asm-prototypes.h
6 #include <asm/page.h>
/linux-master/drivers/gpu/drm/imagination/
pvr_fw_mips.h
9 #include <asm/page.h>
24 struct page *pt_pages[PVR_MIPS_PT_PAGE_COUNT];
/linux-master/arch/nios2/include/asm/
cacheflush.h
16 * This flag is used to indicate that the page pointed to by a pte is clean
31 void flush_dcache_page(struct page *page);
36 void flush_icache_pages(struct vm_area_struct *vma, struct page *page,
44 extern void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
47 extern void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
/linux-master/include/asm-generic/
cacheflush.h
9 struct page;
51 static inline void flush_dcache_page(struct page *page) argument
82 struct page *page,
107 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
111 flush_icache_user_page(vma, page, vaddr, len); \
117 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
81 flush_icache_user_page(struct vm_area_struct *vma, struct page *page, unsigned long addr, int len) argument
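
The copy_to_user_page() macro excerpted above captures the generic two-step contract: perform the byte copy, then call flush_icache_user_page() so stale instructions are not fetched from the freshly written range. A stand-alone sketch of that shape, with demo_* stand-ins and a no-op flush (which is exactly what a fully coherent architecture can get away with):

    #include <string.h>

    /* On a fully coherent system this can be empty, which is what the
     * asm-generic fallback provides; real ports flush the written range here. */
    static void demo_flush_icache_user_range(void *addr, unsigned long len)
    {
            (void)addr;
            (void)len;
    }

    /* Shape of copy_to_user_page(): copy the bytes, then fix up the I-cache. */
    static void demo_copy_to_user_page(void *dst, const void *src, unsigned long len)
    {
            memcpy(dst, src, len);
            demo_flush_icache_user_range(dst, len);
    }
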
memory_model.h
19 #define __page_to_pfn(page) ((unsigned long)((page) - mem_map) + \
38 #define __page_to_pfn(page) (unsigned long)((page) - vmemmap)
46 ({ const struct page *__pg = (pg); \
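
Both __page_to_pfn() variants above are plain pointer arithmetic against a base array of struct page (mem_map for FLATMEM, vmemmap for SPARSEMEM_VMEMMAP), with the elided continuation adding the architecture's PFN offset in the FLATMEM case. A user-space sketch of the same round-trip, using made-up demo_* names and a fake offset:

    #include <stdio.h>

    struct demo_page { unsigned long flags; };       /* stand-in for struct page */

    #define DEMO_PFN_OFFSET 0x100UL                  /* stand-in for the arch PFN offset */
    static struct demo_page demo_mem_map[16];        /* stand-in for mem_map / vmemmap */

    /* FLATMEM-style conversion: the pfn is the index into the map plus the offset. */
    static unsigned long demo_page_to_pfn(const struct demo_page *page)
    {
            return (unsigned long)(page - demo_mem_map) + DEMO_PFN_OFFSET;
    }

    static struct demo_page *demo_pfn_to_page(unsigned long pfn)
    {
            return &demo_mem_map[pfn - DEMO_PFN_OFFSET];
    }

    int main(void)
    {
            struct demo_page *p = &demo_mem_map[5];

            printf("pfn=%#lx round-trip ok=%d\n", demo_page_to_pfn(p),
                   demo_pfn_to_page(demo_page_to_pfn(p)) == p);
            return 0;
    }
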
page.h
5 * Generic page.h implementation, for NOMMU architectures.
10 #error need to provide a real asm/page.h
14 /* PAGE_SHIFT determines the page size */
28 #define clear_page(page) memset((page), 0, PAGE_SIZE)
31 #define clear_user_page(page, vaddr, pg) clear_page(page)
49 typedef struct page *pgtable_t;
89 #define page_to_virt(page) pfn_to_virt(page_to_pfn(page))
[all...]
/linux-master/sound/pci/emu10k1/
memory.c
6 * EMU10K1 memory page allocation (PTB area)
18 /* page arguments of these two macros are Emu page (4096 bytes), not like
21 #define __set_ptb_entry(emu,page,addr) \
22 (((__le32 *)(emu)->ptb_pages.area)[page] = \
23 cpu_to_le32(((addr) << (emu->address_mode)) | (page)))
24 #define __get_ptb_entry(emu, page) \
25 (le32_to_cpu(((__le32 *)(emu)->ptb_pages.area)[page]))
30 /* get aligned page from offset address */
32 /* get offset address from aligned page */
42 set_ptb_entry(struct snd_emu10k1 *emu, int page, dma_addr_t addr) argument
53 set_silent_ptb(struct snd_emu10k1 *emu, int page) argument
97 int page = 1, found_page = -ENOMEM; local
136 int page, pg; local
210 int page, psize; local
307 int page, err, idx; local
482 int page; local
512 int page, first_page, last_page; local
552 offset_ptr(struct snd_emu10k1 *emu, int page, int offset) argument
573 int page, nextofs, end_offset, temp, temp1; local
603 int page, nextofs, end_offset, temp, temp1; local
[all...]
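
The __set_ptb_entry() macro above packs one little-endian 32-bit page-table entry: the DMA address shifted left by emu->address_mode, OR-ed with the page index, which fits in the low bits because the address is aligned to the 4096-byte Emu page mentioned in the comment. A stand-alone sketch of just that packing (illustrative names and values; the real code also byte-swaps with cpu_to_le32() and stores through ptb_pages.area):

    #include <stdint.h>
    #include <stdio.h>

    #define DEMO_EMUPAGE_SIZE 4096u          /* the 4096-byte "Emu page" from the comment */

    /* Pack a page-aligned DMA address and a page index into one PTB entry.
     * Because addr is a multiple of DEMO_EMUPAGE_SIZE, its low bits are zero
     * and the page index can share the word without overlapping. */
    static uint32_t demo_make_ptb_entry(uint32_t addr, unsigned int page,
                                        unsigned int address_mode)
    {
            return (addr << address_mode) | page;
    }

    int main(void)
    {
            printf("entry=%#x\n", (unsigned int)demo_make_ptb_entry(0x10000u, 3, 1));
            return 0;
    }
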
/linux-master/arch/arm/mm/
copypage-v4wb.c
47 void v4wb_copy_user_highpage(struct page *to, struct page *from,
65 void v4wb_clear_user_highpage(struct page *page, unsigned long vaddr) argument
67 void *ptr, *kaddr = kmap_atomic(page);
copypage-v4wt.c
43 void v4wt_copy_user_highpage(struct page *to, struct page *from,
60 void v4wt_clear_user_highpage(struct page *page, unsigned long vaddr) argument
62 void *ptr, *kaddr = kmap_atomic(page);
copypage-fa.c
38 void fa_copy_user_highpage(struct page *to, struct page *from,
55 void fa_clear_user_highpage(struct page *page, unsigned long vaddr) argument
57 void *ptr, *kaddr = kmap_atomic(page);
copypage-xsc3.c
22 * The source page may have some clean entries in the cache already, but we
24 * if we eventually end up using our copied page.
64 void xsc3_mc_copy_user_highpage(struct page *to, struct page *from,
80 void xsc3_mc_clear_user_highpage(struct page *page, unsigned long vaddr) argument
82 void *ptr, *kaddr = kmap_atomic(page);
copypage-feroceon.c
66 void feroceon_copy_user_highpage(struct page *to, struct page *from,
79 void feroceon_clear_user_highpage(struct page *page, unsigned long vaddr) argument
81 void *ptr, *kaddr = kmap_atomic(page);
/linux-master/arch/arm64/mm/
copypage.c
12 #include <asm/page.h>
17 void copy_highpage(struct page *to, struct page *from)
28 /* It's a new page, shouldn't have been tagged yet */
36 void copy_user_highpage(struct page *to, struct page *from,
/linux-master/tools/testing/radix-tree/
regression1.c
17 * 4. The reader looks at the index 0 slot, and finds that the page has 0 ref
48 struct page { struct
55 static struct page *page_alloc(int index)
57 struct page *p;
58 p = malloc(sizeof(struct page));
68 struct page *p = container_of(rcu, struct page, rcu);
74 static void page_free(struct page *p)
80 unsigned int nr_pages, struct page **pages)
83 struct page *pag local
[all...]
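
The test's private struct page above is reclaimed through an RCU callback that recovers the enclosing structure from the embedded rcu member with container_of() (line 68 of the excerpt). A minimal user-space re-creation of that deferred-free shape, with demo_* names and a plain function pointer standing in for call_rcu():

    #include <stdlib.h>
    #include <stddef.h>

    /* Stand-in for struct rcu_head: just enough to hang a callback on. */
    struct demo_rcu_head {
            void (*func)(struct demo_rcu_head *head);
    };

    struct demo_page {
            int count;
            struct demo_rcu_head rcu;       /* embedded, like the test's page->rcu */
    };

    #define demo_container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    /* The callback only sees the embedded head and walks back to the page. */
    static void demo_page_rcu_free(struct demo_rcu_head *head)
    {
            free(demo_container_of(head, struct demo_page, rcu));
    }

    int main(void)
    {
            struct demo_page *p = calloc(1, sizeof(*p));

            if (!p)
                    return 1;
            p->rcu.func = demo_page_rcu_free;
            p->rcu.func(&p->rcu);           /* call_rcu() would defer this */
            return 0;
    }
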
/linux-master/drivers/gpu/drm/i915/gt/
shmem_utils.c
59 struct page **pages;
102 struct page *page; local
105 page = shmem_read_mapping_page_gfp(file->f_mapping, pfn,
107 if (IS_ERR(page))
108 return PTR_ERR(page);
110 vaddr = kmap(page);
113 set_page_dirty(page);
117 mark_page_accessed(page);
118 kunmap(page);
137 struct page *page; local
[all...]
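
The loop excerpted above maps each shmem page, writes into it, marks it dirty and accessed, then unmaps it. A hedged sketch of that per-page sequence with the surrounding loop, error handling, and the shmem_read_mapping_page_gfp() lookup left out (demo_fill_shmem_page and its src/len parameters are invented for illustration):

    #include <linux/highmem.h>
    #include <linux/mm.h>
    #include <linux/string.h>
    #include <linux/swap.h>

    /* Hypothetical helper: write 'len' bytes from 'src' into one shmem page. */
    static void demo_fill_shmem_page(struct page *page, const void *src, size_t len)
    {
            void *vaddr = kmap(page);       /* temporary kernel mapping */

            memcpy(vaddr, src, len);        /* modify the page contents */
            set_page_dirty(page);           /* writeback must see the change */
            mark_page_accessed(page);       /* keep the page warm on the LRU */
            kunmap(page);
    }
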
/linux-master/net/rds/
page.c
41 struct page *r_page;
58 * If @bytes is at least a full page then this just returns a page from
61 * If @bytes is a partial page then this stores the unused region of the
62 * page in a per-cpu structure. Future partial-page allocations may be
65 * path passes read-only page regions down to devices. They hold a page
73 struct page *page; local
[all...]
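
The comment above describes a small remainder allocator: requests of a full page or more get a fresh page, while partial requests are carved out of the unused tail of a previously allocated page held in a per-cpu structure. A toy user-space sketch of that carving logic, without the per-cpu handling or page refcounting the real code relies on (all demo_* names are invented):

    #include <stdlib.h>
    #include <stddef.h>

    #define DEMO_PAGE_SIZE 4096u

    /* Unused tail of the most recently allocated page (per-cpu in the real code). */
    static struct { char *base; unsigned int offset; } demo_rem;

    static void *demo_alloc(size_t bytes)
    {
            void *p;

            if (bytes >= DEMO_PAGE_SIZE)            /* whole page: hand out a fresh one */
                    return malloc(DEMO_PAGE_SIZE);

            /* Partial request: try to carve it from the stored remainder. */
            if (demo_rem.base && demo_rem.offset + bytes <= DEMO_PAGE_SIZE) {
                    p = demo_rem.base + demo_rem.offset;
                    demo_rem.offset += bytes;
                    return p;
            }

            /* Otherwise start a new page and remember its tail for next time. */
            demo_rem.base = malloc(DEMO_PAGE_SIZE);
            if (!demo_rem.base)
                    return NULL;
            demo_rem.offset = bytes;
            return demo_rem.base;
    }
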
/linux-master/arch/hexagon/include/asm/
page.h
61 * We implement a two-level architecture-specific page table structure.
62 * Null intermediate page table level (pmd, pud) definitions will come from
68 typedef struct page *pgtable_t;
88 /* The "page frame" descriptor is defined in linux/mm.h */
89 struct page;
91 /* Returns page frame descriptor for virtual address. */
100 static inline void clear_page(void *page) argument
107 : "+r" (page)
118 #define clear_user_page(page, vaddr, pg) clear_page(page)
[all...]
/linux-master/include/linux/
async_tx.h
67 * @scribble: caller provided space for dma/page address conversions
119 enum dma_transaction_type tx_type, struct page **dst,
120 int dst_count, struct page **src, int src_count,
141 struct page *page; member in union:__anon4038
162 async_xor(struct page *dest, struct page **src_list, unsigned int offset,
166 async_xor_offs(struct page *dest, unsigned int offset,
167 struct page **src_list, unsigned int *src_offset,
171 async_xor_val(struct page *des
[all...]
/linux-master/drivers/net/dsa/b53/
b53_mmap.c
33 static int b53_mmap_read8(struct b53_device *dev, u8 page, u8 reg, u8 *val) argument
38 *val = readb(regs + (page << 8) + reg);
43 static int b53_mmap_read16(struct b53_device *dev, u8 page, u8 reg, u16 *val) argument
52 *val = ioread16be(regs + (page << 8) + reg);
54 *val = readw(regs + (page << 8) + reg);
59 static int b53_mmap_read32(struct b53_device *dev, u8 page, u8 reg, u32 *val) argument
68 *val = ioread32be(regs + (page << 8) + reg);
70 *val = readl(regs + (page << 8) + reg);
75 static int b53_mmap_read48(struct b53_device *dev, u8 page, u8 reg, u64 *val) argument
88 lo = ioread16be(regs + (page <<
114 b53_mmap_read64(struct b53_device *dev, u8 page, u8 reg, u64 *val) argument
136 b53_mmap_write8(struct b53_device *dev, u8 page, u8 reg, u8 value) argument
146 b53_mmap_write16(struct b53_device *dev, u8 page, u8 reg, u16 value) argument
163 b53_mmap_write32(struct b53_device *dev, u8 page, u8 reg, u32 value) argument
180 b53_mmap_write48(struct b53_device *dev, u8 page, u8 reg, u64 value) argument
203 b53_mmap_write64(struct b53_device *dev, u8 page, u8 reg, u64 value) argument
[all...]
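
Every accessor above computes its MMIO offset the same way: (page << 8) + reg, i.e. each switch register page spans 256 bytes and reg is the byte offset within it. A tiny stand-alone sketch of that address math (demo_b53_offset is a made-up name; the real code then goes through readb()/readw()/ioread16be() and friends depending on width and endianness):

    #include <stdint.h>
    #include <stdio.h>

    /* Offset of a register inside a flat b53-style register window:
     * 256 bytes per page, 'reg' is the byte offset within that page. */
    static unsigned int demo_b53_offset(uint8_t page, uint8_t reg)
    {
            return ((unsigned int)page << 8) + reg;
    }

    int main(void)
    {
            /* e.g. page 0x02, register 0x30 lands 0x230 bytes into the window */
            printf("offset=%#x\n", demo_b53_offset(0x02, 0x30));
            return 0;
    }
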
/linux-master/arch/sparc/include/asm/
ross.h
12 #include <asm/page.h>
175 static inline void hyper_flush_cache_page(unsigned long page) argument
179 page &= PAGE_MASK;
180 end = page + PAGE_SIZE;
181 while (page < end) {
184 : "r" (page), "i" (ASI_M_FLUSH_PAGE)
186 page += vac_line_size;
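
hyper_flush_cache_page() above rounds the address down to its page, then walks the page one cache line at a time, issuing an ASI_M_FLUSH_PAGE store per line. A sketch of that loop with the inline assembly replaced by a stub and illustrative page/line sizes (the real stride, vac_line_size, is a runtime variable):

    #define DEMO_PAGE_SIZE 4096UL
    #define DEMO_PAGE_MASK (~(DEMO_PAGE_SIZE - 1))
    #define DEMO_LINE_SIZE 32UL              /* illustrative; stands in for vac_line_size */

    /* Stub for the ASI_M_FLUSH_PAGE store in the original inline assembly. */
    static void demo_flush_one_line(unsigned long addr)
    {
            (void)addr;
    }

    static void demo_flush_cache_page(unsigned long page)
    {
            unsigned long end;

            page &= DEMO_PAGE_MASK;          /* round down to the page base */
            end = page + DEMO_PAGE_SIZE;
            while (page < end) {
                    demo_flush_one_line(page);
                    page += DEMO_LINE_SIZE;  /* advance one cache line */
            }
    }
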
/linux-master/fs/ocfs2/
symlink.c
25 * Jun 7 1999, cache symlink lookups in the page cache. -DaveM
57 struct page *page = &folio->page; local
58 struct inode *inode = page->mapping->host;
73 /* will be less than a page size */
75 kaddr = kmap_atomic(page);
78 SetPageUptodate(page);
79 unlock_page(page);
/linux-master/arch/x86/include/asm/
page_64.h
42 void clear_page_orig(void *page);
43 void clear_page_rep(void *page);
44 void clear_page_erms(void *page);
46 static inline void clear_page(void *page) argument
49 * Clean up KMSAN metadata for the page being cleared. The assembly call
50 * below clobbers @page, so we perform unpoisoning before it.
52 kmsan_unpoison_memory(page, PAGE_SIZE);
56 "=D" (page),
57 "0" (page)
75 * CPUs malfunction if they execute code from the highest canonical page
[all...]
