Lines Matching refs:page

6  *  EMU10K1 memory page allocation (PTB area)
18 /* page arguments of these two macros are Emu page (4096 bytes), not like
21 #define __set_ptb_entry(emu,page,addr) \
22 (((__le32 *)(emu)->ptb_pages.area)[page] = \
23 cpu_to_le32(((addr) << (emu->address_mode)) | (page)))
24 #define __get_ptb_entry(emu, page) \
25 (le32_to_cpu(((__le32 *)(emu)->ptb_pages.area)[page]))
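These two macros define the page-table entry format: __set_ptb_entry() packs the page's DMA address, shifted left by emu->address_mode (0 or 1, matching the MAX_ALIGN_PAGES0/1 split further down), OR-ed with the page index in the low bits, and stores it little-endian; __get_ptb_entry() undoes only the endian conversion. A minimal userspace sketch of the packing, with the __le32 handling dropped and hypothetical values in main():

    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors the arithmetic of __set_ptb_entry(): the driver stores
     * cpu_to_le32((addr << address_mode) | page) into ptb_pages.area[page].
     * Endian conversion is omitted here. */
    static uint32_t make_ptb_entry(uint32_t addr, unsigned int address_mode,
                                   unsigned int page)
    {
        return (addr << address_mode) | page;
    }

    int main(void)
    {
        /* hypothetical DMA address and PTB index, just to show the packing */
        printf("PTB entry: %.8x\n", make_ptb_entry(0x1234000u, 1, 5));
        return 0;
    }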
30 /* get aligned page from offset address */
32 /* get offset address from aligned page */
33 #define aligned_page_offset(page) ((page) << PAGE_SHIFT)
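aligned_page_offset() converts an aligned page index back to a byte offset; its counterpart get_aligned_page(), whose comment appears just above, shows no definition here (the line presumably contains no bare page reference for the cross-referencer to match), but it is the mirror shift by PAGE_SHIFT. A compile-and-run sketch of both directions, assuming 4096-byte host pages:

    #include <assert.h>

    #define PAGE_SHIFT 12   /* assumed: 4096-byte host pages */

    /* aligned_page_offset() is quoted in the listing; get_aligned_page()
     * is assumed to be the mirror shift */
    #define get_aligned_page(offset)  ((offset) >> PAGE_SHIFT)
    #define aligned_page_offset(page) ((page) << PAGE_SHIFT)

    int main(void)
    {
        assert(get_aligned_page(0x3010) == 3);    /* byte offset -> page index */
        assert(aligned_page_offset(3) == 0x3000); /* page index -> byte offset */
        return 0;
    }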
36 /* fill PTB entry(ies) corresponding to page with addr */
37 #define set_ptb_entry(emu,page,addr) __set_ptb_entry(emu,page,addr)
38 /* fill PTB entry(ies) corresponding to page with silence pointer */
39 #define set_silent_ptb(emu,page) __set_ptb_entry(emu,page,emu->silent_page.addr)
42 static inline void set_ptb_entry(struct snd_emu10k1 *emu, int page, dma_addr_t addr)
45 page *= UNIT_PAGES;
46 for (i = 0; i < UNIT_PAGES; i++, page++) {
47 __set_ptb_entry(emu, page, addr);
48 dev_dbg(emu->card->dev, "mapped page %d to entry %.8x\n", page,
49 (unsigned int)__get_ptb_entry(emu, page));
53 static inline void set_silent_ptb(struct snd_emu10k1 *emu, int page)
56 page *= UNIT_PAGES;
57 for (i = 0; i < UNIT_PAGES; i++, page++) {
59 __set_ptb_entry(emu, page, emu->silent_page.addr);
60 dev_dbg(emu->card->dev, "mapped silent page %d to entry %.8x\n",
61 page, (unsigned int)__get_ptb_entry(emu, page));
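The two inline functions above are the PAGE_SIZE != EMUPAGE_SIZE variants: one aligned host page spans UNIT_PAGES consecutive 4096-byte Emu pages, so each call fans out into UNIT_PAGES PTB writes. For set_silent_ptb() every entry gets the same silent_page.addr; for set_ptb_entry() the address presumably advances by EMUPAGE_SIZE per entry (that line carries no page identifier, so it is absent from this listing). A standalone sketch of the fan-out, assuming 16 KiB host pages:

    #include <stdio.h>

    #define EMUPAGE_SIZE   4096
    #define HOST_PAGE_SIZE 16384                        /* assumed, e.g. a 16K-page arch */
    #define UNIT_PAGES (HOST_PAGE_SIZE / EMUPAGE_SIZE)  /* 4 PTB entries per host page */

    /* stand-in for __set_ptb_entry() */
    static void set_entry(int emu_page, unsigned long addr)
    {
        printf("entry %3d -> %#lx\n", emu_page, addr);
    }

    /* one host page at index 'page' fills UNIT_PAGES consecutive entries;
     * the addr advance per entry is an assumption (see lead-in) */
    static void map_host_page(int page, unsigned long addr)
    {
        page *= UNIT_PAGES;
        for (int i = 0; i < UNIT_PAGES; i++, page++, addr += EMUPAGE_SIZE)
            set_entry(page, addr);
    }

    int main(void)
    {
        map_host_page(2, 0x100000);  /* host page 2 -> entries 8..11 */
        return 0;
    }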
91 * if an empty region is found, return the page and store the next mapped block
97 int page = 1, found_page = -ENOMEM;
107 size = blk->mapped_page - page;
110 return page;
116 found_page = page;
118 page = blk->mapped_page + blk->pages;
120 size = (emu->address_mode ? MAX_ALIGN_PAGES1 : MAX_ALIGN_PAGES0) - page;
123 return page;
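These fragments are search_empty_map_area(): starting at page 1 (entry 0 holds the reserved silent page, per the error message below), it walks the mapped blocks in PTB order, measuring each gap as blk->mapped_page - page. An exact-size gap is returned immediately; otherwise a candidate page is remembered in found_page, and the tail gap up to MAX_ALIGN_PAGES0/1 is tried last. A userspace sketch with the block list as a sorted array; the largest-hole fallback policy is inferred, not visible in the listing:

    #include <errno.h>

    struct mapped_blk { int mapped_page; int pages; };

    /* first scan for an exact fit, else fall back to the biggest hole;
     * blocks are sorted by mapped_page, as the kernel's list walk implies */
    static int search_empty_area(const struct mapped_blk *blk, int nblocks,
                                 int npages, int total_pages)
    {
        int page = 1, found_page = -ENOMEM, max_size = npages, size;

        for (int i = 0; i < nblocks; i++) {
            size = blk[i].mapped_page - page;     /* gap before this block */
            if (size == npages)
                return page;                      /* exact fit: take it now */
            if (size > max_size) {                /* assumed fallback policy */
                max_size = size;
                found_page = page;
            }
            page = blk[i].mapped_page + blk[i].pages;
        }
        size = total_pages - page;                /* trailing gap */
        if (size >= max_size)
            return page;
        return found_page;                        /* -ENOMEM if nothing fits */
    }

    int main(void)
    {
        const struct mapped_blk blocks[] = { { 1, 4 }, { 9, 2 } };
        /* pages 5..8 are free: an exact fit for npages == 4 */
        return search_empty_area(blocks, 2, 4, 32) == 5 ? 0 : 1;
    }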
136 int page, pg;
139 page = search_empty_map_area(emu, blk->pages, &next);
140 if (page < 0) /* not found */
141 return page;
142 if (page == 0) {
143 dev_err(emu->card->dev, "trying to map zero (reserved) page\n");
150 blk->mapped_page = page;
153 set_ptb_entry(emu, page, emu->page_addr_table[pg]);
154 page++;
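Around these fragments sits the mapping routine: it asks search_empty_map_area() for a run of free PTB entries, refuses page 0 (reserved for silence), records blk->mapped_page, and writes one PTB entry per backing host page out of page_addr_table[]. A condensed sketch with the driver structures reduced to the fields the fragments touch; the search and PTB-write stand-ins are placeholders:

    #include <errno.h>

    struct memblk { int first_page, last_page, pages, mapped_page; };
    struct emu    { unsigned long page_addr_table[64]; };

    static void set_ptb_entry_sim(struct emu *emu, int page, unsigned long addr)
    {
        (void)emu; (void)page; (void)addr;   /* would write the PTB entry */
    }

    static int search_empty_map_area_sim(struct emu *emu, int npages)
    {
        (void)emu; (void)npages;
        return 1;                            /* pretend page 1 is free */
    }

    /* find a hole, refuse the reserved page 0, then map each host page */
    static int map_block(struct emu *emu, struct memblk *blk)
    {
        int page = search_empty_map_area_sim(emu, blk->pages);
        if (page < 0)
            return page;                     /* not found */
        if (page == 0)
            return -EINVAL;                  /* page 0 holds the silent page */
        blk->mapped_page = page;
        for (int pg = blk->first_page; pg <= blk->last_page; pg++, page++)
            set_ptb_entry_sim(emu, page, emu->page_addr_table[pg]);
        return 0;
    }

    int main(void)
    {
        struct emu emu = { { 0 } };
        struct memblk blk = { 0, 3, 4, -1 };
        return map_block(&emu, &blk);
    }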
203 * unlike synth_alloc, the memory block is aligned to the page start
210 int page, psize;
213 page = 0;
216 if (page + psize <= blk->first_page)
218 page = blk->last_page + 1;
220 if (page + psize > emu->max_cache_pages)
228 blk->mem.offset = aligned_page_offset(page); /* set aligned offset */
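This group is the page-aligned first-fit allocator for DMA blocks: starting at page 0, it walks the existing blocks in order, takes the first gap of at least psize pages, gives up if even the space past the last block would exceed max_cache_pages, and derives a page-aligned byte offset from the winning page. A sketch with the block list as a sorted array (the array form and names are mine):

    struct blk_range { int first_page, last_page; };

    /* first fit over page-aligned blocks, sorted by first_page */
    static int find_aligned_gap(const struct blk_range *blk, int nblocks,
                                int psize, int max_cache_pages)
    {
        int page = 0;
        for (int i = 0; i < nblocks; i++) {
            if (page + psize <= blk[i].first_page)
                return page;                  /* gap before this block fits */
            page = blk[i].last_page + 1;      /* skip past it */
        }
        if (page + psize > max_cache_pages)
            return -1;                        /* no room at the tail */
        return page;
    }

    int main(void)
    {
        const struct blk_range blocks[] = { { 0, 1 }, { 6, 7 } };
        return find_aligned_gap(blocks, 2, 3, 32) == 2 ? 0 : 1;  /* pages 2..4 */
    }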
246 dev_err_ratelimited(emu->card->dev, "page is not aligned\n");
276 /* not enough pages - try to unmap some blocks */
299 * page allocation for DMA
307 int page, err, idx;
328 for (page = blk->first_page; page <= blk->last_page; page++, idx++) {
337 "emu: failure page = %d\n", idx);
341 emu->page_addr_table[page] = addr;
342 emu->page_ptr_table[page] = NULL;
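These fragments come from the DMA page-allocation path for PCM buffers: each host page of the substream's buffer has its DMA address checked and recorded in page_addr_table[], while the matching page_ptr_table[] slot is set to NULL, since the memory belongs to the PCM layer and was not allocated here. A loose sketch of the table fill; the address source and the validity check (is_valid_page() in the driver) are reduced to placeholders:

    #include <stdio.h>

    #define MAX_PAGES 64
    static unsigned long page_addr_table[MAX_PAGES];
    static void *page_ptr_table[MAX_PAGES];

    /* placeholder: the driver's is_valid_page() enforces the card's DMA
     * addressing limits, whose details are not in this listing */
    static int is_valid_addr(unsigned long addr)
    {
        return addr != 0;
    }

    static int fill_page_tables(int first_page, int last_page,
                                const unsigned long *buf_addrs)
    {
        int page, idx = 0;
        for (page = first_page; page <= last_page; page++, idx++) {
            if (!is_valid_addr(buf_addrs[idx])) {
                fprintf(stderr, "emu: failure page = %d\n", idx);
                return -1;
            }
            page_addr_table[page] = buf_addrs[idx];
            page_ptr_table[page] = NULL;   /* page owned by the PCM buffer */
        }
        return 0;
    }

    int main(void)
    {
        const unsigned long addrs[] = { 0x100000, 0x104000 };
        return fill_page_tables(0, 1, addrs);
    }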
359 * release DMA buffer from page table
464 first_page++; /* first page was already allocated */
471 last_page--; /* last page was already allocated */
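The two adjustments above come from the synth allocate/free path: neighbouring blocks may share their edge host pages, so before touching the backing pages of a block the range is trimmed at either end when the adjacent block already owns that page. A sketch of the trimming; passing the neighbours' edge pages as parameters is my simplification (the driver finds them by walking the block list):

    /* shrink [first_page, last_page] so shared edge pages are skipped;
     * prev_last/next_first are the neighbours' edge pages, or -1 if none */
    static void trim_page_range(int *first_page, int *last_page,
                                int prev_last, int next_first)
    {
        if (prev_last == *first_page)
            (*first_page)++;   /* first page was already allocated */
        if (next_first == *last_page)
            (*last_page)--;    /* last page was already allocated */
    }

    int main(void)
    {
        int first = 4, last = 9;
        trim_page_range(&first, &last, 4, 9);   /* both edges shared */
        return (first == 5 && last == 8) ? 0 : 1;
    }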
482 int page;
487 for (page = first_page; page <= last_page; page++) {
488 if (emu->page_ptr_table[page] == NULL)
490 dmab.area = emu->page_ptr_table[page];
491 dmab.addr = emu->page_addr_table[page];
502 emu->page_addr_table[page] = 0;
503 emu->page_ptr_table[page] = NULL;
512 int page, first_page, last_page;
518 for (page = first_page; page <= last_page; page++) {
526 emu->page_addr_table[page] = dmab.addr;
527 emu->page_ptr_table[page] = dmab.area;
533 last_page = page - 1;
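This group shows per-page allocation with rollback: every host page in the trimmed range gets its own buffer, recorded in page_addr_table[]/page_ptr_table[], and if an allocation fails partway through, last_page = page - 1 caps the range so only the pages actually obtained get released. A userspace sketch of the allocate-or-unwind pattern, with plain malloc()/free() standing in for the DMA page allocator:

    #include <errno.h>
    #include <stdlib.h>

    #define MAX_PAGES 64
    static void *page_ptr_table[MAX_PAGES];

    static int alloc_pages_range(int first_page, int last_page)
    {
        int page;
        for (page = first_page; page <= last_page; page++) {
            page_ptr_table[page] = malloc(4096);   /* stand-in for DMA alloc */
            if (!page_ptr_table[page])
                goto fail;
        }
        return 0;
    fail:
        last_page = page - 1;          /* pages beyond this were never obtained */
        for (page = first_page; page <= last_page; page++) {
            free(page_ptr_table[page]);
            page_ptr_table[page] = NULL;
        }
        return -ENOMEM;
    }

    int main(void)
    {
        return alloc_pages_range(2, 5);
    }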
552 static inline void *offset_ptr(struct snd_emu10k1 *emu, int page, int offset)
555 if (snd_BUG_ON(page < 0 || page >= emu->max_cache_pages))
557 ptr = emu->page_ptr_table[page];
560 "access to NULL ptr: page = %d\n", page);
573 int page, nextofs, end_offset, temp, temp1;
582 page = get_aligned_page(offset);
584 nextofs = aligned_page_offset(page + 1);
589 ptr = offset_ptr(emu, page + p->first_page, offset);
593 page++;
633 int page, nextofs, end_offset, temp, temp1;
642 page = get_aligned_page(offset);
644 nextofs = aligned_page_offset(page + 1);
649 ptr = offset_ptr(emu, page + p->first_page, offset);
658 page++;
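The final two fragment groups are near-identical bzero- and copy-style helpers: each walks a byte range page by page, clamping every chunk to whichever is nearer, the next page boundary (nextofs) or the end of the range (end_offset), and resolves each chunk's address through offset_ptr(). A generic sketch of that chunking loop, with the memset/copy body abstracted into a callback:

    #include <stdio.h>

    #define PAGE_SHIFT 12                           /* assumed 4096-byte pages */
    #define get_aligned_page(offset)  ((offset) >> PAGE_SHIFT)
    #define aligned_page_offset(page) ((page) << PAGE_SHIFT)

    static void for_each_page_chunk(int offset, int size,
                                    void (*op)(int page, int offset, int len))
    {
        int end_offset = offset + size;
        int page = get_aligned_page(offset);
        do {
            int nextofs = aligned_page_offset(page + 1);
            int temp  = nextofs - offset;            /* room left in this page */
            int temp1 = end_offset - offset;         /* room left in the range */
            if (temp1 < temp)
                temp = temp1;
            op(page, offset, temp);                  /* memset/copy goes here */
            offset = nextofs;
            page++;
        } while (offset < end_offset);
    }

    static void show(int page, int offset, int len)
    {
        printf("page %d: offset %#x, %d bytes\n", page, offset, len);
    }

    int main(void)
    {
        for_each_page_chunk(0x1f00, 0x300, show);    /* spans a page boundary */
        return 0;
    }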