Lines matching refs:pd (source lines that reference the page-directory pointer pd, a struct psb_mmu_pd *)

120 void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context)
122 struct drm_device *dev = pd->driver->dev;
127 down_write(&pd->driver->sem);
128 PSB_WSGX32(page_to_pfn(pd->p) << PAGE_SHIFT, offset);
130 psb_mmu_flush_pd_locked(pd->driver, 1);
131 pd->hw_context = hw_context;
132 up_write(&pd->driver->sem);
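
psb_mmu_set_pd_context() above writes the directory page's physical address into the context's directory-list base register (the PSB_CR_BIF_DIR_LIST_BASE0 bank seen later at line 239) and remembers the context in pd->hw_context. A minimal caller-side sketch of how a driver might bind directories at init time; psb_mmu_alloc_pd() is an assumed name and signature for the constructor whose body starts around line 160 below, and the context numbering is illustrative only:

	struct psb_mmu_pd *default_pd = psb_mmu_get_default_pd(driver);
	struct psb_mmu_pd *pf_pd;

	/* Assumed signature: psb_mmu_alloc_pd(driver, trap_pagefaults, invalid_type). */
	pf_pd = psb_mmu_alloc_pd(driver, 1, 0);
	if (!pf_pd)
		return -ENOMEM;

	psb_mmu_set_pd_context(default_pd, 0);	/* context 0: ordinary mappings */
	psb_mmu_set_pd_context(pf_pd, 1);	/* context 1: page-fault-trapping directory */
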
160 struct psb_mmu_pd *pd = kmalloc(sizeof(*pd), GFP_KERNEL);
164 if (!pd)
167 pd->p = alloc_page(GFP_DMA32);
168 if (!pd->p)
170 pd->dummy_pt = alloc_page(GFP_DMA32);
171 if (!pd->dummy_pt)
173 pd->dummy_page = alloc_page(GFP_DMA32);
174 if (!pd->dummy_page)
178 pd->invalid_pde = psb_mmu_mask_pte(page_to_pfn(pd->dummy_pt),
180 pd->invalid_pte = psb_mmu_mask_pte(page_to_pfn(pd->dummy_page),
183 pd->invalid_pde = 0;
184 pd->invalid_pte = 0;
187 v = kmap_local_page(pd->dummy_pt);
189 v[i] = pd->invalid_pte;
193 v = kmap_local_page(pd->p);
195 v[i] = pd->invalid_pde;
199 clear_page(kmap(pd->dummy_page));
200 kunmap(pd->dummy_page);
202 pd->tables = vmalloc_user(sizeof(struct psb_mmu_pt *) * 1024);
203 if (!pd->tables)
206 pd->hw_context = -1;
207 pd->pd_mask = PSB_PTE_VALID;
208 pd->driver = driver;
210 return pd;
213 __free_page(pd->dummy_page);
215 __free_page(pd->dummy_pt);
217 __free_page(pd->p);
219 kfree(pd);
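
The fragments from lines 160-219 are the page-directory constructor and its error unwind: the directory page plus two dummy pages are allocated from GFP_DMA32, seeded with the invalid PDE/PTE patterns, and released in reverse order on failure. A condensed reconstruction of that shape; the out_err* label names and the elided fill loops are assumptions, not shown above:

	struct psb_mmu_pd *pd = kmalloc(sizeof(*pd), GFP_KERNEL);

	if (!pd)
		return NULL;

	pd->p = alloc_page(GFP_DMA32);		/* the page-directory page itself */
	if (!pd->p)
		goto out_err1;
	pd->dummy_pt = alloc_page(GFP_DMA32);	/* backs every invalid PDE */
	if (!pd->dummy_pt)
		goto out_err2;
	pd->dummy_page = alloc_page(GFP_DMA32);	/* backs every invalid PTE */
	if (!pd->dummy_page)
		goto out_err3;

	/* ... fill dummy_pt with pd->invalid_pte and pd->p with pd->invalid_pde ... */

	pd->tables = vmalloc_user(sizeof(struct psb_mmu_pt *) * 1024);
	if (!pd->tables)
		goto out_err4;

	pd->hw_context = -1;			/* not bound to a hardware context yet */
	pd->pd_mask = PSB_PTE_VALID;
	pd->driver = driver;
	return pd;

out_err4:
	__free_page(pd->dummy_page);
out_err3:
	__free_page(pd->dummy_pt);
out_err2:
	__free_page(pd->p);
out_err1:
	kfree(pd);
	return NULL;
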
229 void psb_mmu_free_pagedir(struct psb_mmu_pd *pd)
231 struct psb_mmu_driver *driver = pd->driver;
238 if (pd->hw_context != -1) {
239 PSB_WSGX32(0, PSB_CR_BIF_DIR_LIST_BASE0 + pd->hw_context * 4);
247 pt = pd->tables[i];
252 vfree(pd->tables);
253 __free_page(pd->dummy_page);
254 __free_page(pd->dummy_pt);
255 __free_page(pd->p);
256 kfree(pd);
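
psb_mmu_free_pagedir() undoes the above in a specific order: unhook the directory from its hardware context first, then free every page table that was lazily allocated into pd->tables, and only then release the directory's own pages. A short reconstruction; the down_write()/up_write() bracket and the psb_mmu_free_pt() helper name are assumptions consistent with the surrounding fragments:

	down_write(&driver->sem);

	if (pd->hw_context != -1) {
		/* Stop the hardware from walking this directory before freeing it. */
		PSB_WSGX32(0, PSB_CR_BIF_DIR_LIST_BASE0 + pd->hw_context * 4);
		psb_mmu_flush_pd_locked(driver, 1);
	}

	for (i = 0; i < 1024; ++i) {		/* one slot per possible page table */
		pt = pd->tables[i];
		if (pt)
			psb_mmu_free_pt(pt);	/* assumed helper, mirrors psb_mmu_alloc_pt() */
	}

	vfree(pd->tables);
	__free_page(pd->dummy_page);
	__free_page(pd->dummy_pt);
	__free_page(pd->p);
	kfree(pd);

	up_write(&driver->sem);
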
260 static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
264 uint32_t clflush_add = pd->driver->clflush_add >> PAGE_SHIFT;
266 spinlock_t *lock = &pd->driver->lock;
286 *ptes++ = pd->invalid_pte;
288 if (pd->driver->has_clflush && pd->hw_context != -1) {
300 pt->pd = pd;
306 static struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd,
312 spinlock_t *lock = &pd->driver->lock;
315 pt = pd->tables[index];
318 pt = psb_mmu_alloc_pt(pd);
323 if (pd->tables[index]) {
327 pt = pd->tables[index];
331 v = kmap_atomic(pd->p);
332 pd->tables[index] = pt;
333 v[index] = (page_to_pfn(pt->p) << 12) | pd->pd_mask;
337 if (pd->hw_context != -1) {
338 psb_mmu_clflush(pd->driver, (void *)&v[index]);
339 atomic_set(&pd->driver->needs_tlbflush, 1);
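
psb_mmu_pt_alloc_map_lock() (lines 306-339) uses an optimistic allocation pattern: drop the spinlock to allocate a fresh page table, retake it, and re-check pd->tables[index] in case another thread installed a table in the meantime. A sketch of that shape; psb_mmu_free_pt() and the exact kmap/kunmap ordering are assumptions or simplifications:

	spin_lock(lock);
	pt = pd->tables[index];
	while (!pt) {
		spin_unlock(lock);		/* cannot allocate while holding the spinlock */
		pt = psb_mmu_alloc_pt(pd);
		if (!pt)
			return NULL;
		spin_lock(lock);

		if (pd->tables[index]) {	/* lost the race: use the other thread's table */
			spin_unlock(lock);
			psb_mmu_free_pt(pt);	/* assumed helper */
			spin_lock(lock);
			pt = pd->tables[index];
			continue;
		}

		/* Won the race: install the table and point the PDE at it. */
		v = kmap_atomic(pd->p);
		pd->tables[index] = pt;
		v[index] = (page_to_pfn(pt->p) << 12) | pd->pd_mask;

		if (pd->hw_context != -1) {	/* hardware may walk this PDE: flush the line */
			psb_mmu_clflush(pd->driver, (void *)&v[index]);
			atomic_set(&pd->driver->needs_tlbflush, 1);
		}
		kunmap_atomic(v);
	}
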
346 static struct psb_mmu_pt *psb_mmu_pt_map_lock(struct psb_mmu_pd *pd,
351 spinlock_t *lock = &pd->driver->lock;
354 pt = pd->tables[index];
365 struct psb_mmu_pd *pd = pt->pd;
370 v = kmap_atomic(pd->p);
371 v[pt->index] = pd->invalid_pde;
372 pd->tables[pt->index] = NULL;
374 if (pd->hw_context != -1) {
375 psb_mmu_clflush(pd->driver, (void *)&v[pt->index]);
376 atomic_set(&pd->driver->needs_tlbflush, 1);
379 spin_unlock(&pd->driver->lock);
383 spin_unlock(&pd->driver->lock);
395 pt->v[psb_mmu_pt_index(addr)] = pt->pd->invalid_pte;
400 struct psb_mmu_pd *pd;
403 pd = driver->default_pd;
406 return pd;
477 static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd, unsigned long address,
489 unsigned long clflush_add = pd->driver->clflush_add;
490 unsigned long clflush_mask = pd->driver->clflush_mask;
492 if (!pd->driver->has_clflush)
510 pt = psb_mmu_pt_map_lock(pd, addr);
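
psb_mmu_flush_ptes() only matters when the CPU has clflush and the directory is live in a hardware context: it walks the PTE words just written and flushes the cache lines holding them so the GPU sees the update. Here clflush_add is the span of GPU address space whose PTEs share one CPU cache line, and clflush_mask aligns an address down to that span. A simplified sketch of the inner loop, assuming a single-line psb_clflush() helper and a range that stays within one page table (the real function also handles tiled strides):

	unsigned long clflush_add = pd->driver->clflush_add;
	unsigned long clflush_mask = pd->driver->clflush_mask;
	unsigned long addr = address;
	unsigned long end = address + ((unsigned long)num_pages << PAGE_SHIFT);
	struct psb_mmu_pt *pt;

	if (!pd->driver->has_clflush)
		return;				/* nothing to do on cache-coherent parts */

	pt = psb_mmu_pt_map_lock(pd, addr);
	if (pt) {
		do {	/* one clflush per cache line of PTEs covering the range */
			psb_clflush(&pt->v[psb_mmu_pt_index(addr)]);
		} while (addr += clflush_add, (addr & clflush_mask) < end);
		psb_mmu_pt_unmap_unlock(pt);	/* assumed counterpart of pt_map_lock */
	}
	mb();					/* order the flushes before the TLB flush */
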
525 void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
534 down_read(&pd->driver->sem);
541 pt = psb_mmu_pt_alloc_map_lock(pd, addr);
553 if (pd->hw_context != -1)
554 psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);
556 up_read(&pd->driver->sem);
558 if (pd->hw_context != -1)
559 psb_mmu_flush(pd->driver);
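
Lines 525-559 show the locking and flush discipline that every map/unmap entry point in this listing repeats: take the driver's semaphore for reading, rewrite the PTEs, flush the touched PTE cache lines while still holding the semaphore if the directory is bound to hardware, then drop the semaphore and flush the TLBs last. Schematically (all names below appear in the fragments above; the middle comment stands in for the per-function PTE edits):

	down_read(&pd->driver->sem);		/* exclude pd teardown / context rebinding */

	/* ... walk [f_address, f_address + num_pages * PAGE_SIZE) and rewrite PTEs ... */

	if (pd->hw_context != -1)		/* directory is live in a hardware context */
		psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);

	up_read(&pd->driver->sem);

	if (pd->hw_context != -1)
		psb_mmu_flush(pd->driver);	/* TLB flush after the PTEs are visible */
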
564 void psb_mmu_remove_pages(struct psb_mmu_pd *pd, unsigned long address,
586 down_read(&pd->driver->sem);
597 pt = psb_mmu_pt_map_lock(pd, addr);
610 if (pd->hw_context != -1)
611 psb_mmu_flush_ptes(pd, f_address, num_pages,
614 up_read(&pd->driver->sem);
616 if (pd->hw_context != -1)
617 psb_mmu_flush(pd->driver);
620 int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd, uint32_t start_pfn,
632 down_read(&pd->driver->sem);
639 pt = psb_mmu_pt_alloc_map_lock(pd, addr);
655 if (pd->hw_context != -1)
656 psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);
658 up_read(&pd->driver->sem);
660 if (pd->hw_context != -1)
661 psb_mmu_flush(pd->driver);
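
A caller-side sketch of the pfn-based interface: map num_pages of physically contiguous memory starting at start_pfn into the GPU range at gpu_addr, then tear the mapping down. The trailing type argument and the exact parameter order past what the listing shows are assumptions:

	/* Assumed full signatures:
	 *   int  psb_mmu_insert_pfn_sequence(pd, start_pfn, address, num_pages, type);
	 *   void psb_mmu_remove_pfn_sequence(pd, address, num_pages);
	 */
	ret = psb_mmu_insert_pfn_sequence(pd, start_pfn, gpu_addr, num_pages, 0);
	if (ret)
		return ret;

	/* ... hardware uses the mapping ... */

	psb_mmu_remove_pfn_sequence(pd, gpu_addr, num_pages);
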
666 int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
694 down_read(&pd->driver->sem);
703 pt = psb_mmu_pt_alloc_map_lock(pd, addr);
721 if (pd->hw_context != -1)
722 psb_mmu_flush_ptes(pd, f_address, num_pages,
725 up_read(&pd->driver->sem);
727 if (pd->hw_context != -1)
728 psb_mmu_flush(pd->driver);
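
The struct page based variant also takes tiling strides. A hedged sketch for a plain linear buffer, passing 0 for both strides; the full signature, including the final type argument, is assumed because the listing truncates it:

	/* Assumed full signature:
	 *   int psb_mmu_insert_pages(pd, pages, address, num_pages,
	 *                            desired_tile_stride, hw_tile_stride, type);
	 */
	ret = psb_mmu_insert_pages(pd, bo_pages, gpu_addr, npages, 0, 0, 0);
	if (ret)
		return ret;

	/* ... later, when the buffer is unbound ... */

	psb_mmu_remove_pages(pd, gpu_addr, npages, 0, 0);
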
733 int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
739 spinlock_t *lock = &pd->driver->lock;
741 down_read(&pd->driver->sem);
742 pt = psb_mmu_pt_map_lock(pd, virtual);
747 v = kmap_atomic(pd->p);
752 if (tmp != pd->invalid_pde || !(tmp & PSB_PTE_VALID) ||
753 !(pd->invalid_pte & PSB_PTE_VALID)) {
758 *pfn = pd->invalid_pte >> PAGE_SHIFT;
770 up_read(&pd->driver->sem);
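
psb_mmu_virtual_to_pfn() is the reverse lookup: it resolves a GPU virtual address to the backing pfn, returning the dummy page's pfn when the address hits the shared invalid PTE and an error when it is simply unmapped. A small usage sketch; the unsigned long out-parameter is an assumption consistent with line 758 above:

	unsigned long pfn;
	int ret;

	ret = psb_mmu_virtual_to_pfn(pd, gpu_addr, &pfn);
	if (ret)
		return ret;		/* not mapped and not covered by the dummy page */

	/* pfn now identifies the physical page backing gpu_addr. */
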