Lines matching refs: vm

The matches below appear to come from the ctxfi driver's device virtual-memory manager (sound/pci/ctxfi/ctvmem.c in the Linux tree); the number at the start of each match is its line number in that file. Read top to bottom, they cover a small first-fit allocator over the device address space (get_vm_block/put_vm_block), the PCM-buffer mapping built on top of it (ct_vm_map/ct_vm_unmap, ct_get_ptp_phys), and the create/destroy pair.

26  * Find or create vm block based on requested @size.
30 get_vm_block(struct ct_vm *vm, unsigned int size, struct ct_atc *atc)
36 if (size > vm->size) {
42 mutex_lock(&vm->lock);
43 list_for_each(pos, &vm->unused) {
48 if (pos == &vm->unused)
52 /* Move the vm node from unused list to used list directly */
53 list_move(&entry->list, &vm->used);
54 vm->size -= size;
65 list_add(&block->list, &vm->used);
68 vm->size -= size;
71 mutex_unlock(&vm->lock);
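
The get_vm_block matches above trace a first-fit allocator: vm->unused is scanned for the first free block that is large enough, which is either moved wholesale onto vm->used (the exact-fit case at line 53) or split, with the front part handed out. A minimal sketch of that logic follows; the struct layouts are assumed for illustration, sizes are taken as already page-aligned, and the atc argument from line 30 (not needed for the list work) is dropped.

    #include <linux/list.h>
    #include <linux/mutex.h>
    #include <linux/slab.h>

    /* Assumed shapes, for the sketch only; the driver's real
     * definitions live in its own headers and may differ. */
    struct ct_vm_block {
            struct list_head list;
            unsigned int addr;      /* block start in device space */
            unsigned int size;      /* block length */
    };

    struct ct_vm {
            unsigned int size;          /* total unallocated space left */
            struct list_head unused;    /* free blocks, sorted by addr */
            struct list_head used;      /* blocks currently handed out */
            struct mutex lock;
    };

    static struct ct_vm_block *
    get_vm_block(struct ct_vm *vm, unsigned int size)
    {
            struct ct_vm_block *block = NULL, *entry = NULL;
            struct list_head *pos;

            if (size > vm->size)    /* more than all remaining space */
                    return NULL;

            mutex_lock(&vm->lock);
            list_for_each(pos, &vm->unused) {
                    entry = list_entry(pos, struct ct_vm_block, list);
                    if (entry->size >= size)
                            break;  /* first fit */
            }
            if (pos == &vm->unused) /* ran off the end: nothing fits */
                    goto out;

            if (entry->size == size) {
                    /* Exact fit: move the vm node from the unused list
                     * to the used list directly (line 53). */
                    list_move(&entry->list, &vm->used);
                    vm->size -= size;
                    block = entry;
                    goto out;
            }

            /* Split: carve the request off the front of the free block. */
            block = kzalloc(sizeof(*block), GFP_KERNEL);
            if (!block)
                    goto out;
            block->addr = entry->addr;
            block->size = size;
            list_add(&block->list, &vm->used);
            entry->addr += size;
            entry->size -= size;
            vm->size -= size;
    out:
            mutex_unlock(&vm->lock);
            return block;
    }

Keeping vm->size as a running total of free space is what lets line 36 reject oversized requests before touching the list at all.
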
75 static void put_vm_block(struct ct_vm *vm, struct ct_vm_block *block)
82 mutex_lock(&vm->lock);
84 vm->size += block->size;
86 list_for_each(pos, &vm->unused) {
91 if (pos == &vm->unused) {
92 list_add_tail(&block->list, &vm->unused);
107 while (pre != &vm->unused) {
119 mutex_unlock(&vm->lock);
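
put_vm_block is the inverse: the freed block is spliced back into vm->unused at its address-sorted position (lines 86-92), then coalesced with any neighbour it touches (the backward walk at line 107). A sketch of that insert-and-merge sequence, reusing the types above; the exact-adjacency merge test is a reconstruction of what the matches imply, not a verbatim copy.

    #include <linux/list.h>
    #include <linux/mutex.h>
    #include <linux/slab.h>

    /* struct ct_vm / ct_vm_block as declared in the previous sketch. */

    static void put_vm_block(struct ct_vm *vm, struct ct_vm_block *block)
    {
            struct ct_vm_block *entry = NULL, *pre_ent;
            struct list_head *pos, *pre;

            mutex_lock(&vm->lock);
            list_del(&block->list);         /* off the used list */
            vm->size += block->size;

            /* Find the first free block starting at or after our end. */
            list_for_each(pos, &vm->unused) {
                    entry = list_entry(pos, struct ct_vm_block, list);
                    if (entry->addr >= block->addr + block->size)
                            break;
            }
            if (pos == &vm->unused) {
                    /* No successor: become the new tail. */
                    list_add_tail(&block->list, &vm->unused);
                    entry = block;
            } else if (block->addr + block->size == entry->addr) {
                    /* Flush against the successor: absorb into it. */
                    entry->addr = block->addr;
                    entry->size += block->size;
                    kfree(block);
            } else {
                    /* Gap before the successor: insert before pos,
                     * keeping the list sorted by address. */
                    list_add_tail(&block->list, pos);
                    entry = block;
            }

            /* Merge backwards while the predecessor is contiguous. */
            pos = &entry->list;
            pre = pos->prev;
            while (pre != &vm->unused) {
                    entry = list_entry(pos, struct ct_vm_block, list);
                    pre_ent = list_entry(pre, struct ct_vm_block, list);
                    if (pre_ent->addr + pre_ent->size != entry->addr)
                            break;          /* not adjacent: stop */
                    pre_ent->size += entry->size;
                    list_del(pos);
                    kfree(entry);
                    pos = pre;
                    pre = pos->prev;
            }
            mutex_unlock(&vm->lock);
    }

Because the list is kept fully coalesced, at most one merge can fire in each direction; the loop form just keeps that invariant self-healing.
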
124 ct_vm_map(struct ct_vm *vm, struct snd_pcm_substream *substream, int size)
132 block = get_vm_block(vm, size, atc);
139 ptp = (unsigned long *)vm->ptp[0].area;
152 static void ct_vm_unmap(struct ct_vm *vm, struct ct_vm_block *block)
155 put_vm_block(vm, block);
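
ct_vm_map is where the allocator meets the device page table: line 132 takes a block, and the code after line 139 writes one entry per device page into the first page-table page, starting at the index that block->addr corresponds to. ct_vm_unmap (line 155) only returns the block; stale entries are overwritten by the next mapping. A sketch, assuming the sketched struct ct_vm also carries a struct snd_dma_buffer ptp[CT_PTP_NUM] array and that CT_PAGE_SHIFT is the driver's device-page shift:

    #include <sound/pcm.h>

    /* Assumes requested sizes are multiples of the device page size
     * and that vm->ptp[0].area holds the (zeroth) page-table page. */

    static struct ct_vm_block *
    ct_vm_map(struct ct_vm *vm, struct snd_pcm_substream *substream, int size)
    {
            struct ct_vm_block *block;
            unsigned int pte_start, pages, i;
            unsigned long *ptp;

            block = get_vm_block(vm, size);     /* sketch version above */
            if (!block)
                    return NULL;

            /* One PTE per device page, starting at the block's index. */
            ptp = (unsigned long *)vm->ptp[0].area;
            pte_start = block->addr >> CT_PAGE_SHIFT;
            pages = block->size >> CT_PAGE_SHIFT;
            for (i = 0; i < pages; i++)
                    ptp[pte_start + i] =
                            snd_pcm_sgbuf_get_addr(substream,
                                                   i << CT_PAGE_SHIFT);

            return block;
    }

    static void ct_vm_unmap(struct ct_vm *vm, struct ct_vm_block *block)
    {
            put_vm_block(vm, block);    /* no PTE scrubbing required */
    }

Only ptp[0] is indexed at line 139, which suggests a mapping either has to fit in one page-table page or the pages are contiguous in that area; the matches alone do not settle it, so the sketch keeps the single-page form.
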
164 ct_get_ptp_phys(struct ct_vm *vm, int index)
166 return (index >= CT_PTP_NUM) ? ~0UL : vm->ptp[index].addr;
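
The ~0UL at line 166 is a sentinel: any index at or past CT_PTP_NUM yields it, so a caller can walk the page-table pages until the first miss. A hypothetical consumer; both function names here are invented for illustration:

    /* Hand every allocated page-table page to the hardware; the first
     * ~0UL from ct_get_ptp_phys (line 166) terminates the walk. */
    static void program_all_ptp_pages(struct ct_vm *vm)
    {
            unsigned long phys;
            int i;

            for (i = 0; ; i++) {
                    phys = ct_get_ptp_phys(vm, i);
                    if (phys == ~0UL)
                            break;
                    program_hw_ptp_slot(i, phys);   /* hypothetical hw write */
            }
    }
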
171 struct ct_vm *vm;
177 vm = kzalloc(sizeof(*vm), GFP_KERNEL);
178 if (!vm)
181 mutex_init(&vm->lock);
187 PAGE_SIZE, &vm->ptp[i]);
193 ct_vm_destroy(vm);
196 vm->size = CT_ADDRS_PER_PAGE * i;
197 vm->map = ct_vm_map;
198 vm->unmap = ct_vm_unmap;
199 vm->get_ptp_phys = ct_get_ptp_phys;
200 INIT_LIST_HEAD(&vm->unused);
201 INIT_LIST_HEAD(&vm->used);
205 block->size = vm->size;
206 list_add(&block->list, &vm->unused);
209 *rvm = vm;
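
The constructor (lines 171-209, presumably ct_vm_create) wires everything together: allocate the ct_vm, grab one DMA page per page-table page, size the device address space from the page count (CT_ADDRS_PER_PAGE addresses each, line 196), install the three ops, and seed the free list with a single block spanning the whole space. A condensed sketch; the list heads are initialized before the allocation loop here so that the line-193 failure path can safely walk them in ct_vm_destroy:

    #include <linux/pci.h>
    #include <sound/memalloc.h>

    /* Assumes struct ct_vm additionally carries the three op pointers
     * installed below (lines 197-199 of the matches). */

    int ct_vm_create(struct ct_vm **rvm, struct pci_dev *pci)
    {
            struct ct_vm *vm;
            struct ct_vm_block *block;
            int i, err = 0;

            *rvm = NULL;
            vm = kzalloc(sizeof(*vm), GFP_KERNEL);
            if (!vm)
                    return -ENOMEM;

            mutex_init(&vm->lock);
            INIT_LIST_HEAD(&vm->unused);    /* init early: the error  */
            INIT_LIST_HEAD(&vm->used);      /* path below walks these */

            /* One DMA-able page per device page-table page. */
            for (i = 0; i < CT_PTP_NUM; i++) {
                    err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV,
                                              &pci->dev, PAGE_SIZE,
                                              &vm->ptp[i]);
                    if (err < 0)
                            break;
            }
            if (err < 0) {
                    ct_vm_destroy(vm);      /* frees what did allocate */
                    return -ENOMEM;
            }

            /* CT_ADDRS_PER_PAGE device addresses per PTP page (line 196). */
            vm->size = CT_ADDRS_PER_PAGE * i;
            vm->map = ct_vm_map;
            vm->unmap = ct_vm_unmap;
            vm->get_ptp_phys = ct_get_ptp_phys;

            /* Seed the free list: one block spanning the entire space. */
            block = kzalloc(sizeof(*block), GFP_KERNEL);
            if (block) {
                    block->addr = 0;
                    block->size = vm->size;
                    list_add(&block->list, &vm->unused);
            }

            *rvm = vm;
            return 0;
    }

If the seed block allocation fails, the vm still comes back usable in a degenerate sense: the free list is empty, so every get_vm_block call simply returns NULL until the vm is destroyed.
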
215 void ct_vm_destroy(struct ct_vm *vm)
222 while (!list_empty(&vm->used)) {
223 pos = vm->used.next;
228 while (!list_empty(&vm->unused)) {
229 pos = vm->unused.next;
237 snd_dma_free_pages(&vm->ptp[i]);
239 vm->size = 0;
241 kfree(vm);
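
Teardown mirrors creation: drain both block lists, release the page-table pages, and free the ct_vm itself. The caller presumably has to guarantee the hardware is done with every mapping first, since blocks still on vm->used are discarded without any unmap step. A sketch:

    #include <linux/slab.h>
    #include <sound/memalloc.h>

    void ct_vm_destroy(struct ct_vm *vm)
    {
            struct ct_vm_block *entry;
            struct list_head *pos;
            int i;

            /* Drain both lists; nodes still on vm->used are simply
             * discarded, the device being presumed quiescent. */
            while (!list_empty(&vm->used)) {
                    pos = vm->used.next;
                    list_del(pos);
                    entry = list_entry(pos, struct ct_vm_block, list);
                    kfree(entry);
            }
            while (!list_empty(&vm->unused)) {
                    pos = vm->unused.next;
                    list_del(pos);
                    entry = list_entry(pos, struct ct_vm_block, list);
                    kfree(entry);
            }

            /* Free only the page-table pages that were allocated. */
            for (i = 0; i < CT_PTP_NUM; i++)
                    if (vm->ptp[i].area)
                            snd_dma_free_pages(&vm->ptp[i]);

            vm->size = 0;
            kfree(vm);
    }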