Lines matching refs: emu (sound/pci/emu10k1/memory.c)

21  #define __set_ptb_entry(emu,page,addr) \
22  (((__le32 *)(emu)->ptb_pages.area)[page] = \
23  cpu_to_le32(((addr) << (emu->address_mode)) | (page)))
24  #define __get_ptb_entry(emu, page) \
25  (le32_to_cpu(((__le32 *)(emu)->ptb_pages.area)[page]))
37  #define set_ptb_entry(emu,page,addr) __set_ptb_entry(emu,page,addr)
39  #define set_silent_ptb(emu,page) __set_ptb_entry(emu,page,emu->silent_page.addr)
42  static inline void set_ptb_entry(struct snd_emu10k1 *emu, int page, dma_addr_t addr)
47  __set_ptb_entry(emu, page, addr);
48  dev_dbg(emu->card->dev, "mapped page %d to entry %.8x\n", page,
49  (unsigned int)__get_ptb_entry(emu, page));
53  static inline void set_silent_ptb(struct snd_emu10k1 *emu, int page)
59  __set_ptb_entry(emu, page, emu->silent_page.addr);
60  dev_dbg(emu->card->dev, "mapped silent page %d to entry %.8x\n",
61  page, (unsigned int)__get_ptb_entry(emu, page));
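
The __set_ptb_entry() macro encodes one page-table entry as the page's DMA address shifted left by emu->address_mode (0 or 1, depending on the chip's addressing mode) OR'ed with the index of the page the entry maps, stored little-endian. A minimal stand-alone sketch of that packing, assuming EMUPAGESIZE is 4096; all names here are illustrative, not the driver's:

    #include <stdint.h>
    #include <stdio.h>

    #define EMUPAGE_SHIFT 12                /* assumes EMUPAGESIZE == 4096 */

    static uint32_t ptb_pack(uint32_t addr, uint32_t page, unsigned mode)
    {
        /* addr is EMUPAGESIZE-aligned; shifting by mode (0 or 1) leaves
         * room for the extra page-index bit of the larger page table. */
        return (addr << mode) | page;
    }

    static uint32_t ptb_page(uint32_t entry, unsigned mode)
    {
        return entry & ((1u << (EMUPAGE_SHIFT + mode)) - 1);
    }

    static uint32_t ptb_addr(uint32_t entry, unsigned mode)
    {
        return (entry & ~((1u << (EMUPAGE_SHIFT + mode)) - 1)) >> mode;
    }

    int main(void)
    {
        uint32_t e = ptb_pack(0x12345000u, 7, 1);
        printf("entry=%08x addr=%08x page=%u\n", e, ptb_addr(e, 1), ptb_page(e, 1));
        return 0;
    }
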
95  static int search_empty_map_area(struct snd_emu10k1 *emu, int npages, struct list_head **nextp)
100 struct list_head *candidate = &emu->mapped_link_head;
103 list_for_each (pos, &emu->mapped_link_head) {
120 size = (emu->address_mode ? MAX_ALIGN_PAGES1 : MAX_ALIGN_PAGES0) - page;
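
search_empty_map_area() walks the page-sorted list of mapped blocks looking for the first hole big enough for the request; the same first-fit idea reappears in search_empty() below, over the allocator's block list. A sketch of the search, assuming page 0 is reserved for the silent page and using a plain sorted array in place of the kernel's list_head:

    #include <stdio.h>

    struct blk { int first, last; };        /* inclusive page range */

    static int find_gap(const struct blk *v, int n, int npages, int max_pages)
    {
        int page = 1;                       /* page 0 is reserved */
        for (int i = 0; i < n; i++) {
            if (v[i].first - page >= npages)
                return page;                /* hole before block i fits */
            page = v[i].last + 1;
        }
        return max_pages - page >= npages ? page : -1;  /* tail space */
    }

    int main(void)
    {
        struct blk v[] = { { 1, 4 }, { 9, 12 } };
        printf("%d\n", find_gap(v, 2, 3, 1024));        /* prints 5 */
        return 0;
    }
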
134 static int map_memblk(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
139 page = search_empty_map_area(emu, blk->pages, &next);
143 dev_err(emu->card->dev, "trying to map zero (reserved) page\n");
149 list_add_tail(&blk->mapped_order_link, &emu->mapped_order_link_head);
153 set_ptb_entry(emu, page, emu->page_addr_table[pg]);
165 static int unmap_memblk(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
173 if (p != &emu->mapped_link_head) {
180 if (p != &emu->mapped_link_head) {
184 end_page = (emu->address_mode ? MAX_ALIGN_PAGES1 : MAX_ALIGN_PAGES0);
193 set_silent_ptb(emu, mpage);
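
map_memblk() writes one PTB entry per page of the block; unmap_memblk() points the vacated entries back at the shared silent page, so the hardware never fetches from an unmapped address. The pattern over a flat array, using the mode-0 encoding and made-up values:

    #include <stdint.h>

    #define NPTB 64

    static uint32_t ptb[NPTB];
    static const uint32_t silent_addr = 0x1000;     /* stand-in silent page */

    static void map_block(int first, int npages, const uint32_t *addrs)
    {
        for (int i = 0; i < npages; i++)
            ptb[first + i] = addrs[i] | (uint32_t)(first + i);
    }

    static void unmap_block(int first, int npages)
    {
        for (int i = 0; i < npages; i++)
            ptb[first + i] = silent_addr | (uint32_t)(first + i);
    }

    int main(void)
    {
        const uint32_t addrs[2] = { 0x4000, 0x5000 };
        map_block(3, 2, addrs);
        unmap_block(3, 2);      /* entries 3..4 map the silent page again */
        return 0;
    }
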
206 search_empty(struct snd_emu10k1 *emu, int size)
214 list_for_each(p, &emu->memhdr->block) {
220 if (page + psize > emu->max_cache_pages)
225 blk = (struct snd_emu10k1_memblk *)__snd_util_memblk_new(emu->memhdr, psize << PAGE_SHIFT, p->prev);
237 static int is_valid_page(struct snd_emu10k1 *emu, dma_addr_t addr)
239 if (addr & ~emu->dma_mask) {
240 dev_err_ratelimited(emu->card->dev,
242 emu->dma_mask, (unsigned long)addr);
246 dev_err_ratelimited(emu->card->dev, "page is not aligned\n");
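
is_valid_page() rejects a DMA address that either exceeds the device's DMA mask or is not aligned to the 4 KiB EMU page. The same two checks stand-alone; the mask value in main() is illustrative:

    #include <stdbool.h>
    #include <stdint.h>

    static bool addr_ok(uint64_t addr, uint64_t dma_mask)
    {
        if (addr & ~dma_mask)
            return false;       /* beyond what the chip can address */
        if (addr & 0xfff)
            return false;       /* not aligned to the 4 KiB EMU page */
        return true;
    }

    int main(void)
    {
        return addr_ok(0x12345000u, 0x7fffffffu) ? 0 : 1;
    }
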
258 int snd_emu10k1_memblk_map(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
266 spin_lock_irqsave(&emu->memblk_lock, flags);
270 &emu->mapped_order_link_head);
271 spin_unlock_irqrestore(&emu->memblk_lock, flags);
274 err = map_memblk(emu, blk);
278 p = emu->mapped_order_link_head.next;
279 for (; p != &emu->mapped_order_link_head; p = nextp) {
284 size = unmap_memblk(emu, deleted);
287 err = map_memblk(emu, blk);
292 spin_unlock_irqrestore(&emu->memblk_lock, flags);
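
When map_memblk() returns -ENOMEM, snd_emu10k1_memblk_map() walks the mapped-order list and unmaps blocks (oldest mapping first), retrying once enough pages have been reclaimed. A toy model of that retry loop; it omits the driver's in-use check and the spinlock, and the pool size is made up:

    #include <errno.h>
    #include <stdio.h>

    #define POOL 16                 /* total PTB pages in this toy */

    static int mapped[8], nmapped, used;

    static int try_map(int pages)
    {
        if (used + pages > POOL)
            return -ENOMEM;
        mapped[nmapped++] = pages;
        used += pages;
        return 0;
    }

    static int evict_oldest(void)
    {
        if (!nmapped)
            return -ENOMEM;
        int freed = mapped[0];
        for (int i = 1; i < nmapped; i++)
            mapped[i - 1] = mapped[i];
        nmapped--;
        used -= freed;
        return freed;
    }

    static int map_with_eviction(int pages)
    {
        int err = try_map(pages);
        while (err == -ENOMEM) {
            if (evict_oldest() < 0)
                return -ENOMEM;     /* nothing left to evict */
            err = try_map(pages);
        }
        return err;
    }

    int main(void)
    {
        try_map(10);
        try_map(5);
        printf("%d used=%d\n", map_with_eviction(8), used);  /* 0 used=13 */
        return 0;
    }
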
302 snd_emu10k1_alloc_pages(struct snd_emu10k1 *emu, struct snd_pcm_substream *substream)
309 if (snd_BUG_ON(!emu))
312 runtime->dma_bytes >= (emu->address_mode ? MAXPAGES1 : MAXPAGES0) * EMUPAGESIZE))
314 hdr = emu->memhdr;
319 (emu->delay_pcm_irq * 2) : 0;
321 blk = search_empty(emu, runtime->dma_bytes + idx);
334 addr = emu->silent_page.addr;
337 if (! is_valid_page(emu, addr)) {
338 dev_err_ratelimited(emu->card->dev,
343 emu->page_addr_table[page] = addr;
344 emu->page_ptr_table[page] = NULL;
349 err = snd_emu10k1_memblk_map(emu, blk);
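
snd_emu10k1_alloc_pages() records a DMA address for every EMU page of the PCM buffer in emu->page_addr_table; those are the addresses snd_emu10k1_memblk_map() later programs into the PTB. A sketch of the bookkeeping for a contiguous buffer; EMUPAGE and the table size are assumptions:

    #include <stdint.h>

    #define EMUPAGE 4096

    static uint64_t page_addr[1024];    /* stands in for page_addr_table */

    static void record_pages(int first_page, uint64_t buf_addr, int npages)
    {
        for (int i = 0; i < npages; i++)
            page_addr[first_page + i] = buf_addr + (uint64_t)i * EMUPAGE;
    }

    int main(void)
    {
        record_pages(1, 0x10000000u, 4);    /* pages 1..4, a 16 KiB buffer */
        return 0;
    }
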
363 int snd_emu10k1_free_pages(struct snd_emu10k1 *emu, struct snd_util_memblk *blk)
365 if (snd_BUG_ON(!emu || !blk))
367 return snd_emu10k1_synth_free(emu, blk);
379 int snd_emu10k1_alloc_pages_maybe_wider(struct snd_emu10k1 *emu, size_t size,
382 if (emu->iommu_workaround) {
395 &emu->pci->dev, size, dmab);
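
Only the head of the iommu_workaround branch is visible above; in the driver this branch pads the request when rounding up to whole CPU pages would leave too little slack after the audio data, since the chip can read slightly past its buffers. Taking that as given, a sketch of the size adjustment, assuming 4 KiB CPU pages and a 1 KiB safety margin:

    #include <stddef.h>

    #define PAGE_SZ 4096

    static size_t widen(size_t size, int iommu_workaround)
    {
        if (iommu_workaround) {
            size_t real = ((size + PAGE_SZ - 1) / PAGE_SZ) * PAGE_SZ;
            if (real < size + 1024)     /* assumed margin */
                size += PAGE_SZ;
        }
        return size;
    }

    int main(void)
    {
        return widen(4096, 1) == 8192 ? 0 : 1;  /* exact fit gets padded */
    }
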
434 snd_emu10k1_synth_free(struct snd_emu10k1 *emu, struct snd_util_memblk *memblk)
436 struct snd_util_memhdr *hdr = emu->memhdr;
441 spin_lock_irqsave(&emu->memblk_lock, flags);
443 unmap_memblk(emu, blk);
444 spin_unlock_irqrestore(&emu->memblk_lock, flags);
445 synth_free_pages(emu, blk);
480 static void __synth_free_pages(struct snd_emu10k1 *emu, int first_page,
487 dmab.dev.dev = &emu->pci->dev;
490 if (emu->page_ptr_table[page] == NULL)
492 dmab.area = emu->page_ptr_table[page];
493 dmab.addr = emu->page_addr_table[page];
500 if (emu->iommu_workaround)
504 emu->page_addr_table[page] = 0;
505 emu->page_ptr_table[page] = NULL;
512 static int synth_alloc_pages(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
518 get_single_page_range(emu->memhdr, blk, &first_page, &last_page);
521 if (snd_emu10k1_alloc_pages_maybe_wider(emu, PAGE_SIZE,
524 if (!is_valid_page(emu, dmab.addr)) {
528 emu->page_addr_table[page] = dmab.addr;
529 emu->page_ptr_table[page] = dmab.area;
536 __synth_free_pages(emu, first_page, last_page);
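
synth_alloc_pages() obtains its pages one at a time, and a failure part-way through releases everything acquired so far (the __synth_free_pages() call on the error path above). The rollback shape, stand-alone:

    #include <errno.h>
    #include <stdlib.h>

    static void *pages[64];

    static int alloc_range(int first, int last)
    {
        int page;

        for (page = first; page <= last; page++) {
            pages[page] = malloc(4096);
            if (!pages[page])
                goto rollback;
        }
        return 0;

    rollback:
        while (--page >= first) {       /* free what we already got */
            free(pages[page]);
            pages[page] = NULL;
        }
        return -ENOMEM;
    }

    int main(void)
    {
        return alloc_range(2, 5);
    }
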
544 static int synth_free_pages(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
548 get_single_page_range(emu->memhdr, blk, &first_page, &last_page);
549 __synth_free_pages(emu, first_page, last_page);
554 static inline void *offset_ptr(struct snd_emu10k1 *emu, int page, int offset)
557 if (snd_BUG_ON(page < 0 || page >= emu->max_cache_pages))
559 ptr = emu->page_ptr_table[page];
561 dev_err(emu->card->dev,
572 int snd_emu10k1_synth_bzero(struct snd_emu10k1 *emu, struct snd_util_memblk *blk,
588 ptr = offset_ptr(emu, page + p->first_page, offset);
602 int snd_emu10k1_synth_copy_from_user(struct snd_emu10k1 *emu, struct snd_util_memblk *blk,
618 ptr = offset_ptr(emu, page + p->first_page, offset);
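
snd_emu10k1_synth_bzero() and snd_emu10k1_synth_copy_from_user() share one access pattern: the synth buffer is physically scattered, so a byte range is split at EMU page boundaries and each chunk is resolved through offset_ptr() to that page's kernel mapping. A sketch of the splitting loop for the bzero case, with page_ptr[] standing in for emu->page_ptr_table:

    #include <string.h>

    #define PAGE_SZ 4096

    static char *page_ptr[64];      /* per-page CPU mappings, may be sparse */

    static void bzero_scattered(int first_page, long offset, long size)
    {
        while (size > 0) {
            int page = first_page + (int)(offset / PAGE_SZ);
            long off = offset % PAGE_SZ;
            long n = PAGE_SZ - off < size ? PAGE_SZ - off : size;
            if (page_ptr[page])             /* skip unbacked pages */
                memset(page_ptr[page] + off, 0, (size_t)n);
            offset += n;
            size -= n;
        }
    }

    int main(void)
    {
        static char backing[PAGE_SZ * 2];
        page_ptr[3] = backing;
        page_ptr[4] = backing + PAGE_SZ;
        bzero_scattered(3, 100, 6000);      /* spans a page boundary */
        return 0;
    }
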