Lines matching refs: emu
21 #define __set_ptb_entry(emu,page,addr) \ argument
22 (((__le32 *)(emu)->ptb_pages.area)[page] = \
23 cpu_to_le32(((addr) << (emu->address_mode)) | (page)))
24 #define __get_ptb_entry(emu, page) \ argument
25 (le32_to_cpu(((__le32 *)(emu)->ptb_pages.area)[page]))
37 #define set_ptb_entry(emu,page,addr) __set_ptb_entry(emu,page,addr) argument
39 #define set_silent_ptb(emu,page) __set_ptb_entry(emu,page,emu->silent_page.addr) argument
42 static inline void set_ptb_entry(struct snd_emu10k1 *emu, int page, dma_addr_t addr) in set_ptb_entry() argument
47 __set_ptb_entry(emu, page, addr); in set_ptb_entry()
48 dev_dbg(emu->card->dev, "mapped page %d to entry %.8x\n", page, in set_ptb_entry()
49 (unsigned int)__get_ptb_entry(emu, page)); in set_ptb_entry()
53 static inline void set_silent_ptb(struct snd_emu10k1 *emu, int page) in set_silent_ptb() argument
59 __set_ptb_entry(emu, page, emu->silent_page.addr); in set_silent_ptb()
60 dev_dbg(emu->card->dev, "mapped silent page %d to entry %.8x\n", in set_silent_ptb()
61 page, (unsigned int)__get_ptb_entry(emu, page)); in set_silent_ptb()
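The inline versions above replace the simple macro aliases when the kernel page size differs from the chip's page size, so one kernel page has to occupy several consecutive table entries. A sketch of the likely splitting, with assumed page sizes and stand-in names (the loop body itself is not part of the matched lines):

    #include <stdint.h>

    #define HW_PAGE_SIZE   4096u            /* assumed EMUPAGESIZE */
    #define CPU_PAGE_SIZE 16384u            /* example: 16 KiB kernel pages */
    #define UNIT_PAGES (CPU_PAGE_SIZE / HW_PAGE_SIZE)

    /* Stand-in for __set_ptb_entry(): record one hardware page. */
    static void hw_set_entry(uint32_t *ptb, unsigned int page, uint64_t addr)
    {
        ptb[page] = (uint32_t)addr | page;  /* simplified encoding, see above */
    }

    /* Sketch of the inline set_ptb_entry(): one "unit" page is split across
     * UNIT_PAGES consecutive table entries, with the DMA address advancing
     * by one hardware page each step. */
    static void set_unit_page(uint32_t *ptb, unsigned int page, uint64_t addr)
    {
        page *= UNIT_PAGES;
        for (unsigned int i = 0; i < UNIT_PAGES; i++, page++, addr += HW_PAGE_SIZE)
            hw_set_entry(ptb, page, addr);
    }

    int main(void)
    {
        uint32_t ptb[UNIT_PAGES * 4] = { 0 };

        set_unit_page(ptb, 2, 0x40000);     /* fills entries 8..11 */
        return 0;
    }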
95 static int search_empty_map_area(struct snd_emu10k1 *emu, int npages, struct list_head **nextp) in search_empty_map_area() argument
100 struct list_head *candidate = &emu->mapped_link_head; in search_empty_map_area()
103 list_for_each (pos, &emu->mapped_link_head) { in search_empty_map_area()
120 size = (emu->address_mode ? MAX_ALIGN_PAGES1 : MAX_ALIGN_PAGES0) - page; in search_empty_map_area()
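search_empty_map_area() scans the blocks already mapped into the page table, in table order, for a hole of at least npages entries, with the tail of the table as the fallback; page 0 stays reserved (see the "zero (reserved) page" check below). A simplified first-fit stand-in over an array rather than the driver's list (its real placement heuristics are not fully visible in the matched lines):

    #include <stdio.h>

    struct mapped { int first_page, pages; };

    /* Simplified model: walk the already-mapped blocks in table order and
     * return the first gap large enough for npages, or -1 if even the tail
     * of the table cannot hold it.  The search starts at page 1 because
     * page 0 is reserved for the silent page. */
    static int find_gap(const struct mapped *m, int nblk, int npages, int max_pages)
    {
        int page = 1;

        for (int i = 0; i < nblk; i++) {
            if (m[i].first_page - page >= npages)
                return page;                /* hole before this block */
            page = m[i].first_page + m[i].pages;
        }
        return max_pages - page >= npages ? page : -1;
    }

    int main(void)
    {
        struct mapped m[] = { { 1, 4 }, { 8, 2 } };

        printf("gap for 3 pages at page %d\n", find_gap(m, 2, 3, 4096));
        return 0;
    }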
134 static int map_memblk(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk) in map_memblk() argument
139 page = search_empty_map_area(emu, blk->pages, &next); in map_memblk()
143 dev_err(emu->card->dev, "trying to map zero (reserved) page\n"); in map_memblk()
149 list_add_tail(&blk->mapped_order_link, &emu->mapped_order_link_head); in map_memblk()
153 set_ptb_entry(emu, page, emu->page_addr_table[pg]); in map_memblk()
165 static int unmap_memblk(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk) in unmap_memblk() argument
172 if ((p = blk->mapped_link.prev) != &emu->mapped_link_head) { in unmap_memblk()
177 if ((p = blk->mapped_link.next) != &emu->mapped_link_head) { in unmap_memblk()
181 end_page = (emu->address_mode ? MAX_ALIGN_PAGES1 : MAX_ALIGN_PAGES0); in unmap_memblk()
189 set_silent_ptb(emu, mpage); in unmap_memblk()
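map_memblk() asks search_empty_map_area() for a hole, links the block onto the mapped-order list, and copies its per-page DMA addresses (page_addr_table) into consecutive table entries; unmap_memblk() looks at the neighbouring mapped blocks to size the hole it opens and points the freed entries back at the silent page, so the hardware never fetches from freed memory. A minimal model of the two table updates, using the simplified encoding from above and a made-up silent-page address:

    #include <stdint.h>

    /* Model of the table writes in map_memblk(): copy each page's DMA
     * address into its slot. */
    static void map_blk(uint32_t *ptb, int first, int pages, const uint32_t *addrs)
    {
        for (int i = 0; i < pages; i++)
            ptb[first + i] = addrs[i] | (uint32_t)(first + i);
    }

    /* Model of unmap_memblk(): repoint the same slots at the silent page. */
    static void unmap_blk(uint32_t *ptb, int first, int pages, uint32_t silent_addr)
    {
        for (int i = 0; i < pages; i++)
            ptb[first + i] = silent_addr | (uint32_t)(first + i);
    }

    int main(void)
    {
        uint32_t ptb[16] = { 0 };
        const uint32_t addrs[2] = { 0x10000, 0x20000 };

        map_blk(ptb, 3, 2, addrs);
        unmap_blk(ptb, 3, 2, 0x90000);      /* 0x90000: made-up silent-page address */
        return 0;
    }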
202 search_empty(struct snd_emu10k1 *emu, int size) in search_empty() argument
210 list_for_each(p, &emu->memhdr->block) { in search_empty()
216 if (page + psize > emu->max_cache_pages) in search_empty()
221 …blk = (struct snd_emu10k1_memblk *)__snd_util_memblk_new(emu->memhdr, psize << PAGE_SHIFT, p->prev… in search_empty()
233 static int is_valid_page(struct snd_emu10k1 *emu, dma_addr_t addr) in is_valid_page() argument
235 if (addr & ~emu->dma_mask) { in is_valid_page()
236 dev_err_ratelimited(emu->card->dev, in is_valid_page()
238 emu->dma_mask, (unsigned long)addr); in is_valid_page()
242 dev_err_ratelimited(emu->card->dev, "page is not aligned\n"); in is_valid_page()
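is_valid_page() rejects DMA addresses that have bits set outside emu->dma_mask or that are not aligned to the hardware page. A userspace model of the two checks (the 4 KiB page size is an assumption):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define HW_PAGE_SIZE 4096u              /* assumed EMUPAGESIZE */

    /* Model of is_valid_page(): usable only if the address fits under the
     * chip's DMA mask and is page-aligned. */
    static bool page_ok(uint64_t addr, uint64_t dma_mask)
    {
        if (addr & ~dma_mask)
            return false;                   /* beyond what the chip can address */
        if (addr & (HW_PAGE_SIZE - 1))
            return false;                   /* not page-aligned */
        return true;
    }

    int main(void)
    {
        printf("%d %d\n", page_ok(0x7ffff000, 0x7fffffff),
               page_ok(0x100000000ull, 0x7fffffff));
        return 0;
    }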
254 int snd_emu10k1_memblk_map(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk) in snd_emu10k1_memblk_map() argument
262 spin_lock_irqsave(&emu->memblk_lock, flags); in snd_emu10k1_memblk_map()
266 &emu->mapped_order_link_head); in snd_emu10k1_memblk_map()
267 spin_unlock_irqrestore(&emu->memblk_lock, flags); in snd_emu10k1_memblk_map()
270 if ((err = map_memblk(emu, blk)) < 0) { in snd_emu10k1_memblk_map()
273 p = emu->mapped_order_link_head.next; in snd_emu10k1_memblk_map()
274 for (; p != &emu->mapped_order_link_head; p = nextp) { in snd_emu10k1_memblk_map()
279 size = unmap_memblk(emu, deleted); in snd_emu10k1_memblk_map()
282 err = map_memblk(emu, blk); in snd_emu10k1_memblk_map()
287 spin_unlock_irqrestore(&emu->memblk_lock, flags); in snd_emu10k1_memblk_map()
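snd_emu10k1_memblk_map() maps a block under memblk_lock; when map_memblk() fails for lack of space, it walks mapped_order_link_head from the oldest mapping, unmapping blocks to open a hole and retrying. A toy model of that evict-and-retry policy that tracks only page counts, not actual placement (the names, the locked flag, and the table size below are stand-ins):

    #include <stdbool.h>
    #include <stdio.h>

    #define TABLE_PAGES 16                  /* toy page-table size */

    struct blk { int pages; bool mapped; bool locked; };

    static int pages_used;

    /* Toy stand-ins for map_memblk()/unmap_memblk(). */
    static int try_map(struct blk *b)
    {
        if (pages_used + b->pages > TABLE_PAGES)
            return -1;                      /* no room */
        pages_used += b->pages;
        b->mapped = true;
        return 0;
    }

    static void unmap(struct blk *b)
    {
        pages_used -= b->pages;
        b->mapped = false;
    }

    /* Model of the retry loop in snd_emu10k1_memblk_map(): evict mapped,
     * unpinned blocks oldest-first and retry until the new block fits. */
    static int map_with_eviction(struct blk *want, struct blk *lru[], int n)
    {
        if (try_map(want) == 0)
            return 0;
        for (int i = 0; i < n; i++) {
            if (!lru[i]->mapped || lru[i]->locked)
                continue;
            unmap(lru[i]);
            if (try_map(want) == 0)
                return 0;
        }
        return -1;
    }

    int main(void)
    {
        struct blk a = { 10, false, false }, b = { 6, false, false };
        struct blk c = { 8, false, false };
        struct blk *lru[] = { &a, &b };

        try_map(&a);
        try_map(&b);
        printf("map c -> %d, pages used %d\n",
               map_with_eviction(&c, lru, 2), pages_used);
        return 0;
    }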
297 snd_emu10k1_alloc_pages(struct snd_emu10k1 *emu, struct snd_pcm_substream *substream) in snd_emu10k1_alloc_pages() argument
304 if (snd_BUG_ON(!emu)) in snd_emu10k1_alloc_pages()
307 runtime->dma_bytes >= (emu->address_mode ? MAXPAGES1 : MAXPAGES0) * EMUPAGESIZE)) in snd_emu10k1_alloc_pages()
309 hdr = emu->memhdr; in snd_emu10k1_alloc_pages()
314 (emu->delay_pcm_irq * 2) : 0; in snd_emu10k1_alloc_pages()
316 blk = search_empty(emu, runtime->dma_bytes + idx); in snd_emu10k1_alloc_pages()
329 addr = emu->silent_page.addr; in snd_emu10k1_alloc_pages()
332 if (! is_valid_page(emu, addr)) { in snd_emu10k1_alloc_pages()
333 dev_err_ratelimited(emu->card->dev, in snd_emu10k1_alloc_pages()
338 emu->page_addr_table[page] = addr; in snd_emu10k1_alloc_pages()
339 emu->page_ptr_table[page] = NULL; in snd_emu10k1_alloc_pages()
344 err = snd_emu10k1_memblk_map(emu, blk); in snd_emu10k1_alloc_pages()
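snd_emu10k1_alloc_pages() reserves a block large enough for the PCM buffer (plus extra padding when delay_pcm_irq is set), then fills page_addr_table with the DMA address of each buffer chunk, falling back to the silent page for pages past the end of the buffer and validating every address; page_ptr_table is left NULL for these pages, since only the synth paths below record a CPU pointer there. A userspace sketch of that per-page loop (function and parameter names are illustrative):

    #include <stdint.h>
    #include <stddef.h>

    /* Sketch of the per-page loop in snd_emu10k1_alloc_pages(): each page of
     * the block gets the DMA address of the matching chunk of the PCM
     * buffer; pages past the end of the buffer use the silent page. */
    static int fill_page_table(uint64_t *page_addr, void **page_ptr,
                               int first_page, int last_page,
                               const uint64_t *buf_pages, size_t buf_bytes,
                               size_t page_size, uint64_t silent_addr)
    {
        int idx = 0;

        for (int page = first_page; page <= last_page; page++, idx++) {
            size_t ofs = (size_t)idx * page_size;
            uint64_t addr = ofs >= buf_bytes ? silent_addr : buf_pages[idx];

            if (addr & (page_size - 1))
                return -1;                  /* stand-in for is_valid_page() */
            page_addr[page] = addr;
            page_ptr[page] = NULL;          /* PCM pages carry no CPU pointer here */
        }
        return 0;
    }

    int main(void)
    {
        uint64_t addrs[4] = { 0 }, buf[2] = { 0x10000, 0x11000 };
        void *ptrs[4] = { 0 };

        return fill_page_table(addrs, ptrs, 1, 3, buf, 2 * 4096, 4096, 0x9000);
    }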
358 int snd_emu10k1_free_pages(struct snd_emu10k1 *emu, struct snd_util_memblk *blk) in snd_emu10k1_free_pages() argument
360 if (snd_BUG_ON(!emu || !blk)) in snd_emu10k1_free_pages()
362 return snd_emu10k1_synth_free(emu, blk); in snd_emu10k1_free_pages()
374 int snd_emu10k1_alloc_pages_maybe_wider(struct snd_emu10k1 *emu, size_t size, in snd_emu10k1_alloc_pages_maybe_wider() argument
377 if (emu->iommu_workaround) { in snd_emu10k1_alloc_pages_maybe_wider()
390 &emu->pci->dev, size, dmab); in snd_emu10k1_alloc_pages_maybe_wider()
429 snd_emu10k1_synth_free(struct snd_emu10k1 *emu, struct snd_util_memblk *memblk) in snd_emu10k1_synth_free() argument
431 struct snd_util_memhdr *hdr = emu->memhdr; in snd_emu10k1_synth_free()
436 spin_lock_irqsave(&emu->memblk_lock, flags); in snd_emu10k1_synth_free()
438 unmap_memblk(emu, blk); in snd_emu10k1_synth_free()
439 spin_unlock_irqrestore(&emu->memblk_lock, flags); in snd_emu10k1_synth_free()
440 synth_free_pages(emu, blk); in snd_emu10k1_synth_free()
473 static void __synth_free_pages(struct snd_emu10k1 *emu, int first_page, in __synth_free_pages() argument
480 dmab.dev.dev = &emu->pci->dev; in __synth_free_pages()
483 if (emu->page_ptr_table[page] == NULL) in __synth_free_pages()
485 dmab.area = emu->page_ptr_table[page]; in __synth_free_pages()
486 dmab.addr = emu->page_addr_table[page]; in __synth_free_pages()
493 if (emu->iommu_workaround) in __synth_free_pages()
497 emu->page_addr_table[page] = 0; in __synth_free_pages()
498 emu->page_ptr_table[page] = NULL; in __synth_free_pages()
505 static int synth_alloc_pages(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk) in synth_alloc_pages() argument
511 get_single_page_range(emu->memhdr, blk, &first_page, &last_page); in synth_alloc_pages()
514 if (snd_emu10k1_alloc_pages_maybe_wider(emu, PAGE_SIZE, in synth_alloc_pages()
517 if (!is_valid_page(emu, dmab.addr)) { in synth_alloc_pages()
521 emu->page_addr_table[page] = dmab.addr; in synth_alloc_pages()
522 emu->page_ptr_table[page] = dmab.area; in synth_alloc_pages()
529 __synth_free_pages(emu, first_page, last_page); in synth_alloc_pages()
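synth_alloc_pages() allocates backing memory one page at a time via snd_emu10k1_alloc_pages_maybe_wider(), validates each DMA address, and records it in page_addr_table/page_ptr_table; if any page fails it unwinds with __synth_free_pages(). A userspace sketch of that allocate-and-roll-back pattern, with plain aligned_alloc() standing in for the DMA API:

    #include <stdint.h>
    #include <stdlib.h>

    #define HW_PAGE 4096u                   /* assumed EMUPAGESIZE */

    /* Counterpart of __synth_free_pages(): release pages [first, end). */
    static void free_range(void **ptrs, uint64_t *addrs, int first, int end)
    {
        for (int p = first; p < end; p++) {
            free(ptrs[p]);
            ptrs[p] = NULL;
            addrs[p] = 0;
        }
    }

    /* Sketch of synth_alloc_pages(): one page per iteration, recording both
     * the CPU pointer and a (faked) DMA address; any failure rolls back the
     * pages allocated so far. */
    static int alloc_range(void **ptrs, uint64_t *addrs, int first, int last)
    {
        for (int p = first; p <= last; p++) {
            void *area = aligned_alloc(HW_PAGE, HW_PAGE);

            if (!area) {
                free_range(ptrs, addrs, first, p);
                return -1;
            }
            ptrs[p] = area;
            addrs[p] = (uint64_t)(uintptr_t)area;  /* stand-in for dmab.addr */
        }
        return 0;
    }

    int main(void)
    {
        void *ptrs[8] = { 0 };
        uint64_t addrs[8] = { 0 };

        if (alloc_range(ptrs, addrs, 2, 5) == 0)
            free_range(ptrs, addrs, 2, 6);
        return 0;
    }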
537 static int synth_free_pages(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk) in synth_free_pages() argument
541 get_single_page_range(emu->memhdr, blk, &first_page, &last_page); in synth_free_pages()
542 __synth_free_pages(emu, first_page, last_page); in synth_free_pages()
547 static inline void *offset_ptr(struct snd_emu10k1 *emu, int page, int offset) in offset_ptr() argument
550 if (snd_BUG_ON(page < 0 || page >= emu->max_cache_pages)) in offset_ptr()
552 ptr = emu->page_ptr_table[page]; in offset_ptr()
554 dev_err(emu->card->dev, in offset_ptr()
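offset_ptr() turns a page index and an in-page offset into a CPU pointer via page_ptr_table, after bounds-checking the page; entries without a CPU mapping (the NULL pointers recorded for PCM pages above) are rejected with an error. A small model:

    #include <stddef.h>
    #include <stdio.h>

    /* Model of offset_ptr(): index the per-page pointer table and add the
     * in-page offset; NULL means the page has no CPU mapping. */
    static void *offset_ptr_model(void **page_ptr, int max_pages, int page, size_t offset)
    {
        if (page < 0 || page >= max_pages)
            return NULL;
        if (!page_ptr[page])
            return NULL;                    /* no CPU mapping for this page */
        return (char *)page_ptr[page] + offset;
    }

    int main(void)
    {
        char backing[4096];
        void *table[2] = { NULL, backing };

        printf("%p\n", offset_ptr_model(table, 2, 1, 128));
        return 0;
    }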
565 int snd_emu10k1_synth_bzero(struct snd_emu10k1 *emu, struct snd_util_memblk *blk, in snd_emu10k1_synth_bzero() argument
581 ptr = offset_ptr(emu, page + p->first_page, offset); in snd_emu10k1_synth_bzero()
595 int snd_emu10k1_synth_copy_from_user(struct snd_emu10k1 *emu, struct snd_util_memblk *blk, in snd_emu10k1_synth_copy_from_user() argument
611 ptr = offset_ptr(emu, page + p->first_page, offset); in snd_emu10k1_synth_copy_from_user()
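snd_emu10k1_synth_bzero() and snd_emu10k1_synth_copy_from_user() share the same shape: walk the block page by page, clamp each chunk at a page boundary, translate it with offset_ptr(), and then zero or copy that chunk. A userspace sketch with memset standing in for both operations (HW_PAGE is an assumed 4 KiB page):

    #include <string.h>
    #include <stddef.h>

    #define HW_PAGE 4096u

    /* Model of the page-by-page loop: each iteration handles at most the
     * rest of the current page, skipping pages without a CPU mapping.
     * memset stands in for the real work (zeroing, or copy_from_user in
     * the kernel). */
    static void synth_bzero_model(void **page_ptr, size_t start, size_t size)
    {
        size_t offset = start % HW_PAGE;
        size_t page = start / HW_PAGE;

        while (size > 0) {
            size_t chunk = HW_PAGE - offset;

            if (chunk > size)
                chunk = size;
            if (page_ptr[page])
                memset((char *)page_ptr[page] + offset, 0, chunk);
            size -= chunk;
            offset = 0;
            page++;
        }
    }

    int main(void)
    {
        static char p0[4096], p1[4096];
        void *table[2] = { p0, p1 };

        synth_bzero_model(table, 100, 5000);    /* spans both pages */
        return 0;
    }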