// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Copyright (c) by Jaroslav Kysela <perex@perex.cz>
 *  Copyright (c) by Takashi Iwai <tiwai@suse.de>
 *
 *  EMU10K1 memory page allocation (PTB area)
 */

#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/time.h>
#include <linux/mutex.h>
#include <linux/export.h>

#include <sound/core.h>
#include <sound/emu10k1.h>
/* The page arguments of these two macros are Emu pages (EMUPAGESIZE,
 * 4096 bytes), not the kernel-aligned pages used elsewhere in this file.
 */
#define __set_ptb_entry(emu,page,addr) \
	(((__le32 *)(emu)->ptb_pages.area)[page] = \
	 cpu_to_le32(((addr) << (emu->address_mode)) | (page)))
#define __get_ptb_entry(emu, page) \
	(le32_to_cpu(((__le32 *)(emu)->ptb_pages.area)[page]))
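
/*
 * Illustrative example (not part of the driver): each PTB entry packs a
 * page-aligned DMA address, shifted by address_mode, together with its
 * own index.  With address_mode == 1, a hypothetical entry for Emu page
 * 3 backed by DMA address 0x12340000 would be stored as
 *
 *	cpu_to_le32((0x12340000 << 1) | 3) == cpu_to_le32(0x24680003)
 */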

#define UNIT_PAGES		(PAGE_SIZE / EMUPAGESIZE)
#define MAX_ALIGN_PAGES0	(MAXPAGES0 / UNIT_PAGES)
#define MAX_ALIGN_PAGES1	(MAXPAGES1 / UNIT_PAGES)
/* get the aligned page from an offset address */
#define get_aligned_page(offset)	((offset) >> PAGE_SHIFT)
/* get the offset address from an aligned page */
#define aligned_page_offset(page)	((page) << PAGE_SHIFT)
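
/*
 * Illustrative example (assumed page sizes): with 4 KiB kernel pages,
 * UNIT_PAGES == 1 and kernel pages map 1:1 onto Emu pages.  On a
 * hypothetical 16 KiB PAGE_SIZE build, UNIT_PAGES == 4, so one aligned
 * page covers four consecutive PTB entries and the MAX_ALIGN_PAGES*
 * limits shrink accordingly.
 */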

#if PAGE_SIZE == EMUPAGESIZE && !IS_ENABLED(CONFIG_DYNAMIC_DEBUG)
/* fill the PTB entry corresponding to page with addr */
#define set_ptb_entry(emu,page,addr)	__set_ptb_entry(emu,page,addr)
/* fill the PTB entry corresponding to page with the silence pointer */
#define set_silent_ptb(emu,page)	__set_ptb_entry(emu,page,emu->silent_page.addr)
#else
/* fill PTB entries -- here we need to fill UNIT_PAGES entries per aligned page */
static inline void set_ptb_entry(struct snd_emu10k1 *emu, int page, dma_addr_t addr)
{
	int i;

	page *= UNIT_PAGES;
	for (i = 0; i < UNIT_PAGES; i++, page++) {
		__set_ptb_entry(emu, page, addr);
		dev_dbg(emu->card->dev, "mapped page %d to entry %.8x\n", page,
			(unsigned int)__get_ptb_entry(emu, page));
		addr += EMUPAGESIZE;
	}
}

static inline void set_silent_ptb(struct snd_emu10k1 *emu, int page)
{
	int i;

	page *= UNIT_PAGES;
	for (i = 0; i < UNIT_PAGES; i++, page++) {
		/* do not increment addr -- every entry points at the silent page */
		__set_ptb_entry(emu, page, emu->silent_page.addr);
		dev_dbg(emu->card->dev, "mapped silent page %d to entry %.8x\n",
			page, (unsigned int)__get_ptb_entry(emu, page));
	}
}
#endif /* PAGE_SIZE */


/* prototypes of the synth page helpers defined later in this file */
static int synth_alloc_pages(struct snd_emu10k1 *hw, struct snd_emu10k1_memblk *blk);
static int synth_free_pages(struct snd_emu10k1 *hw, struct snd_emu10k1_memblk *blk);

#define get_emu10k1_memblk(l,member)	list_entry(l, struct snd_emu10k1_memblk, member)


/* initialize the emu10k1 part of a memory block */
static void emu10k1_memblk_init(struct snd_emu10k1_memblk *blk)
{
	blk->mapped_page = -1;
	INIT_LIST_HEAD(&blk->mapped_link);
	INIT_LIST_HEAD(&blk->mapped_order_link);
	blk->map_locked = 0;

	blk->first_page = get_aligned_page(blk->mem.offset);
	blk->last_page = get_aligned_page(blk->mem.offset + blk->mem.size - 1);
	blk->pages = blk->last_page - blk->first_page + 1;
}

/*
 * search for an empty region on the PTB with the given size
 *
 * if an empty region is found, return its start page and store the next
 * mapped block in nextp
 * if not found, return a negative error code
 */
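/*
 * Illustrative example (hypothetical layout): with mapped blocks at
 * pages [1..4] and [10..12], a request for npages == 6 finds the hole
 * [5..9] too small (5 pages), so the tail region starting at page 13 is
 * returned if it is large enough; a request for exactly 5 pages would
 * return page 5 immediately as an exact fit.
 */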
static int search_empty_map_area(struct snd_emu10k1 *emu, int npages, struct list_head **nextp)
{
	int page = 1, found_page = -ENOMEM;
	int max_size = npages;
	int size;
	struct list_head *candidate = &emu->mapped_link_head;
	struct list_head *pos;

	list_for_each (pos, &emu->mapped_link_head) {
		struct snd_emu10k1_memblk *blk = get_emu10k1_memblk(pos, mapped_link);

		if (blk->mapped_page < 0)
			continue;
		size = blk->mapped_page - page;
		if (size == npages) {
			/* exact fit */
			*nextp = pos;
			return page;
		} else if (size > max_size) {
			/* we look for the maximum empty hole */
			max_size = size;
			candidate = pos;
			found_page = page;
		}
		page = blk->mapped_page + blk->pages;
	}
	size = (emu->address_mode ? MAX_ALIGN_PAGES1 : MAX_ALIGN_PAGES0) - page;
	if (size >= max_size) {
		*nextp = pos;
		return page;
	}
	*nextp = candidate;
	return found_page;
}

/*
 * map a memory block onto emu10k1's PTB
 *
 * call with memblk_lock held
 */
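/*
 * Note: a mapped block sits on two lists at once -- mapped_link, kept
 * sorted by PTB position so that hole searching works, and
 * mapped_order_link, kept in mapping order so that the oldest block can
 * be evicted first when the PTB is full.
 */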
static int map_memblk(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int page, pg;
	struct list_head *next;

	page = search_empty_map_area(emu, blk->pages, &next);
	if (page < 0) /* not found */
		return page;
	if (page == 0) {
		dev_err(emu->card->dev, "trying to map zero (reserved) page\n");
		return -EINVAL;
	}
	/* insert this block at the proper position in the mapped list */
	list_add_tail(&blk->mapped_link, next);
	/* append this as the newest block to the order list */
	list_add_tail(&blk->mapped_order_link, &emu->mapped_order_link_head);
	blk->mapped_page = page;
	/* fill PTB */
	for (pg = blk->first_page; pg <= blk->last_page; pg++) {
		set_ptb_entry(emu, page, emu->page_addr_table[pg]);
		page++;
	}
	return 0;
}

/*
 * unmap the block
 * return the size (in pages) of the resulting empty region
 *
 * call with memblk_lock held
 */
static int unmap_memblk(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int start_page, end_page, mpage, pg;
	struct list_head *p;
	struct snd_emu10k1_memblk *q;

	/* calculate the expected size of the empty region */
	p = blk->mapped_link.prev;
	if (p != &emu->mapped_link_head) {
		q = get_emu10k1_memblk(p, mapped_link);
		start_page = q->mapped_page + q->pages;
	} else {
		start_page = 1;
	}
	p = blk->mapped_link.next;
	if (p != &emu->mapped_link_head) {
		q = get_emu10k1_memblk(p, mapped_link);
		end_page = q->mapped_page;
	} else {
		end_page = (emu->address_mode ? MAX_ALIGN_PAGES1 : MAX_ALIGN_PAGES0);
	}

	/* remove links */
	list_del(&blk->mapped_link);
	list_del(&blk->mapped_order_link);
	/* clear PTB */
	mpage = blk->mapped_page;
	for (pg = blk->first_page; pg <= blk->last_page; pg++) {
		set_silent_ptb(emu, mpage);
		mpage++;
	}
	blk->mapped_page = -1;
	return end_page - start_page; /* return the new empty size */
}

/*
 * search for empty pages with the given size, and create a memory block
 *
 * unlike synth_alloc(), the memory block is aligned to the page start
 */
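/*
 * Illustrative example (hypothetical state): if existing blocks occupy
 * aligned pages [0..2] and [5..7], a request for psize == 2 fits at
 * page 3 (3 + 2 <= 5) and the new block is inserted before the second
 * block; if nothing fits between blocks, the block is placed after the
 * last one, provided max_cache_pages is not exceeded.
 */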
static struct snd_emu10k1_memblk *
search_empty(struct snd_emu10k1 *emu, int size)
{
	struct list_head *p;
	struct snd_emu10k1_memblk *blk;
	int page, psize;

	psize = get_aligned_page(size + PAGE_SIZE - 1);
	page = 0;
	list_for_each(p, &emu->memhdr->block) {
		blk = get_emu10k1_memblk(p, mem.list);
		if (page + psize <= blk->first_page)
			goto __found_pages;
		page = blk->last_page + 1;
	}
	if (page + psize > emu->max_cache_pages)
		return NULL;

__found_pages:
	/* create a new memory block */
	blk = (struct snd_emu10k1_memblk *)
		__snd_util_memblk_new(emu->memhdr, psize << PAGE_SHIFT, p->prev);
	if (blk == NULL)
		return NULL;
	blk->mem.offset = aligned_page_offset(page); /* set aligned offset */
	emu10k1_memblk_init(blk);
	return blk;
}


/*
 * check whether the given DMA address is valid as a page address
 */
static int is_valid_page(struct snd_emu10k1 *emu, dma_addr_t addr)
{
	if (addr & ~emu->dma_mask) {
		dev_err_ratelimited(emu->card->dev,
			"max memory size is 0x%lx (addr = 0x%lx)!!\n",
			emu->dma_mask, (unsigned long)addr);
		return 0;
	}
	if (addr & (EMUPAGESIZE - 1)) {
		dev_err_ratelimited(emu->card->dev, "page is not aligned\n");
		return 0;
	}
	return 1;
}

/*
 * map the given memory block onto the PTB
 * if the block is already mapped, only update the link order
 * if no empty pages are found, try to release unused memory blocks
 * and retry the mapping
 */
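/*
 * Illustrative sketch of the eviction pass below (behavior, not new
 * code): walking mapped_order_link_head from the oldest entry, each
 * unlocked block is unmapped until unmap_memblk() reports an empty
 * region of at least blk->pages, at which point the mapping is retried.
 * Locked blocks (map_locked != 0), e.g. active PCM buffers, are skipped.
 */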
int snd_emu10k1_memblk_map(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int err;
	int size;
	struct list_head *p, *nextp;
	struct snd_emu10k1_memblk *deleted;
	unsigned long flags;

	spin_lock_irqsave(&emu->memblk_lock, flags);
	if (blk->mapped_page >= 0) {
		/* update order link */
		list_move_tail(&blk->mapped_order_link,
			       &emu->mapped_order_link_head);
		spin_unlock_irqrestore(&emu->memblk_lock, flags);
		return 0;
	}
	err = map_memblk(emu, blk);
	if (err < 0) {
		/* not enough pages - try to unmap some blocks,
		 * starting from the oldest block
		 */
		p = emu->mapped_order_link_head.next;
		for (; p != &emu->mapped_order_link_head; p = nextp) {
			nextp = p->next;
			deleted = get_emu10k1_memblk(p, mapped_order_link);
			if (deleted->map_locked)
				continue;
			size = unmap_memblk(emu, deleted);
			if (size >= blk->pages) {
				/* OK, the empty region is large enough now */
				err = map_memblk(emu, blk);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&emu->memblk_lock, flags);
	return err;
}
EXPORT_SYMBOL(snd_emu10k1_memblk_map);

/*
 * page allocation for DMA
 */
struct snd_util_memblk *
snd_emu10k1_alloc_pages(struct snd_emu10k1 *emu, struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_util_memhdr *hdr;
	struct snd_emu10k1_memblk *blk;
	int page, err, idx;

	if (snd_BUG_ON(!emu))
		return NULL;
	if (snd_BUG_ON(runtime->dma_bytes <= 0 ||
		       runtime->dma_bytes >= (emu->address_mode ? MAXPAGES1 : MAXPAGES0) * EMUPAGESIZE))
		return NULL;
	hdr = emu->memhdr;
	if (snd_BUG_ON(!hdr))
		return NULL;

	idx = runtime->period_size >= runtime->buffer_size ?
					(emu->delay_pcm_irq * 2) : 0;
	mutex_lock(&hdr->block_mutex);
	blk = search_empty(emu, runtime->dma_bytes + idx);
	if (blk == NULL) {
		mutex_unlock(&hdr->block_mutex);
		return NULL;
	}
	/* fill in the buffer addresses, but the kernel page pointers are
	 * not stored, so that the pages are not released in synth_free()
	 */
	idx = 0;
	for (page = blk->first_page; page <= blk->last_page; page++, idx++) {
		unsigned long ofs = idx << PAGE_SHIFT;
		dma_addr_t addr;

		if (ofs >= runtime->dma_bytes)
			addr = emu->silent_page.addr;
		else
			addr = snd_pcm_sgbuf_get_addr(substream, ofs);
		if (!is_valid_page(emu, addr)) {
			dev_err_ratelimited(emu->card->dev,
				"emu: failure page = %d\n", idx);
			mutex_unlock(&hdr->block_mutex);
			return NULL;
		}
		emu->page_addr_table[page] = addr;
		emu->page_ptr_table[page] = NULL;
	}

	/* set PTB entries */
	blk->map_locked = 1; /* do not unmap this block! */
	err = snd_emu10k1_memblk_map(emu, blk);
	if (err < 0) {
		__snd_util_mem_free(hdr, (struct snd_util_memblk *)blk);
		mutex_unlock(&hdr->block_mutex);
		return NULL;
	}
	mutex_unlock(&hdr->block_mutex);
	return (struct snd_util_memblk *)blk;
}
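
/*
 * Illustrative caller sketch (assumed, simplified): a PCM hw_params
 * callback would typically allocate the page-table mapping like this:
 *
 *	struct snd_util_memblk *blk;
 *
 *	blk = snd_emu10k1_alloc_pages(emu, substream);
 *	if (!blk)
 *		return -ENOMEM;
 *	... use blk->offset as the device-side buffer start ...
 *	snd_emu10k1_free_pages(emu, blk);	// in hw_free
 */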


/*
 * release a DMA buffer from the page table
 */
int snd_emu10k1_free_pages(struct snd_emu10k1 *emu, struct snd_util_memblk *blk)
{
	if (snd_BUG_ON(!emu || !blk))
		return -EINVAL;
	return snd_emu10k1_synth_free(emu, blk);
}

/*
 * allocate DMA pages, widening the allocation if necessary
 *
 * See the comment above snd_emu10k1_detect_iommu() in emu10k1_main.c for
 * why this might be needed.
 *
 * If you modify this function, check whether __synth_free_pages() also needs
 * changes.
 */
int snd_emu10k1_alloc_pages_maybe_wider(struct snd_emu10k1 *emu, size_t size,
					struct snd_dma_buffer *dmab)
{
	if (emu->iommu_workaround) {
		size_t npages = DIV_ROUND_UP(size, PAGE_SIZE);
		size_t size_real = npages * PAGE_SIZE;

		/*
		 * The device has been observed to access up to 256 extra
		 * bytes, but use 1k to be safe.
		 */
		if (size_real < size + 1024)
			size += PAGE_SIZE;
	}

	return snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV,
				   snd_dma_pci_data(emu->pci), size, dmab);
}
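
/*
 * Worked example (assuming 4 KiB pages): for size == 3500, size_real is
 * 4096; since 4096 < 3500 + 1024, one extra page is added and 7596
 * bytes are requested.  For size == 3000, 4096 >= 3000 + 1024 holds, so
 * the tail slack already covers the device's overshoot and no widening
 * is needed.
 */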

/*
 * memory allocation using multiple pages (for synth)
 * Unlike the DMA allocation above, non-contiguous pages are assigned.
 */

/*
 * allocate a synth sample area
 */
struct snd_util_memblk *
snd_emu10k1_synth_alloc(struct snd_emu10k1 *hw, unsigned int size)
{
	struct snd_emu10k1_memblk *blk;
	struct snd_util_memhdr *hdr = hw->memhdr;

	mutex_lock(&hdr->block_mutex);
	blk = (struct snd_emu10k1_memblk *)__snd_util_mem_alloc(hdr, size);
	if (blk == NULL) {
		mutex_unlock(&hdr->block_mutex);
		return NULL;
	}
	if (synth_alloc_pages(hw, blk)) {
		__snd_util_mem_free(hdr, (struct snd_util_memblk *)blk);
		mutex_unlock(&hdr->block_mutex);
		return NULL;
	}
	snd_emu10k1_memblk_map(hw, blk);
	mutex_unlock(&hdr->block_mutex);
	return (struct snd_util_memblk *)blk;
}
EXPORT_SYMBOL(snd_emu10k1_synth_alloc);
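
/*
 * Illustrative lifecycle sketch (assumed caller, simplified): a synth
 * sample loader might use this API like so:
 *
 *	struct snd_util_memblk *blk;
 *
 *	blk = snd_emu10k1_synth_alloc(emu, sample_bytes);
 *	if (!blk)
 *		return -ENOMEM;
 *	if (snd_emu10k1_synth_copy_from_user(emu, blk, 0, buf, sample_bytes))
 *		goto error;
 *	...
 *	snd_emu10k1_synth_free(emu, blk);
 */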

/*
 * free a synth sample area
 */
int
snd_emu10k1_synth_free(struct snd_emu10k1 *emu, struct snd_util_memblk *memblk)
{
	struct snd_util_memhdr *hdr = emu->memhdr;
	struct snd_emu10k1_memblk *blk = (struct snd_emu10k1_memblk *)memblk;
	unsigned long flags;

	mutex_lock(&hdr->block_mutex);
	spin_lock_irqsave(&emu->memblk_lock, flags);
	if (blk->mapped_page >= 0)
		unmap_memblk(emu, blk);
	spin_unlock_irqrestore(&emu->memblk_lock, flags);
	synth_free_pages(emu, blk);
	__snd_util_mem_free(hdr, memblk);
	mutex_unlock(&hdr->block_mutex);
	return 0;
}
EXPORT_SYMBOL(snd_emu10k1_synth_free);

/* calculate the page range that actually needs new allocations, skipping
 * boundary pages shared with neighboring blocks
 */
static void get_single_page_range(struct snd_util_memhdr *hdr,
				  struct snd_emu10k1_memblk *blk,
				  int *first_page_ret, int *last_page_ret)
{
	struct list_head *p;
	struct snd_emu10k1_memblk *q;
	int first_page, last_page;

	first_page = blk->first_page;
	p = blk->mem.list.prev;
	if (p != &hdr->block) {
		q = get_emu10k1_memblk(p, mem.list);
		if (q->last_page == first_page)
			first_page++;  /* first page was already allocated */
	}
	last_page = blk->last_page;
	p = blk->mem.list.next;
	if (p != &hdr->block) {
		q = get_emu10k1_memblk(p, mem.list);
		if (q->first_page == last_page)
			last_page--; /* last page was already allocated */
	}
	*first_page_ret = first_page;
	*last_page_ret = last_page;
}
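
/*
 * Illustrative example (hypothetical neighbors): if the previous block
 * ends on aligned page 7 and this block starts on page 7, the two share
 * that kernel page, so first_page is bumped to 8 and only pages 8..last
 * are newly allocated; the symmetric check applies to the last page.
 */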

/* release allocated pages */
static void __synth_free_pages(struct snd_emu10k1 *emu, int first_page,
			       int last_page)
{
	struct snd_dma_buffer dmab;
	int page;

	dmab.dev.type = SNDRV_DMA_TYPE_DEV;
	dmab.dev.dev = snd_dma_pci_data(emu->pci);

	for (page = first_page; page <= last_page; page++) {
		if (emu->page_ptr_table[page] == NULL)
			continue;
		dmab.area = emu->page_ptr_table[page];
		dmab.addr = emu->page_addr_table[page];

		/*
		 * please keep me in sync with the logic in
		 * snd_emu10k1_alloc_pages_maybe_wider()
		 */
		dmab.bytes = PAGE_SIZE;
		if (emu->iommu_workaround)
			dmab.bytes *= 2;

		snd_dma_free_pages(&dmab);
		emu->page_addr_table[page] = 0;
		emu->page_ptr_table[page] = NULL;
	}
}

/*
 * allocate kernel pages
 */
static int synth_alloc_pages(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int page, first_page, last_page;
	struct snd_dma_buffer dmab;

	emu10k1_memblk_init(blk);
	get_single_page_range(emu->memhdr, blk, &first_page, &last_page);
	/* allocate kernel pages */
	for (page = first_page; page <= last_page; page++) {
		if (snd_emu10k1_alloc_pages_maybe_wider(emu, PAGE_SIZE,
							&dmab) < 0)
			goto __fail;
		if (!is_valid_page(emu, dmab.addr)) {
			snd_dma_free_pages(&dmab);
			goto __fail;
		}
		emu->page_addr_table[page] = dmab.addr;
		emu->page_ptr_table[page] = dmab.area;
	}
	return 0;

__fail:
	/* release the previously allocated pages */
	last_page = page - 1;
	__synth_free_pages(emu, first_page, last_page);

	return -ENOMEM;
}

/*
 * free pages
 */
static int synth_free_pages(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int first_page, last_page;

	get_single_page_range(emu->memhdr, blk, &first_page, &last_page);
	__synth_free_pages(emu, first_page, last_page);
	return 0;
}

/* calculate the buffer pointer from an offset address */
static inline void *offset_ptr(struct snd_emu10k1 *emu, int page, int offset)
{
	char *ptr;

	if (snd_BUG_ON(page < 0 || page >= emu->max_cache_pages))
		return NULL;
	ptr = emu->page_ptr_table[page];
	if (!ptr) {
		dev_err(emu->card->dev,
			"access to NULL ptr: page = %d\n", page);
		return NULL;
	}
	ptr += offset & (PAGE_SIZE - 1);
	return (void *)ptr;
}

/*
 * bzero(blk + offset, size)
 */
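/*
 * Illustrative example (assuming 4 KiB pages): clearing size == 5000
 * bytes starting at in-page offset 3000 is split at the page boundary:
 * 1096 bytes (up to offset 4096) from the first page, then the
 * remaining 3904 bytes from the start of the next page.
 */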
int snd_emu10k1_synth_bzero(struct snd_emu10k1 *emu, struct snd_util_memblk *blk,
			    int offset, int size)
{
	int page, nextofs, end_offset, temp, temp1;
	void *ptr;
	struct snd_emu10k1_memblk *p = (struct snd_emu10k1_memblk *)blk;

	offset += blk->offset & (PAGE_SIZE - 1);
	end_offset = offset + size;
	page = get_aligned_page(offset);
	do {
		nextofs = aligned_page_offset(page + 1);
		temp = nextofs - offset;	/* bytes left in this page */
		temp1 = end_offset - offset;	/* bytes left in total */
		if (temp1 < temp)
			temp = temp1;
		ptr = offset_ptr(emu, page + p->first_page, offset);
		if (ptr)
			memset(ptr, 0, temp);
		offset = nextofs;
		page++;
	} while (offset < end_offset);
	return 0;
}
EXPORT_SYMBOL(snd_emu10k1_synth_bzero);

/*
 * copy_from_user(blk + offset, data, size)
 */
int snd_emu10k1_synth_copy_from_user(struct snd_emu10k1 *emu, struct snd_util_memblk *blk,
				     int offset, const char __user *data, int size)
{
	int page, nextofs, end_offset, temp, temp1;
	void *ptr;
	struct snd_emu10k1_memblk *p = (struct snd_emu10k1_memblk *)blk;

	offset += blk->offset & (PAGE_SIZE - 1);
	end_offset = offset + size;
	page = get_aligned_page(offset);
	do {
		nextofs = aligned_page_offset(page + 1);
		temp = nextofs - offset;	/* bytes left in this page */
		temp1 = end_offset - offset;	/* bytes left in total */
		if (temp1 < temp)
			temp = temp1;
		ptr = offset_ptr(emu, page + p->first_page, offset);
		if (ptr && copy_from_user(ptr, data, temp))
			return -EFAULT;
		offset = nextofs;
		data += temp;
		page++;
	} while (offset < end_offset);
	return 0;
}
EXPORT_SYMBOL(snd_emu10k1_synth_copy_from_user);