/**
 * Copyright (C) 2008, Creative Technology Ltd. All Rights Reserved.
 *
 * This source file is released under GPL v2 license (no other versions).
 * See the COPYING file included in the main directory of this source
 * distribution for the license terms and conditions.
 *
 * @File	ctvmem.c
 *
 * @Brief
 * This file contains the implementation of the virtual memory management
 * object for the card device.
 *
 * @Author	Liu Chun
 * @Date	Apr 1 2008
 */

#include "ctvmem.h"
#include "ctatc.h"
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <sound/pcm.h>

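/*
 * Each page-table page (PTP) holds CT_PTES_PER_PAGE entries, and every entry
 * maps one CT_PAGE_SIZE page, so a single PTP page covers CT_ADDRS_PER_PAGE
 * bytes of device virtual address space.
 */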
#define CT_PTES_PER_PAGE (CT_PAGE_SIZE / sizeof(void *))
#define CT_ADDRS_PER_PAGE (CT_PTES_PER_PAGE * CT_PAGE_SIZE)

/* *
 * Find a vm block that satisfies the requested @size, splitting a larger
 * free block if necessary.
 * @size must be page aligned.
 * */
static struct ct_vm_block *
get_vm_block(struct ct_vm *vm, unsigned int size, struct ct_atc *atc)
{
	struct ct_vm_block *block = NULL, *entry;
	struct list_head *pos;

	size = CT_PAGE_ALIGN(size);
	if (size > vm->size) {
		dev_err(atc->card->dev,
			"Fail! Not enough device virtual memory space available!\n");
		return NULL;
	}

	mutex_lock(&vm->lock);
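	/* First-fit scan of the address-ordered free list. */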
	list_for_each(pos, &vm->unused) {
		entry = list_entry(pos, struct ct_vm_block, list);
		if (entry->size >= size)
			break; /* found a block that is big enough */
	}
	if (pos == &vm->unused)
		goto out;

	if (entry->size == size) {
		/* Move the vm node from unused list to used list directly */
		list_move(&entry->list, &vm->used);
		vm->size -= size;
		block = entry;
		goto out;
	}

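	/*
	 * The free block is larger than needed: carve the request off its
	 * front and shrink the remaining free entry accordingly.
	 */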
	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (!block)
		goto out;

	block->addr = entry->addr;
	block->size = size;
	list_add(&block->list, &vm->used);
	entry->addr += size;
	entry->size -= size;
	vm->size -= size;

 out:
	mutex_unlock(&vm->lock);
	return block;
}

static void put_vm_block(struct ct_vm *vm, struct ct_vm_block *block)
{
	struct ct_vm_block *entry, *pre_ent;
	struct list_head *pos, *pre;

	block->size = CT_PAGE_ALIGN(block->size);

	mutex_lock(&vm->lock);
	list_del(&block->list);
	vm->size += block->size;

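	/*
	 * Put the block back into the address-ordered free list, merging it
	 * with the following entry when the two ranges are contiguous.
	 */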
	list_for_each(pos, &vm->unused) {
		entry = list_entry(pos, struct ct_vm_block, list);
		if (entry->addr >= (block->addr + block->size))
			break; /* found a position */
	}
	if (pos == &vm->unused) {
		list_add_tail(&block->list, &vm->unused);
		entry = block;
	} else {
		if ((block->addr + block->size) == entry->addr) {
			entry->addr = block->addr;
			entry->size += block->size;
			kfree(block);
		} else {
			__list_add(&block->list, pos->prev, pos);
			entry = block;
		}
	}

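	/*
	 * Walk backwards and keep merging with the preceding free entry as
	 * long as the two ranges are contiguous.
	 */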
	pos = &entry->list;
	pre = pos->prev;
	while (pre != &vm->unused) {
		entry = list_entry(pos, struct ct_vm_block, list);
		pre_ent = list_entry(pre, struct ct_vm_block, list);
		if ((pre_ent->addr + pre_ent->size) != entry->addr)
			break; /* not contiguous, stop merging */

		pre_ent->size += entry->size;
		list_del(pos);
		kfree(entry);
		pos = pre;
		pre = pos->prev;
	}
	mutex_unlock(&vm->lock);
}

/* Map host addr (kmalloced/vmalloced) to device logical addr. */
static struct ct_vm_block *
ct_vm_map(struct ct_vm *vm, struct snd_pcm_substream *substream, int size)
{
	struct ct_vm_block *block;
	unsigned int pte_start;
	unsigned i, pages;
	unsigned long *ptp;
	struct ct_atc *atc = snd_pcm_substream_chip(substream);

	block = get_vm_block(vm, size, atc);
	if (block == NULL) {
		dev_err(atc->card->dev,
			"No virtual memory block big enough to allocate!\n");
		return NULL;
	}

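	/*
	 * Write the DMA address of every CT_PAGE_SIZE chunk of the substream
	 * buffer into the device page-table entries covering this block.
	 */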
	ptp = (unsigned long *)vm->ptp[0].area;
	pte_start = (block->addr >> CT_PAGE_SHIFT);
	pages = block->size >> CT_PAGE_SHIFT;
	for (i = 0; i < pages; i++) {
		unsigned long addr;
		addr = snd_pcm_sgbuf_get_addr(substream, i << CT_PAGE_SHIFT);
		ptp[pte_start + i] = addr;
	}

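	/*
	 * Keep the caller-requested (unaligned) size; put_vm_block() will
	 * re-align it when the block is returned to the free list.
	 */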
	block->size = size;
	return block;
}

static void ct_vm_unmap(struct ct_vm *vm, struct ct_vm_block *block)
{
	/* do unmapping */
	put_vm_block(vm, block);
}

/* *
 * Return the host physical address of the @index-th device page-table page
 * on success, or ~0UL on failure.
 * The first ~0UL returned marks the end of the page-table list.
 * */
static dma_addr_t
ct_get_ptp_phys(struct ct_vm *vm, int index)
{
	dma_addr_t addr;

	addr = (index >= CT_PTP_NUM) ? ~0UL : vm->ptp[index].addr;

	return addr;
}

int ct_vm_create(struct ct_vm **rvm, struct pci_dev *pci)
{
	struct ct_vm *vm;
	struct ct_vm_block *block;
	int i, err = 0;

	*rvm = NULL;

	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
	if (!vm)
		return -ENOMEM;

	mutex_init(&vm->lock);
	/*
	 * Initialize the block lists before the page allocations so that
	 * ct_vm_destroy() can safely walk them on the error path below.
	 */
	INIT_LIST_HEAD(&vm->unused);
	INIT_LIST_HEAD(&vm->used);

	/* Allocate page table pages */
	for (i = 0; i < CT_PTP_NUM; i++) {
		err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV,
					  snd_dma_pci_data(pci),
					  PAGE_SIZE, &vm->ptp[i]);
		if (err < 0)
			break;
	}
	if (err < 0) {
		/* Roll back any page table pages already allocated */
		ct_vm_destroy(vm);
		return -ENOMEM;
	}
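	/*
	 * Each successfully allocated PTP page maps CT_ADDRS_PER_PAGE bytes
	 * of device virtual address space.
	 */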
	vm->size = CT_ADDRS_PER_PAGE * i;
	vm->map = ct_vm_map;
	vm->unmap = ct_vm_unmap;
	vm->get_ptp_phys = ct_get_ptp_phys;
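	/* Seed the free list with one block covering the whole space. */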
	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (block) {
		block->addr = 0;
		block->size = vm->size;
		list_add(&block->list, &vm->unused);
	}

	*rvm = vm;
	return 0;
}

/* The caller must ensure no mapping pages are being used
 * by hardware before calling this function. */
void ct_vm_destroy(struct ct_vm *vm)
{
	int i;
	struct list_head *pos;
	struct ct_vm_block *entry;

	/* free used and unused list nodes */
	while (!list_empty(&vm->used)) {
		pos = vm->used.next;
		list_del(pos);
		entry = list_entry(pos, struct ct_vm_block, list);
		kfree(entry);
	}
	while (!list_empty(&vm->unused)) {
		pos = vm->unused.next;
		list_del(pos);
		entry = list_entry(pos, struct ct_vm_block, list);
		kfree(entry);
	}

	/* free allocated page table pages */
	for (i = 0; i < CT_PTP_NUM; i++)
		snd_dma_free_pages(&vm->ptp[i]);

	vm->size = 0;

	kfree(vm);
}