// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Copyright (c) by Jaroslav Kysela <perex@perex.cz>
 *                   Takashi Iwai <tiwai@suse.de>
 *
 *  Generic memory allocators
 */

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/genalloc.h>
#include <linux/vmalloc.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif
#include <sound/memalloc.h>

/*
 *
 *  Bus-specific memory allocators
 *
 */

#ifdef CONFIG_HAS_DMA
/* allocate the coherent DMA pages */
static void snd_malloc_dev_pages(struct snd_dma_buffer *dmab, size_t size)
{
	gfp_t gfp_flags;

	gfp_flags = GFP_KERNEL
		| __GFP_COMP	/* compound page lets parts be mapped */
		| __GFP_NORETRY /* don't trigger OOM-killer */
		| __GFP_NOWARN; /* no stack trace print - this call is non-critical */
	dmab->area = dma_alloc_coherent(dmab->dev.dev, size, &dmab->addr,
					gfp_flags);
#ifdef CONFIG_X86
	if (dmab->area && dmab->dev.type == SNDRV_DMA_TYPE_DEV_UC)
		set_memory_wc((unsigned long)dmab->area,
			      PAGE_ALIGN(size) >> PAGE_SHIFT);
#endif
}

/* free the coherent DMA pages */
static void snd_free_dev_pages(struct snd_dma_buffer *dmab)
{
#ifdef CONFIG_X86
	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_UC)
		set_memory_wb((unsigned long)dmab->area,
			      PAGE_ALIGN(dmab->bytes) >> PAGE_SHIFT);
#endif
	dma_free_coherent(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
}

#ifdef CONFIG_GENERIC_ALLOCATOR
/**
 * snd_malloc_dev_iram - allocate memory from on-chip internal RAM
 * @dmab: buffer allocation record to store the allocated data
 * @size: number of bytes to allocate from the iram
 *
 * This function requires an "iram" phandle provided via the device's of_node.
 */
static void snd_malloc_dev_iram(struct snd_dma_buffer *dmab, size_t size)
{
	struct device *dev = dmab->dev.dev;
	struct gen_pool *pool = NULL;

	dmab->area = NULL;
	dmab->addr = 0;

	if (dev->of_node)
		pool = of_gen_pool_get(dev->of_node, "iram", 0);

	if (!pool)
		return;

	/* Assign the pool into private_data field */
	dmab->private_data = pool;

	dmab->area = gen_pool_dma_alloc_align(pool, size, &dmab->addr,
					      PAGE_SIZE);
}

/**
 * snd_free_dev_iram - free memory allocated from the on-chip internal RAM
 * @dmab: buffer allocation record holding the data to release
 */
static void snd_free_dev_iram(struct snd_dma_buffer *dmab)
{
	struct gen_pool *pool = dmab->private_data;

	if (pool && dmab->area)
		gen_pool_free(pool, (unsigned long)dmab->area, dmab->bytes);
}
#endif /* CONFIG_GENERIC_ALLOCATOR */
#endif /* CONFIG_HAS_DMA */

/*
 *
 *  ALSA generic memory management
 *
 */

static inline gfp_t snd_mem_get_gfp_flags(const struct device *dev,
					  gfp_t default_gfp)
{
	if (!dev)
		return default_gfp;
	else
		return (__force gfp_t)(unsigned long)dev;
}
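
/*
 * Note (illustrative, not part of the original source): for the
 * SNDRV_DMA_TYPE_CONTINUOUS and SNDRV_DMA_TYPE_VMALLOC types the @device
 * argument of snd_dma_alloc_pages() is not a real struct device but the
 * GFP flags encoded as a pointer, which snd_mem_get_gfp_flags() above
 * decodes again.  Callers are assumed to build that pseudo pointer with
 * the snd_dma_continuous_data() helper from <sound/memalloc.h>, roughly:
 *
 *	struct snd_dma_buffer buf;
 *	int err;
 *
 *	err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_CONTINUOUS,
 *				  snd_dma_continuous_data(GFP_KERNEL),
 *				  4096, &buf);
 *	if (err < 0)
 *		return err;
 */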

/**
 * snd_dma_alloc_pages - allocate the buffer area according to the given type
 * @type: the DMA buffer type
 * @device: the device pointer
 * @size: the buffer size to allocate
 * @dmab: buffer allocation record to store the allocated data
 *
 * Calls the memory-allocator function for the corresponding
 * buffer type.
 *
 * Return: Zero if the buffer with the given size is allocated successfully,
 * otherwise a negative value on error.
 */
int snd_dma_alloc_pages(int type, struct device *device, size_t size,
			struct snd_dma_buffer *dmab)
{
	gfp_t gfp;

	if (WARN_ON(!size))
		return -ENXIO;
	if (WARN_ON(!dmab))
		return -ENXIO;

	dmab->dev.type = type;
	dmab->dev.dev = device;
	dmab->bytes = 0;
	dmab->area = NULL;
	dmab->addr = 0;
	dmab->private_data = NULL;
	switch (type) {
	case SNDRV_DMA_TYPE_CONTINUOUS:
		gfp = snd_mem_get_gfp_flags(device, GFP_KERNEL);
		dmab->area = alloc_pages_exact(size, gfp);
		break;
	case SNDRV_DMA_TYPE_VMALLOC:
		gfp = snd_mem_get_gfp_flags(device, GFP_KERNEL | __GFP_HIGHMEM);
		dmab->area = __vmalloc(size, gfp);
		break;
#ifdef CONFIG_HAS_DMA
#ifdef CONFIG_GENERIC_ALLOCATOR
	case SNDRV_DMA_TYPE_DEV_IRAM:
		snd_malloc_dev_iram(dmab, size);
		if (dmab->area)
			break;
		/* Internal memory is limited in size and may have no room
		 * left for this request, so fall back to a normal device
		 * allocation when the IRAM allocation fails.
		 */
		dmab->dev.type = SNDRV_DMA_TYPE_DEV;
		fallthrough;
#endif /* CONFIG_GENERIC_ALLOCATOR */
	case SNDRV_DMA_TYPE_DEV:
	case SNDRV_DMA_TYPE_DEV_UC:
		snd_malloc_dev_pages(dmab, size);
		break;
#endif
#ifdef CONFIG_SND_DMA_SGBUF
	case SNDRV_DMA_TYPE_DEV_SG:
	case SNDRV_DMA_TYPE_DEV_UC_SG:
		snd_malloc_sgbuf_pages(device, size, dmab, NULL);
		break;
#endif
	default:
		pr_err("snd-malloc: invalid device type %d\n", type);
		return -ENXIO;
	}
	if (!dmab->area)
		return -ENOMEM;
	dmab->bytes = size;
	return 0;
}
EXPORT_SYMBOL(snd_dma_alloc_pages);

/**
 * snd_dma_alloc_pages_fallback - allocate the buffer area according to the given type with fallback
 * @type: the DMA buffer type
 * @device: the device pointer
 * @size: the buffer size to allocate
 * @dmab: buffer allocation record to store the allocated data
 *
 * Calls the memory-allocator function for the corresponding
 * buffer type.  When no space is left, this function reduces the size and
 * tries to allocate again.  The size actually allocated is stored in
 * dmab->bytes.
 *
 * Return: Zero if a buffer is allocated successfully (possibly smaller than
 * requested), otherwise a negative value on error.
 */
int snd_dma_alloc_pages_fallback(int type, struct device *device, size_t size,
				 struct snd_dma_buffer *dmab)
{
	int err;

	while ((err = snd_dma_alloc_pages(type, device, size, dmab)) < 0) {
		if (err != -ENOMEM)
			return err;
		if (size <= PAGE_SIZE)
			return -ENOMEM;
		size >>= 1;
		size = PAGE_SIZE << get_order(size);
	}
	if (!dmab->area)
		return -ENOMEM;
	return 0;
}
EXPORT_SYMBOL(snd_dma_alloc_pages_fallback);
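
/*
 * Example (an illustrative sketch, not part of the original source): a
 * driver that can cope with a smaller buffer may use the fallback
 * allocator and read the size actually obtained from dmab->bytes.
 * "card->dev" is a placeholder for the driver's own struct device:
 *
 *	struct snd_dma_buffer buf;
 *	int err;
 *
 *	err = snd_dma_alloc_pages_fallback(SNDRV_DMA_TYPE_DEV, card->dev,
 *					   512 * 1024, &buf);
 *	if (err < 0)
 *		return err;
 *	dev_dbg(card->dev, "allocated %zu bytes\n", buf.bytes);
 */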

/**
 * snd_dma_free_pages - release the allocated buffer
 * @dmab: the buffer allocation record to release
 *
 * Releases the buffer previously allocated via snd_dma_alloc_pages().
 */
void snd_dma_free_pages(struct snd_dma_buffer *dmab)
{
	switch (dmab->dev.type) {
	case SNDRV_DMA_TYPE_CONTINUOUS:
		free_pages_exact(dmab->area, dmab->bytes);
		break;
	case SNDRV_DMA_TYPE_VMALLOC:
		vfree(dmab->area);
		break;
#ifdef CONFIG_HAS_DMA
#ifdef CONFIG_GENERIC_ALLOCATOR
	case SNDRV_DMA_TYPE_DEV_IRAM:
		snd_free_dev_iram(dmab);
		break;
#endif /* CONFIG_GENERIC_ALLOCATOR */
	case SNDRV_DMA_TYPE_DEV:
	case SNDRV_DMA_TYPE_DEV_UC:
		snd_free_dev_pages(dmab);
		break;
#endif
#ifdef CONFIG_SND_DMA_SGBUF
	case SNDRV_DMA_TYPE_DEV_SG:
	case SNDRV_DMA_TYPE_DEV_UC_SG:
		snd_free_sgbuf_pages(dmab);
		break;
#endif
	default:
		pr_err("snd-malloc: invalid device type %d\n", dmab->dev.type);
	}
}
EXPORT_SYMBOL(snd_dma_free_pages);
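
/*
 * Example (an illustrative sketch, not part of the original source): the
 * usual life cycle is one snd_dma_alloc_pages() call paired with one
 * snd_dma_free_pages() call once the hardware no longer uses the buffer.
 * "chip->dev" is a placeholder for the driver's own struct device:
 *
 *	struct snd_dma_buffer buf;
 *
 *	if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, chip->dev,
 *				PAGE_SIZE, &buf) < 0)
 *		return -ENOMEM;
 *	// buf.area is the CPU address, buf.addr the DMA address for the device
 *	snd_dma_free_pages(&buf);
 */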