| /drivers/gpu/drm/nouveau/nvkm/core/ |
| D | memory.c |
    30   nvkm_memory_tags_put(struct nvkm_memory *memory, struct nvkm_device *device,   [in nvkm_memory_tags_put(), argument]
    39   kfree(memory->tags);   [in nvkm_memory_tags_put()]
    40   memory->tags = NULL;   [in nvkm_memory_tags_put()]
    48   nvkm_memory_tags_get(struct nvkm_memory *memory, struct nvkm_device *device,   [in nvkm_memory_tags_get(), argument]
    56   if ((tags = memory->tags)) {   [in nvkm_memory_tags_get()]
    94   *ptags = memory->tags = tags;   [in nvkm_memory_tags_get()]
   101   struct nvkm_memory *memory)   [in nvkm_memory_ctor(), argument]
   103   memory->func = func;   [in nvkm_memory_ctor()]
   104   kref_init(&memory->kref);   [in nvkm_memory_ctor()]
   110   struct nvkm_memory *memory = container_of(kref, typeof(*memory), kref);   [in nvkm_memory_del(), local]
   [all …]
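The memory.c matches show the lifetime pattern behind struct nvkm_memory: the constructor initialises an embedded kref, and the release path recovers the containing object with container_of() before freeing it. Below is a minimal kernel-style sketch of that kref pattern; the demo_obj type and function names are illustrative, not the nouveau ones.

```c
#include <linux/container_of.h>
#include <linux/kref.h>
#include <linux/slab.h>

/* Illustrative object embedding a kref, mirroring struct nvkm_memory. */
struct demo_obj {
	struct kref kref;
	void *payload;
};

static void demo_obj_release(struct kref *kref)
{
	/* Recover the containing object from its embedded kref. */
	struct demo_obj *obj = container_of(kref, struct demo_obj, kref);

	kfree(obj->payload);
	kfree(obj);
}

static struct demo_obj *demo_obj_new(void)
{
	struct demo_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (obj)
		kref_init(&obj->kref);	/* reference count starts at 1 */
	return obj;
}

static void demo_obj_put(struct demo_obj *obj)
{
	if (obj)
		kref_put(&obj->kref, demo_obj_release);
}
```

nvkm_memory_unref(), seen in the gpuobj.c and umem.c listings below, plays the role of demo_obj_put() here: the last reference dropped triggers the release callback.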
|
| D | firmware.c |
   113   #define nvkm_firmware_mem(p) container_of((p), struct nvkm_firmware, mem.memory)
   116   nvkm_firmware_mem_sgl(struct nvkm_memory *memory)   [in nvkm_firmware_mem_sgl(), argument]
   118   struct nvkm_firmware *fw = nvkm_firmware_mem(memory);   [in nvkm_firmware_mem_sgl()]
   132   nvkm_firmware_mem_map(struct nvkm_memory *memory, u64 offset, struct nvkm_vmm *vmm,   [in nvkm_firmware_mem_map(), argument]
   135   struct nvkm_firmware *fw = nvkm_firmware_mem(memory);   [in nvkm_firmware_mem_map()]
   137   .memory = &fw->mem.memory,   [in nvkm_firmware_mem_map()]
   139   .sgl = nvkm_firmware_mem_sgl(memory),   [in nvkm_firmware_mem_map()]
   149   nvkm_firmware_mem_size(struct nvkm_memory *memory)   [in nvkm_firmware_mem_size(), argument]
   151   struct scatterlist *sgl = nvkm_firmware_mem_sgl(memory);   [in nvkm_firmware_mem_size()]
   157   nvkm_firmware_mem_addr(struct nvkm_memory *memory)   [in nvkm_firmware_mem_addr(), argument]
   [all …]
|
| D | gpuobj.c |
    50   return nvkm_memory_map(gpuobj->memory, offset, vmm, vma, argv, argc);   [in nvkm_gpuobj_heap_map()]
    56   return nvkm_ro32(gpuobj->memory, offset);   [in nvkm_gpuobj_heap_rd32()]
    62   nvkm_wo32(gpuobj->memory, offset, data);   [in nvkm_gpuobj_heap_wr32()]
    70   nvkm_done(gpuobj->memory);   [in nvkm_gpuobj_heap_release()]
    92   gpuobj->map = nvkm_kmap(gpuobj->memory);   [in nvkm_gpuobj_heap_acquire()]
   202   abs(align), zero, &gpuobj->memory);   [in nvkm_gpuobj_ctor()]
   207   gpuobj->addr = nvkm_memory_addr(gpuobj->memory);   [in nvkm_gpuobj_ctor()]
   208   gpuobj->size = nvkm_memory_size(gpuobj->memory);   [in nvkm_gpuobj_ctor()]
   222   nvkm_memory_unref(&gpuobj->memory);   [in nvkm_gpuobj_del()]
   250   nvkm_gpuobj_wrap(struct nvkm_memory *memory, struct nvkm_gpuobj **pgpuobj)   [in nvkm_gpuobj_wrap(), argument]
   [all …]
|
| /drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ |
| D | mem.c |
    22   #define nvkm_mem(p) container_of((p), struct nvkm_mem, memory)
    31   struct nvkm_memory memory;   [member]
    43   nvkm_mem_target(struct nvkm_memory *memory)   [in nvkm_mem_target(), argument]
    45   return nvkm_mem(memory)->target;   [in nvkm_mem_target()]
    49   nvkm_mem_page(struct nvkm_memory *memory)   [in nvkm_mem_page(), argument]
    55   nvkm_mem_addr(struct nvkm_memory *memory)   [in nvkm_mem_addr(), argument]
    57   struct nvkm_mem *mem = nvkm_mem(memory);   [in nvkm_mem_addr()]
    64   nvkm_mem_size(struct nvkm_memory *memory)   [in nvkm_mem_size(), argument]
    66   return nvkm_mem(memory)->pages << PAGE_SHIFT;   [in nvkm_mem_size()]
    70   nvkm_mem_map_dma(struct nvkm_memory *memory, u64 offset, struct nvkm_vmm *vmm,   [in nvkm_mem_map_dma(), argument]
   [all …]
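The mem.c matches illustrate the common kernel downcast idiom: the implementation struct embeds the generic struct nvkm_memory, and a container_of() wrapper macro recovers the outer struct from a pointer to the embedded member. A small self-contained C sketch of the same idiom; the base/impl names are illustrative.

```c
#include <stddef.h>
#include <stdio.h>

/* container_of() as used throughout the kernel, expressed via offsetof(). */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct base {			/* plays the role of struct nvkm_memory */
	int kind;
};

struct impl {			/* plays the role of struct nvkm_mem */
	unsigned long pages;
	struct base base;	/* embedded generic object */
};

#define to_impl(p) container_of((p), struct impl, base)

static unsigned long impl_size(struct base *b)
{
	return to_impl(b)->pages << 12;	/* pages to bytes, 4 KiB pages assumed */
}

int main(void)
{
	struct impl m = { .pages = 3, .base = { .kind = 1 } };

	printf("%lu\n", impl_size(&m.base));	/* prints 12288 */
	return 0;
}
```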
|
| D | umem.c |
    38   struct nvkm_memory *memory = NULL;   [in nvkm_umem_search(), local]
    48   memory = nvkm_memory_ref(umem->memory);   [in nvkm_umem_search()]
    56   memory = nvkm_memory_ref(umem->memory);   [in nvkm_umem_search()]
    59   return memory ? memory : ERR_PTR(-ENOENT);   [in nvkm_umem_search()]
    98   int ret = nvkm_mem_map_host(umem->memory, &umem->map);   [in nvkm_umem_map()]
   103   *length = nvkm_memory_size(umem->memory);   [in nvkm_umem_map()]
   109   int ret = mmu->func->mem.umap(mmu, umem->memory, argv, argc,   [in nvkm_umem_map()]
   130   nvkm_memory_unref(&umem->memory);   [in nvkm_umem_dtor()]
   178   &umem->memory);   [in nvkm_umem_new()]
   186   args->v0.page = nvkm_memory_page(umem->memory);   [in nvkm_umem_new()]
   [all …]
|
| D | vmmgp100.c |
    40   nvkm_kmap(pt->memory);   [in gp100_vmm_pfn_unmap()]
    42   u32 datalo = nvkm_ro32(pt->memory, pt->base + ptei * 8 + 0);   [in gp100_vmm_pfn_unmap()]
    43   u32 datahi = nvkm_ro32(pt->memory, pt->base + ptei * 8 + 4);   [in gp100_vmm_pfn_unmap()]
    51   nvkm_done(pt->memory);   [in gp100_vmm_pfn_unmap()]
    59   nvkm_kmap(pt->memory);   [in gp100_vmm_pfn_clear()]
    61   u32 datalo = nvkm_ro32(pt->memory, pt->base + ptei * 8 + 0);   [in gp100_vmm_pfn_clear()]
    62   u32 datahi = nvkm_ro32(pt->memory, pt->base + ptei * 8 + 4);   [in gp100_vmm_pfn_clear()]
    70   nvkm_done(pt->memory);   [in gp100_vmm_pfn_clear()]
    81   nvkm_kmap(pt->memory);   [in gp100_vmm_pgt_pfn()]
   111   nvkm_done(pt->memory);   [in gp100_vmm_pgt_pfn()]
   [all …]
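The vmmgp100.c matches read each 8-byte page-table entry at byte offset pt->base + ptei * 8 as two 32-bit halves (datalo/datahi) inside a kmap/done bracket. A small self-contained sketch of just that unpack/repack step over a plain u32-backed table; the 8-byte entry stride comes from the snippet, the PTE bit layout is not modeled, and the base offset is expressed here in 32-bit words rather than bytes.

```c
#include <stdint.h>

/* Read one 8-byte entry stored as two consecutive 32-bit words (lo, hi). */
static uint64_t pte_read(const uint32_t *table, uint32_t base_words, uint32_t ptei)
{
	/* entry ptei occupies two u32 slots starting at base_words + ptei * 2 */
	uint32_t datalo = table[base_words + ptei * 2 + 0];
	uint32_t datahi = table[base_words + ptei * 2 + 1];

	return ((uint64_t)datahi << 32) | datalo;
}

/* Write one 8-byte entry back as two 32-bit halves. */
static void pte_write(uint32_t *table, uint32_t base_words, uint32_t ptei, uint64_t data)
{
	table[base_words + ptei * 2 + 0] = (uint32_t)data;
	table[base_words + ptei * 2 + 1] = (uint32_t)(data >> 32);
}
```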
|
| D | uvmm.c |
   138   if (ret = -EINVAL, !vma->memory) {   [in nvkm_uvmm_mthd_unmap()]
   160   struct nvkm_memory *memory;   [in nvkm_uvmm_mthd_map(), local]
   166   handle = args->v0.memory;   [in nvkm_uvmm_mthd_map()]
   174   memory = nvkm_umem_search(client, handle);   [in nvkm_uvmm_mthd_map()]
   175   if (IS_ERR(memory)) {   [in nvkm_uvmm_mthd_map()]
   176   VMM_DEBUG(vmm, "memory %016llx %ld\n", handle, PTR_ERR(memory));   [in nvkm_uvmm_mthd_map()]
   177   return PTR_ERR(memory);   [in nvkm_uvmm_mthd_map()]
   191   if (ret = -EINVAL, vma->mapped && !vma->memory) {   [in nvkm_uvmm_mthd_map()]
   197   if (addr + size > vma->addr + vma->size || vma->memory ||   [in nvkm_uvmm_mthd_map()]
   201   !!vma->memory, vma->refd, vma->mapref,   [in nvkm_uvmm_mthd_map()]
   [all …]
|
| /drivers/gpu/drm/nouveau/nvkm/subdev/instmem/ |
| D | base.c |
    34   struct nvkm_memory *memory = &iobj->memory;   [in nvkm_instobj_load(), local]
    35   const u64 size = nvkm_memory_size(memory);   [in nvkm_instobj_load()]
    39   if (!(map = nvkm_kmap(memory))) {   [in nvkm_instobj_load()]
    41   nvkm_wo32(memory, i, iobj->suspend[i / 4]);   [in nvkm_instobj_load()]
    45   nvkm_done(memory);   [in nvkm_instobj_load()]
    54   struct nvkm_memory *memory = &iobj->memory;   [in nvkm_instobj_save(), local]
    55   const u64 size = nvkm_memory_size(memory);   [in nvkm_instobj_save()]
    63   if (!(map = nvkm_kmap(memory))) {   [in nvkm_instobj_save()]
    65   iobj->suspend[i / 4] = nvkm_ro32(memory, i);   [in nvkm_instobj_save()]
    69   nvkm_done(memory);   [in nvkm_instobj_save()]
   [all …]
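nvkm_instobj_save()/_load() implement the suspend path visible above: on save, the object's contents are read out 32 bits at a time into a suspend[] shadow buffer, and on load they are written back word by word. A self-contained userspace sketch of that save/restore loop, with rd32()/wr32() over a plain byte buffer standing in for nvkm_ro32()/nvkm_wo32(); it assumes the size is a multiple of 4.

```c
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Stand-ins for nvkm_ro32()/nvkm_wo32(): 32-bit accesses at a byte offset. */
static uint32_t rd32(const uint8_t *obj, uint64_t off)
{
	uint32_t v;

	memcpy(&v, obj + off, sizeof(v));
	return v;
}

static void wr32(uint8_t *obj, uint64_t off, uint32_t v)
{
	memcpy(obj + off, &v, sizeof(v));
}

/* Save the object into a freshly allocated shadow buffer (caller frees it). */
static uint32_t *save(const uint8_t *obj, uint64_t size)
{
	uint32_t *suspend = malloc(size);
	uint64_t i;

	if (suspend)
		for (i = 0; i < size; i += 4)
			suspend[i / 4] = rd32(obj, i);
	return suspend;
}

/* Restore the object from the shadow buffer written by save(). */
static void load(uint8_t *obj, uint64_t size, const uint32_t *suspend)
{
	uint64_t i;

	for (i = 0; i < size; i += 4)
		wr32(obj, i, suspend[i / 4]);
}
```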
|
| D | nv50.c |
    44   #define nv50_instobj(p) container_of((p), struct nv50_instobj, base.memory)
    57   nv50_instobj_wr32_slow(struct nvkm_memory *memory, u64 offset, u32 data)   [in nv50_instobj_wr32_slow(), argument]
    59   struct nv50_instobj *iobj = nv50_instobj(memory);   [in nv50_instobj_wr32_slow()]
    76   nv50_instobj_rd32_slow(struct nvkm_memory *memory, u64 offset)   [in nv50_instobj_rd32_slow(), argument]
    78   struct nv50_instobj *iobj = nv50_instobj(memory);   [in nv50_instobj_rd32_slow()]
   103   nv50_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)   [in nv50_instobj_wr32(), argument]
   105   iowrite32_native(data, nv50_instobj(memory)->map + offset);   [in nv50_instobj_wr32()]
   109   nv50_instobj_rd32(struct nvkm_memory *memory, u64 offset)   [in nv50_instobj_rd32(), argument]
   111   return ioread32_native(nv50_instobj(memory)->map + offset);   [in nv50_instobj_rd32()]
   125   struct nvkm_memory *memory = &iobj->base.memory;   [in nv50_instobj_kmap(), local]
   [all …]
|
| D | gk20a.c |
    59   #define gk20a_instobj(p) container_of((p), struct gk20a_instobj, base.memory)
   116   gk20a_instobj_target(struct nvkm_memory *memory)   [in gk20a_instobj_target(), argument]
   122   gk20a_instobj_page(struct nvkm_memory *memory)   [in gk20a_instobj_page(), argument]
   128   gk20a_instobj_addr(struct nvkm_memory *memory)   [in gk20a_instobj_addr(), argument]
   130   return (u64)gk20a_instobj(memory)->mn->offset << 12;   [in gk20a_instobj_addr()]
   134   gk20a_instobj_size(struct nvkm_memory *memory)   [in gk20a_instobj_size(), argument]
   136   return (u64)gk20a_instobj(memory)->mn->length << 12;   [in gk20a_instobj_size()]
   151   imem->vaddr_use -= nvkm_memory_size(&obj->base.base.memory);   [in gk20a_instobj_iommu_recycle_vaddr()]
   174   gk20a_instobj_acquire_dma(struct nvkm_memory *memory)   [in gk20a_instobj_acquire_dma(), argument]
   176   struct gk20a_instobj *node = gk20a_instobj(memory);   [in gk20a_instobj_acquire_dma()]
   [all …]
|
| D | nv04.c |
    38   #define nv04_instobj(p) container_of((p), struct nv04_instobj, base.memory)
    47   nv04_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)   [in nv04_instobj_wr32(), argument]
    49   struct nv04_instobj *iobj = nv04_instobj(memory);   [in nv04_instobj_wr32()]
    55   nv04_instobj_rd32(struct nvkm_memory *memory, u64 offset)   [in nv04_instobj_rd32(), argument]
    57   struct nv04_instobj *iobj = nv04_instobj(memory);   [in nv04_instobj_rd32()]
    69   nv04_instobj_release(struct nvkm_memory *memory)   [in nv04_instobj_release(), argument]
    74   nv04_instobj_acquire(struct nvkm_memory *memory)   [in nv04_instobj_acquire(), argument]
    76   struct nv04_instobj *iobj = nv04_instobj(memory);   [in nv04_instobj_acquire()]
    82   nv04_instobj_size(struct nvkm_memory *memory)   [in nv04_instobj_size(), argument]
    84   return nv04_instobj(memory)->node->length;   [in nv04_instobj_size()]
   [all …]
|
| D | nv40.c |
    39   #define nv40_instobj(p) container_of((p), struct nv40_instobj, base.memory)
    48   nv40_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)   [in nv40_instobj_wr32(), argument]
    50   struct nv40_instobj *iobj = nv40_instobj(memory);   [in nv40_instobj_wr32()]
    55   nv40_instobj_rd32(struct nvkm_memory *memory, u64 offset)   [in nv40_instobj_rd32(), argument]
    57   struct nv40_instobj *iobj = nv40_instobj(memory);   [in nv40_instobj_rd32()]
    68   nv40_instobj_release(struct nvkm_memory *memory)   [in nv40_instobj_release(), argument]
    74   nv40_instobj_acquire(struct nvkm_memory *memory)   [in nv40_instobj_acquire(), argument]
    76   struct nv40_instobj *iobj = nv40_instobj(memory);   [in nv40_instobj_acquire()]
    81   nv40_instobj_size(struct nvkm_memory *memory)   [in nv40_instobj_size(), argument]
    83   return nv40_instobj(memory)->node->length;   [in nv40_instobj_size()]
   [all …]
|
| /drivers/staging/octeon/ |
| D | ethernet-mem.c |
    49   char *memory;   [in cvm_oct_free_hw_skbuff(), local]
    52   memory = cvmx_fpa_alloc(pool);   [in cvm_oct_free_hw_skbuff()]
    53   if (memory) {   [in cvm_oct_free_hw_skbuff()]
    55   *(struct sk_buff **)(memory - sizeof(void *));   [in cvm_oct_free_hw_skbuff()]
    59   } while (memory);   [in cvm_oct_free_hw_skbuff()]
    79   char *memory;   [in cvm_oct_fill_hw_memory(), local]
    94   memory = kmalloc(size + 256, GFP_ATOMIC);   [in cvm_oct_fill_hw_memory()]
    95   if (unlikely(!memory)) {   [in cvm_oct_fill_hw_memory()]
   100   fpa = (char *)(((unsigned long)memory + 256) & ~0x7fUL);   [in cvm_oct_fill_hw_memory()]
   101   *((char **)fpa - 1) = memory;   [in cvm_oct_fill_hw_memory()]
   [all …]
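cvm_oct_fill_hw_memory() over-allocates, rounds the buffer up to a 128-byte boundary, and stashes the original kmalloc() pointer just below the aligned address so it can be recovered when the buffer is freed. A self-contained userspace sketch of that technique; malloc()/free() stand in for kmalloc()/kfree(), and the 128-byte alignment and 256-byte padding mirror the snippet.

```c
#include <stdint.h>
#include <stdlib.h>

#define ALIGN_BYTES	128UL	/* mirrors the & ~0x7fUL mask in the driver */
#define PAD_BYTES	256UL	/* headroom for alignment plus the stashed pointer */

/* Allocate 'size' usable bytes whose start is aligned to ALIGN_BYTES. */
static void *aligned_alloc_stashed(size_t size)
{
	char *memory = malloc(size + PAD_BYTES);
	char *aligned;

	if (!memory)
		return NULL;

	/* Round up into the padding, leaving room below for the raw pointer. */
	aligned = (char *)(((uintptr_t)memory + PAD_BYTES) & ~(ALIGN_BYTES - 1));
	((char **)aligned)[-1] = memory;	/* stash original pointer just below */
	return aligned;
}

/* Free a buffer returned by aligned_alloc_stashed(). */
static void aligned_free_stashed(void *aligned)
{
	if (aligned)
		free(((char **)aligned)[-1]);
}
```

The free side in cvm_oct_free_hw_skbuff() does the mirror-image lookup: it reads the pointer stored just below the hardware buffer to find what to hand back to the allocator.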
|
| /drivers/gpu/drm/nouveau/nvkm/subdev/fb/ |
| D | ram.c |
    24   #define nvkm_vram(p) container_of((p), struct nvkm_vram, memory)
    32   struct nvkm_memory memory;   [member]
    39   nvkm_vram_kmap(struct nvkm_memory *memory, struct nvkm_memory **pmemory)   [in nvkm_vram_kmap(), argument]
    41   return nvkm_instobj_wrap(nvkm_vram(memory)->ram->fb->subdev.device, memory, pmemory);   [in nvkm_vram_kmap()]
    45   nvkm_vram_map(struct nvkm_memory *memory, u64 offset, struct nvkm_vmm *vmm,   [in nvkm_vram_map(), argument]
    48   struct nvkm_vram *vram = nvkm_vram(memory);   [in nvkm_vram_map()]
    50   .memory = &vram->memory,   [in nvkm_vram_map()]
    59   nvkm_vram_size(struct nvkm_memory *memory)   [in nvkm_vram_size(), argument]
    61   return (u64)nvkm_mm_size(nvkm_vram(memory)->mn) << NVKM_RAM_MM_SHIFT;   [in nvkm_vram_size()]
    65   nvkm_vram_addr(struct nvkm_memory *memory)   [in nvkm_vram_addr(), argument]
   [all …]
|
| /drivers/dax/ |
| D | Kconfig |
     3   tristate "DAX: direct access to differentiated memory"
    13   latency...) memory via an mmap(2) capable character
    15   platform memory resource that is differentiated from the
    16   baseline memory pool. Mappings of a /dev/daxX.Y device impose
    20   tristate "PMEM DAX: direct access to persistent memory"
    24   Support raw access to persistent memory. Note that this
    25   driver consumes memory ranges allocated and exported by the
    31   tristate "HMEM DAX: direct access to 'specific purpose' memory"
    37   memory. For example, a high bandwidth memory pool. The
    39   memory from typical usage by default. This driver creates
   [all …]
|
| /drivers/md/dm-vdo/indexer/ |
| D | delta-index.c |
   171   memmove(delta_zone->memory + destination,   [in rebalance_delta_zone()]
   172   delta_zone->memory + source,   [in rebalance_delta_zone()]
   233   memset(zone->memory + (list_bits / BITS_PER_BYTE), ~0,   [in uds_reset_delta_index()]
   301   vdo_free(vdo_forget(delta_index->delta_zones[z].memory));   [in uds_uninitialize_delta_index()]
   314   result = vdo_allocate(size, u8, "delta list", &delta_zone->memory);   [in initialize_delta_zone()]
   404   static inline u32 get_field(const u8 *memory, u64 offset, u8 size)   [in get_field(), argument]
   406   const void *addr = memory + offset / BITS_PER_BYTE;   [in get_field()]
   412   static inline void set_field(u32 value, u8 *memory, u64 offset, u8 size)   [in set_field(), argument]
   414   void *addr = memory + offset / BITS_PER_BYTE;   [in set_field()]
   431   static inline u32 get_immutable_start(const u8 *memory, u32 list_number)   [in get_immutable_start(), argument]
   [all …]
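get_field()/set_field() in delta-index.c read and write small bit fields at arbitrary bit offsets within a flat byte array, starting from the containing byte (offset / BITS_PER_BYTE). A minimal userspace sketch of that access pattern, loading a 32-bit little-endian window, shifting, and masking; it assumes field widths of at most 24 bits (so one window always covers the field) and callers must guarantee four addressable bytes at the window's start.

```c
#include <stdint.h>

/* Portable little-endian 32-bit load/store at an arbitrary byte address. */
static uint32_t load_le32(const uint8_t *addr)
{
	return (uint32_t)addr[0] | ((uint32_t)addr[1] << 8) |
	       ((uint32_t)addr[2] << 16) | ((uint32_t)addr[3] << 24);
}

static void store_le32(uint8_t *addr, uint32_t v)
{
	addr[0] = (uint8_t)v;
	addr[1] = (uint8_t)(v >> 8);
	addr[2] = (uint8_t)(v >> 16);
	addr[3] = (uint8_t)(v >> 24);
}

/* Read a 'size'-bit field starting at bit 'offset' within 'memory'. */
static uint32_t get_field(const uint8_t *memory, uint64_t offset, uint8_t size)
{
	const uint8_t *addr = memory + offset / 8;

	return (load_le32(addr) >> (offset % 8)) & ((1u << size) - 1);
}

/* Write a 'size'-bit field starting at bit 'offset' within 'memory'. */
static void set_field(uint32_t value, uint8_t *memory, uint64_t offset, uint8_t size)
{
	uint8_t *addr = memory + offset / 8;
	uint32_t shift = offset % 8;
	uint32_t mask = ((1u << size) - 1) << shift;
	uint32_t data = load_le32(addr);

	store_le32(addr, (data & ~mask) | ((value << shift) & mask));
}
```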
|
| /drivers/nvdimm/ |
| D | Kconfig |
     9   Generic support for non-volatile memory devices including
    12   bus is registered to advertise PMEM (persistent memory)
    14   memory resource that may span multiple DIMMs and support DAX
    20   tristate "PMEM: Persistent memory block device support"
    28   non-standard OEM-specific E820 memory type (type-12, see
    32   these persistent memory ranges into block devices that are
    50   update semantics for persistent memory devices, so that
    63   bool "PFN: Map persistent (device) memory"
    68   Map persistent memory, i.e. advertise it to the memory
    69   management sub-system. By default persistent memory does
   [all …]
|
| /drivers/cxl/ |
| D | Kconfig |
    15   memory targets, the CXL.io protocol is equivalent to PCI Express.
    25   The CXL specification defines a "CXL memory device" sub-class in the
    26   PCI "memory controller" base class of devices. Device's identified by
    28   memory to be mapped into the system address map (Host-managed Device
    31   Say 'y/m' to enable a driver that will attach to CXL memory expander
    32   devices enumerated by the memory device class code for configuration
    52   potential impact to memory currently in use by the kernel.
    65   Enable support for host managed device memory (HDM) resources
    66   published by a platform's ACPI CXL memory layout description. See
    81   In addition to typical memory resources a platform may also advertise
   [all …]
|
| /drivers/gpu/drm/amd/amdkfd/ |
| D | Kconfig |
    16   bool "Enable HMM-based shared virtual memory manager"
    22   Enable this to use unified memory and managed memory in HIP. This
    23   memory manager supports two modes of operation. One based on
    25   based memory management on most GFXv9 GPUs, set the module
    35   in peer GPUs' memory without intermediate copies in system memory.
    38   GPUs with large memory BARs that expose the entire VRAM in PCIe bus
|
| /drivers/xen/ |
| D | Kconfig |
     6   bool "Xen memory balloon driver"
     9   The balloon driver allows the Xen domain to request more memory from
    10   the system to expand the domain's memory allocation, or alternatively
    11   return unneeded memory to the system.
    18   Memory hotplug support for Xen balloon driver allows expanding memory
    24   memory ranges to use in order to map foreign memory or grants.
    28   1) target domain: ensure that memory auto online policy is in
    29   effect by checking /sys/devices/system/memory/auto_online_blocks
    33   where <maxmem> is >= requested memory size,
    35   3) control domain: xl mem-set <target-domain> <memory>
   [all …]
|
| /drivers/gpu/drm/nouveau/nvkm/engine/fifo/ |
| D | gv100.c |
   119   const u64 bar2 = cctx ? nvkm_memory_bar2(cctx->vctx->inst->memory) : 0ULL;   [in gv100_ectx_ce_bind()]
   131   if (nvkm_memory_bar2(vctx->inst->memory) == ~0ULL)   [in gv100_ectx_ce_ctor()]
   183   gv100_runl_insert_chan(struct nvkm_chan *chan, struct nvkm_memory *memory, u64 offset)   [in gv100_runl_insert_chan(), argument]
   188   nvkm_wo32(memory, offset + 0x0, lower_32_bits(user) | chan->runq << 1);   [in gv100_runl_insert_chan()]
   189   nvkm_wo32(memory, offset + 0x4, upper_32_bits(user));   [in gv100_runl_insert_chan()]
   190   nvkm_wo32(memory, offset + 0x8, lower_32_bits(inst) | chan->id);   [in gv100_runl_insert_chan()]
   191   nvkm_wo32(memory, offset + 0xc, upper_32_bits(inst));   [in gv100_runl_insert_chan()]
   195   gv100_runl_insert_cgrp(struct nvkm_cgrp *cgrp, struct nvkm_memory *memory, u64 offset)   [in gv100_runl_insert_cgrp(), argument]
   197   nvkm_wo32(memory, offset + 0x0, (128 << 24) | (3 << 16) | 0x00000001);   [in gv100_runl_insert_cgrp()]
   198   nvkm_wo32(memory, offset + 0x4, cgrp->chan_nr);   [in gv100_runl_insert_cgrp()]
   [all …]
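gv100_runl_insert_chan() packs a 16-byte runlist entry as four 32-bit writes: the low and high halves of a 64-bit user address (with the run-queue index OR'd into the low word) followed by the low and high halves of an instance address (with the channel id OR'd in). A userspace sketch of the same packing; the field positions are taken directly from the snippet, and the OR-in of the small fields assumes, as the snippet implies, that the low bits of both addresses are otherwise zero.

```c
#include <stdint.h>

static inline uint32_t lower_32_bits(uint64_t v) { return (uint32_t)v; }
static inline uint32_t upper_32_bits(uint64_t v) { return (uint32_t)(v >> 32); }

/*
 * Pack one 16-byte runlist entry into four 32-bit words, mirroring the
 * nvkm_wo32() writes in gv100_runl_insert_chan(): each 64-bit address is
 * split lo/hi, and the small runq/chan-id fields are folded into the low
 * words.
 */
static void pack_runl_entry(uint32_t entry[4], uint64_t user, uint64_t inst,
			    uint32_t runq, uint32_t chan_id)
{
	entry[0] = lower_32_bits(user) | (runq << 1);
	entry[1] = upper_32_bits(user);
	entry[2] = lower_32_bits(inst) | chan_id;
	entry[3] = upper_32_bits(inst);
}
```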
|
| D | nv50.c |
   238   nv50_runl_commit(struct nvkm_runl *runl, struct nvkm_memory *memory, u32 start, int count)   [in nv50_runl_commit(), argument]
   241   u64 addr = nvkm_memory_addr(memory) + start;   [in nv50_runl_commit()]
   248   nv50_runl_insert_chan(struct nvkm_chan *chan, struct nvkm_memory *memory, u64 offset)   [in nv50_runl_insert_chan(), argument]
   250   nvkm_wo32(memory, offset, chan->id);   [in nv50_runl_insert_chan()]
   287   struct nvkm_memory *memory;   [in nv50_runl_update(), local]
   295   memory = nv50_runl_alloc(runl, &start);   [in nv50_runl_update()]
   296   if (IS_ERR(memory))   [in nv50_runl_update()]
   297   return PTR_ERR(memory);   [in nv50_runl_update()]
   302   nvkm_kmap(memory);   [in nv50_runl_update()]
   306   runl->func->insert_cgrp(cgrp, memory, offset);   [in nv50_runl_update()]
   [all …]
|
| /drivers/media/platform/samsung/exynos4-is/ |
| D | fimc-is.c |
   242   buf = is->memory.vaddr + is->setfile.base;   [in fimc_is_load_setfile()]
   247   pr_debug("mem vaddr: %p, setfile buf: %p\n", is->memory.vaddr, buf);   [in fimc_is_load_setfile()]
   272   mcuctl_write(is->memory.addr, is, MCUCTL_REG_BBOAR);   [in fimc_is_cpu_set_power()]
   318   memcpy(is->memory.vaddr, is->fw.f_w->data, is->fw.f_w->size);   [in fimc_is_start_firmware()]
   338   is->memory.vaddr = dma_alloc_coherent(dev, FIMC_IS_CPU_MEM_SIZE,   [in fimc_is_alloc_cpu_memory()]
   339   &is->memory.addr, GFP_KERNEL);   [in fimc_is_alloc_cpu_memory()]
   340   if (is->memory.vaddr == NULL)   [in fimc_is_alloc_cpu_memory()]
   343   is->memory.size = FIMC_IS_CPU_MEM_SIZE;   [in fimc_is_alloc_cpu_memory()]
   345   dev_info(dev, "FIMC-IS CPU memory base: %pad\n", &is->memory.addr);   [in fimc_is_alloc_cpu_memory()]
   347   if (((u32)is->memory.addr) & FIMC_IS_FW_ADDR_MASK) {   [in fimc_is_alloc_cpu_memory()]
   [all …]
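fimc_is_alloc_cpu_memory() reserves one coherent DMA buffer that later holds the firmware image and working memory: dma_alloc_coherent() returns a CPU virtual address and fills in the bus address that is programmed into the device (the MCUCTL_REG_BBOAR write above). A minimal kernel-style sketch of that allocation pattern; the struct, size constant, and function names here are illustrative, not the exynos4-is ones.

```c
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/sizes.h>

#define DEMO_MEM_SIZE	SZ_1M	/* illustrative; the driver uses FIMC_IS_CPU_MEM_SIZE */

struct demo_mem {
	void *vaddr;		/* CPU view of the buffer */
	dma_addr_t addr;	/* bus address to program into the device */
	size_t size;
};

static int demo_alloc_cpu_memory(struct device *dev, struct demo_mem *mem)
{
	mem->vaddr = dma_alloc_coherent(dev, DEMO_MEM_SIZE, &mem->addr,
					GFP_KERNEL);
	if (!mem->vaddr)
		return -ENOMEM;

	mem->size = DEMO_MEM_SIZE;
	dev_info(dev, "CPU memory base: %pad\n", &mem->addr);
	return 0;
}

static void demo_free_cpu_memory(struct device *dev, struct demo_mem *mem)
{
	dma_free_coherent(dev, mem->size, mem->vaddr, mem->addr);
}
```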
|
| /drivers/memory/tegra/ |
| D | Kconfig |
    23   This driver is required to change memory timings / clock rate for
    24   external memory.
    35   This driver is required to change memory timings / clock rate for
    36   external memory.
    47   This driver is required to change memory timings / clock rate for
    48   external memory.
    61   This driver is required to change memory timings / clock rate for
    62   external memory.
|
| /drivers/staging/android/ |
| D | Kconfig |
    12   The ashmem subsystem is a new shared memory allocator, similar to
    16   It is, in theory, a good memory allocator for low-memory devices,
    17   because it can discard shared memory units when under memory pressure.
|