/drivers/infiniband/sw/rxe/rxe_mr.c:
   54  int mem_check_range(struct rxe_mem *mem, u64 iova, size_t length)  in mem_check_range() argument
   56  switch (mem->type) {  in mem_check_range()
   62  if (iova < mem->iova ||  in mem_check_range()
   63  length > mem->length ||  in mem_check_range()
   64  iova > mem->iova + mem->length - length)  in mem_check_range()
   77  static void rxe_mem_init(int access, struct rxe_mem *mem)  in rxe_mem_init() argument
   79  u32 lkey = mem->pelem.index << 8 | rxe_get_key();  in rxe_mem_init()
   82  if (mem->pelem.pool->type == RXE_TYPE_MR) {  in rxe_mem_init()
   83  mem->ibmr.lkey = lkey;  in rxe_mem_init()
   84  mem->ibmr.rkey = rkey;  in rxe_mem_init()
       [all …]

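The comparison chain at rxe_mr.c:62-64 is an overflow-conscious bounds check: rather than testing `iova + length` against the end of the region (a sum that can wrap), it tests the start against the last offset from which `length` bytes still fit. A minimal standalone sketch, with a hypothetical `mem_region` type standing in for `struct rxe_mem`:

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Hypothetical stand-in for the fields rxe_mr.c consults. */
struct mem_region {
	uint64_t iova;  /* first byte covered by the registration */
	size_t length;  /* bytes covered */
};

static bool range_ok(const struct mem_region *mem, uint64_t iova, size_t length)
{
	/*
	 * Same three tests as rxe_mr.c:62-64: the request starts inside
	 * the region, is no longer than the region, and begins early
	 * enough that `length` bytes still fit.  The third test avoids
	 * computing iova + length, which could overflow.
	 */
	return !(iova < mem->iova ||
		 length > mem->length ||
		 iova > mem->iova + mem->length - length);
}
```
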
/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.c:
   35  struct page **mem;  member
   57  struct nvkm_mem *mem = nvkm_mem(memory);  in nvkm_mem_addr() local
   58  if (mem->pages == 1 && mem->mem)  in nvkm_mem_addr()
   59  return mem->dma[0];  in nvkm_mem_addr()
   73  struct nvkm_mem *mem = nvkm_mem(memory);  in nvkm_mem_map_dma() local
   75  .memory = &mem->memory,  in nvkm_mem_map_dma()
   77  .dma = mem->dma,  in nvkm_mem_map_dma()
   85  struct nvkm_mem *mem = nvkm_mem(memory);  in nvkm_mem_dtor() local
   86  if (mem->mem) {  in nvkm_mem_dtor()
   87  while (mem->pages--) {  in nvkm_mem_dtor()
       [all …]

/drivers/gpu/drm/nouveau/nouveau_mem.c:
   36  nouveau_mem_map(struct nouveau_mem *mem,  in nouveau_mem_map() argument
   54  args.nv50.kind = mem->kind;  in nouveau_mem_map()
   55  args.nv50.comp = mem->comp;  in nouveau_mem_map()
   62  if (mem->mem.type & NVIF_MEM_VRAM)  in nouveau_mem_map()
   68  args.gf100.kind = mem->kind;  in nouveau_mem_map()
   78  ret = nvif_vmm_map(vmm, vma->addr, mem->mem.size, &args, argc,  in nouveau_mem_map()
   79  &mem->mem, 0);  in nouveau_mem_map()
   85  nouveau_mem_fini(struct nouveau_mem *mem)  in nouveau_mem_fini() argument
   87  nvif_vmm_put(&mem->cli->drm->client.vmm.vmm, &mem->vma[1]);  in nouveau_mem_fini()
   88  nvif_vmm_put(&mem->cli->drm->client.vmm.vmm, &mem->vma[0]);  in nouveau_mem_fini()
       [all …]

/drivers/gpu/drm/nouveau/nouveau_vmm.c:
   31  if (vma->mem) {  in nouveau_vma_unmap()
   33  vma->mem = NULL;  in nouveau_vma_unmap()
   38  nouveau_vma_map(struct nouveau_vma *vma, struct nouveau_mem *mem)  in nouveau_vma_map() argument
   41  int ret = nouveau_mem_map(mem, &vma->vmm->vmm, &tmp);  in nouveau_vma_map()
   44  vma->mem = mem;  in nouveau_vma_map()
   80  struct nouveau_mem *mem = nouveau_mem(&nvbo->bo.mem);  in nouveau_vma_new() local
   95  vma->mem = NULL;  in nouveau_vma_new()
   99  if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM &&  in nouveau_vma_new()
  100  mem->mem.page == nvbo->page) {  in nouveau_vma_new()
  101  ret = nvif_vmm_get(&vmm->vmm, LAZY, false, mem->mem.page, 0,  in nouveau_vma_new()
       [all …]

/drivers/media/v4l2-core/videobuf-dma-contig.c:
   38  struct videobuf_dma_contig_memory *mem,  in __videobuf_dc_alloc() argument
   41  mem->size = size;  in __videobuf_dc_alloc()
   42  mem->vaddr = dma_alloc_coherent(dev, mem->size,  in __videobuf_dc_alloc()
   43  &mem->dma_handle, flags);  in __videobuf_dc_alloc()
   45  if (!mem->vaddr) {  in __videobuf_dc_alloc()
   46  dev_err(dev, "memory alloc size %ld failed\n", mem->size);  in __videobuf_dc_alloc()
   50  dev_dbg(dev, "dma mapped data is at %p (%ld)\n", mem->vaddr, mem->size);  in __videobuf_dc_alloc()
   56  struct videobuf_dma_contig_memory *mem)  in __videobuf_dc_free() argument
   58  dma_free_coherent(dev, mem->size, mem->vaddr, mem->dma_handle);  in __videobuf_dc_free()
   60  mem->vaddr = NULL;  in __videobuf_dc_free()
       [all …]

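__videobuf_dc_alloc()/__videobuf_dc_free() above show the standard coherent-DMA pairing: the triple (size, vaddr, dma_handle) that dma_alloc_coherent() returned must be handed back to dma_free_coherent() unchanged. A minimal kernel-style sketch, with a hypothetical container struct mirroring videobuf_dma_contig_memory:

```c
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Hypothetical container mirroring videobuf_dma_contig_memory. */
struct dc_mem {
	unsigned long size;
	void *vaddr;           /* CPU view of the buffer */
	dma_addr_t dma_handle; /* device view of the same buffer */
};

static int dc_alloc(struct device *dev, struct dc_mem *mem, unsigned long size)
{
	mem->size = size;
	mem->vaddr = dma_alloc_coherent(dev, mem->size, &mem->dma_handle,
					GFP_KERNEL);
	return mem->vaddr ? 0 : -ENOMEM;
}

static void dc_free(struct device *dev, struct dc_mem *mem)
{
	/* Must mirror the alloc exactly: same dev, size, vaddr, handle. */
	dma_free_coherent(dev, mem->size, mem->vaddr, mem->dma_handle);
	mem->vaddr = NULL; /* a NULL vaddr marks the buffer as gone */
}
```
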
/drivers/media/v4l2-core/videobuf-vmalloc.c:
   72  struct videobuf_vmalloc_memory *mem;  in videobuf_vm_close() local
   88  mem = q->bufs[i]->priv;  in videobuf_vm_close()
   89  if (mem) {  in videobuf_vm_close()
   96  MAGIC_CHECK(mem->magic, MAGIC_VMAL_MEM);  in videobuf_vm_close()
  102  __func__, i, mem->vaddr);  in videobuf_vm_close()
  104  vfree(mem->vaddr);  in videobuf_vm_close()
  105  mem->vaddr = NULL;  in videobuf_vm_close()
  137  struct videobuf_vmalloc_memory *mem;  in __videobuf_alloc_vb() local
  140  vb = kzalloc(size + sizeof(*mem), GFP_KERNEL);  in __videobuf_alloc_vb()
  144  mem = vb->priv = ((char *)vb) + size;  in __videobuf_alloc_vb()
       [all …]

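Lines 140 and 144 of videobuf-vmalloc.c use a single kzalloc() to carry two structures: the caller-sized buffer object at the head and the allocator's private part in the tail, reachable through vb->priv, so one kfree() later releases both. A sketch of the idiom with hypothetical types:

```c
#include <linux/slab.h>
#include <linux/types.h>

struct vbuf {     /* hypothetical stand-in for videobuf_buffer */
	void *priv;
	/* ... generic buffer state ... */
};

struct vmal_mem { /* hypothetical stand-in for the private part */
	u32 magic;
	void *vaddr;
};

static struct vbuf *alloc_vb(size_t size)
{
	struct vmal_mem *mem;
	struct vbuf *vb;

	/* One allocation, two structures: vb at the head, mem in the tail. */
	vb = kzalloc(size + sizeof(*mem), GFP_KERNEL);
	if (!vb)
		return NULL;

	mem = vb->priv = (char *)vb + size;
	mem->magic = 0x564d414c; /* arbitrary magic, checked on later use */
	return vb;               /* a single kfree(vb) frees both parts */
}
```
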
/drivers/media/v4l2-core/videobuf-dma-sg.c:
  136  struct videobuf_dma_sg_memory *mem = buf->priv;  in videobuf_to_dma() local
  137  BUG_ON(!mem);  in videobuf_to_dma()
  139  MAGIC_CHECK(mem->magic, MAGIC_SG_MEM);  in videobuf_to_dma()
  141  return &mem->dma;  in videobuf_to_dma()
  398  struct videobuf_dma_sg_memory *mem;  in videobuf_vm_close() local
  411  mem = q->bufs[i]->priv;  in videobuf_vm_close()
  412  if (!mem)  in videobuf_vm_close()
  415  MAGIC_CHECK(mem->magic, MAGIC_SG_MEM);  in videobuf_vm_close()
  470  struct videobuf_dma_sg_memory *mem;  in __videobuf_alloc_vb() local
  473  vb = kzalloc(size + sizeof(*mem), GFP_KERNEL);  in __videobuf_alloc_vb()
       [all …]

/drivers/pci/endpoint/pci-epc-mem.c:
   23  static int pci_epc_mem_get_order(struct pci_epc_mem *mem, size_t size)  in pci_epc_mem_get_order() argument
   26  unsigned int page_shift = ilog2(mem->page_size);  in pci_epc_mem_get_order()
   52  struct pci_epc_mem *mem;  in __pci_epc_mem_init() local
   65  mem = kzalloc(sizeof(*mem), GFP_KERNEL);  in __pci_epc_mem_init()
   66  if (!mem) {  in __pci_epc_mem_init()
   77  mem->bitmap = bitmap;  in __pci_epc_mem_init()
   78  mem->phys_base = phys_base;  in __pci_epc_mem_init()
   79  mem->page_size = page_size;  in __pci_epc_mem_init()
   80  mem->pages = pages;  in __pci_epc_mem_init()
   81  mem->size = size;  in __pci_epc_mem_init()
       [all …]

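pci_epc_mem_get_order() converts a request size into a buddy-style order counted in window pages of mem->page_size bytes; __pci_epc_mem_init() then sets up a bitmap, apparently one bit per window page. Only the ilog2() line is visible above, so the body below reconstructs the usual round-up arithmetic and should be read as an assumption, not the verbatim function:

```c
#include <linux/bitops.h>
#include <linux/log2.h>

/*
 * Order such that (1 << order) pages of `page_size` bytes cover `size`:
 * subtract one, shift down to page units, then take the highest set bit.
 * With page_size = 4096: size 4096 -> order 0, size 4097 -> order 1.
 */
static int mem_get_order(size_t page_size, size_t size)
{
	unsigned int page_shift = ilog2(page_size);

	size--;
	size >>= page_shift;
	return fls(size); /* a 64-bit kernel would want fls64() here */
}
```
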
/drivers/infiniband/sw/siw/siw_mem.c:
   50  struct siw_mem *mem;  in siw_mem_id2obj() local
   53  mem = xa_load(&sdev->mem_xa, stag_index);  in siw_mem_id2obj()
   54  if (likely(mem && kref_get_unless_zero(&mem->ref))) {  in siw_mem_id2obj()
   56  return mem;  in siw_mem_id2obj()
   93  struct siw_mem *mem = kzalloc(sizeof(*mem), GFP_KERNEL);  in siw_mr_add_mem() local
   97  if (!mem)  in siw_mr_add_mem()
  100  mem->mem_obj = mem_obj;  in siw_mr_add_mem()
  101  mem->stag_valid = 0;  in siw_mr_add_mem()
  102  mem->sdev = sdev;  in siw_mr_add_mem()
  103  mem->va = start;  in siw_mr_add_mem()
       [all …]

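siw_mem_id2obj() (lines 50-56) shows the lookup-then-ref pattern for objects indexed in an XArray: kref_get_unless_zero() refuses to revive an object whose refcount already hit zero, so a racing teardown can never be handed back out. A condensed sketch with a hypothetical object type, bracketed in RCU as such lookups usually are:

```c
#include <linux/kref.h>
#include <linux/rcupdate.h>
#include <linux/xarray.h>

struct obj {            /* hypothetical refcounted object */
	struct kref ref;
};

static struct obj *obj_lookup(struct xarray *xa, unsigned long id)
{
	struct obj *o;

	rcu_read_lock();
	o = xa_load(xa, id);
	/* Take a reference only if the count has not yet reached zero. */
	if (o && !kref_get_unless_zero(&o->ref))
		o = NULL;
	rcu_read_unlock();
	return o;           /* caller must kref_put() when done */
}
```
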
/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c:
   84  struct kgd_mem *mem)  in check_if_add_bo_to_vm() argument
   88  list_for_each_entry(entry, &mem->bo_va_list, bo_list)  in check_if_add_bo_to_vm()
  102  uint64_t mem;  in amdgpu_amdkfd_gpuvm_init_mem_limits() local
  105  mem = si.totalram - si.totalhigh;  in amdgpu_amdkfd_gpuvm_init_mem_limits()
  106  mem *= si.mem_unit;  in amdgpu_amdkfd_gpuvm_init_mem_limits()
  109  kfd_mem_limit.max_system_mem_limit = (mem >> 1) + (mem >> 2);  in amdgpu_amdkfd_gpuvm_init_mem_limits()
  110  kfd_mem_limit.max_ttm_mem_limit = (mem >> 1) - (mem >> 3);  in amdgpu_amdkfd_gpuvm_init_mem_limits()
  371  static int add_bo_to_vm(struct amdgpu_device *adev, struct kgd_mem *mem,  in add_bo_to_vm() argument
  377  struct amdgpu_bo *bo = mem->bo;  in add_bo_to_vm()
  378  uint64_t va = mem->va;  in add_bo_to_vm()
       [all …]

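The shifts at lines 109-110 are fixed-point fractions of available low memory: (mem >> 1) + (mem >> 2) is 1/2 + 1/4 = 75%, and (mem >> 1) - (mem >> 3) is 1/2 - 1/8 = 37.5%. A standalone sketch of the same computation (function name hypothetical):

```c
#include <linux/mm.h> /* si_meminfo(), struct sysinfo */

static void init_mem_limits(u64 *sys_limit, u64 *ttm_limit)
{
	struct sysinfo si;
	u64 mem;

	si_meminfo(&si);
	/* Bytes of lowmem: page counts scaled by the reported unit. */
	mem = (u64)(si.totalram - si.totalhigh) * si.mem_unit;

	*sys_limit = (mem >> 1) + (mem >> 2); /* 1/2 + 1/4 = 75%   */
	*ttm_limit = (mem >> 1) - (mem >> 3); /* 1/2 - 1/8 = 37.5% */
}
```
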
/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c:
  151  bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_mem_reg *mem)  in amdgpu_gtt_mgr_has_gart_addr() argument
  153  struct amdgpu_gtt_node *node = mem->mm_node;  in amdgpu_gtt_mgr_has_gart_addr()
  171  struct ttm_mem_reg *mem)  in amdgpu_gtt_mgr_alloc() argument
  175  struct amdgpu_gtt_node *node = mem->mm_node;  in amdgpu_gtt_mgr_alloc()
  180  if (amdgpu_gtt_mgr_has_gart_addr(mem))  in amdgpu_gtt_mgr_alloc()
  198  r = drm_mm_insert_node_in_range(&mgr->mm, &node->node, mem->num_pages,  in amdgpu_gtt_mgr_alloc()
  199  mem->page_alignment, 0, fpfn, lpfn,  in amdgpu_gtt_mgr_alloc()
  204  mem->start = node->node.start;  in amdgpu_gtt_mgr_alloc()
  222  struct ttm_mem_reg *mem)  in amdgpu_gtt_mgr_new() argument
  229  if ((&tbo->mem == mem || tbo->mem.mem_type != TTM_PL_TT) &&  in amdgpu_gtt_mgr_new()
       [all …]

/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c:
  218  struct ttm_mem_reg *mem = &bo->tbo.mem;  in amdgpu_vram_mgr_bo_visible_size() local
  219  struct drm_mm_node *nodes = mem->mm_node;  in amdgpu_vram_mgr_bo_visible_size()
  220  unsigned pages = mem->num_pages;  in amdgpu_vram_mgr_bo_visible_size()
  226  if (mem->start >= adev->gmc.visible_vram_size >> PAGE_SHIFT)  in amdgpu_vram_mgr_bo_visible_size()
  244  static void amdgpu_vram_mgr_virt_start(struct ttm_mem_reg *mem,  in amdgpu_vram_mgr_virt_start() argument
  250  if (start > mem->num_pages)  in amdgpu_vram_mgr_virt_start()
  251  start -= mem->num_pages;  in amdgpu_vram_mgr_virt_start()
  254  mem->start = max(mem->start, start);  in amdgpu_vram_mgr_virt_start()
  270  struct ttm_mem_reg *mem)  in amdgpu_vram_mgr_new() argument
  287  mem_bytes = (u64)mem->num_pages << PAGE_SHIFT;  in amdgpu_vram_mgr_new()
       [all …]

/drivers/base/memory.c:
   92  struct memory_block *mem = to_memory_block(dev);  in memory_block_release() local
   94  kfree(mem);  in memory_block_release()
  109  struct memory_block *mem = to_memory_block(dev);  in phys_index_show() local
  112  phys_index = mem->start_section_nr / sections_per_block;  in phys_index_show()
  125  struct memory_block *mem = to_memory_block(dev);  in removable_show() local
  129  if (mem->state != MEM_ONLINE)  in removable_show()
  133  if (!present_section_nr(mem->start_section_nr + i))  in removable_show()
  135  pfn = section_nr_to_pfn(mem->start_section_nr + i);  in removable_show()
  149  struct memory_block *mem = to_memory_block(dev);  in state_show() local
  156  switch (mem->state) {  in state_show()
       [all …]

/drivers/spi/spi-mem.c:
  102  static int spi_check_buswidth_req(struct spi_mem *mem, u8 buswidth, bool tx)  in spi_check_buswidth_req() argument
  104  u32 mode = mem->spi->mode;  in spi_check_buswidth_req()
  138  bool spi_mem_default_supports_op(struct spi_mem *mem,  in spi_mem_default_supports_op() argument
  141  if (spi_check_buswidth_req(mem, op->cmd.buswidth, true))  in spi_mem_default_supports_op()
  145  spi_check_buswidth_req(mem, op->addr.buswidth, true))  in spi_mem_default_supports_op()
  149  spi_check_buswidth_req(mem, op->dummy.buswidth, true))  in spi_mem_default_supports_op()
  153  spi_check_buswidth_req(mem, op->data.buswidth,  in spi_mem_default_supports_op()
  188  static bool spi_mem_internal_supports_op(struct spi_mem *mem,  in spi_mem_internal_supports_op() argument
  191  struct spi_controller *ctlr = mem->spi->controller;  in spi_mem_internal_supports_op()
  194  return ctlr->mem_ops->supports_op(mem, op);  in spi_mem_internal_supports_op()
       [all …]

/drivers/char/hw_random/intel-rng.c:
  155  static inline u8 hwstatus_get(void __iomem *mem)  in hwstatus_get() argument
  157  return readb(mem + INTEL_RNG_HW_STATUS);  in hwstatus_get()
  160  static inline u8 hwstatus_set(void __iomem *mem,  in hwstatus_set() argument
  163  writeb(hw_status, mem + INTEL_RNG_HW_STATUS);  in hwstatus_set()
  164  return hwstatus_get(mem);  in hwstatus_set()
  169  void __iomem *mem = (void __iomem *)rng->priv;  in intel_rng_data_present() local
  173  data = !!(readb(mem + INTEL_RNG_STATUS) &  in intel_rng_data_present()
  184  void __iomem *mem = (void __iomem *)rng->priv;  in intel_rng_data_read() local
  186  *data = readb(mem + INTEL_RNG_DATA);  in intel_rng_data_read()
  193  void __iomem *mem = (void __iomem *)rng->priv;  in intel_rng_init() local
       [all …]

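hwstatus_set() at lines 160-164 writes the control register and immediately reads it back through hwstatus_get(); the read-back both flushes the posted MMIO write and reports what the device actually latched, which the caller can compare against the requested value. A condensed sketch (the register offset here is hypothetical; the driver uses INTEL_RNG_HW_STATUS):

```c
#include <linux/io.h>

#define RNG_HW_STATUS 0x01 /* hypothetical offset for illustration */

static inline u8 hwstatus_get(void __iomem *mem)
{
	return readb(mem + RNG_HW_STATUS);
}

static inline u8 hwstatus_set(void __iomem *mem, u8 hw_status)
{
	writeb(hw_status, mem + RNG_HW_STATUS);
	return hwstatus_get(mem); /* read back what the device accepted */
}
```
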
/drivers/firmware/efi/cper.c:
  216  static int cper_mem_err_location(struct cper_mem_err_compact *mem, char *msg)  in cper_mem_err_location() argument
  225  if (mem->validation_bits & CPER_MEM_VALID_NODE)  in cper_mem_err_location()
  226  n += scnprintf(msg + n, len - n, "node: %d ", mem->node);  in cper_mem_err_location()
  227  if (mem->validation_bits & CPER_MEM_VALID_CARD)  in cper_mem_err_location()
  228  n += scnprintf(msg + n, len - n, "card: %d ", mem->card);  in cper_mem_err_location()
  229  if (mem->validation_bits & CPER_MEM_VALID_MODULE)  in cper_mem_err_location()
  230  n += scnprintf(msg + n, len - n, "module: %d ", mem->module);  in cper_mem_err_location()
  231  if (mem->validation_bits & CPER_MEM_VALID_RANK_NUMBER)  in cper_mem_err_location()
  232  n += scnprintf(msg + n, len - n, "rank: %d ", mem->rank);  in cper_mem_err_location()
  233  if (mem->validation_bits & CPER_MEM_VALID_BANK)  in cper_mem_err_location()
       [all …]

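cper_mem_err_location() chains `n += scnprintf(msg + n, len - n, ...)`; unlike snprintf(), scnprintf() returns the number of bytes actually stored (never more than the space given), so `n` cannot advance past the buffer even after it fills. A trimmed sketch with hypothetical fields:

```c
#include <linux/kernel.h>
#include <linux/types.h>

static int format_location(char *msg, size_t len, bool have_node, int node,
			   bool have_card, int card)
{
	int n = 0;

	/* Each call appends at msg + n and never writes past len bytes. */
	if (have_node)
		n += scnprintf(msg + n, len - n, "node: %d ", node);
	if (have_card)
		n += scnprintf(msg + n, len - n, "card: %d ", card);
	return n; /* total characters stored */
}
```
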
/drivers/gpu/drm/ttm/ttm_bo_util.c:
   50  ttm_bo_mem_put(bo, &bo->mem);  in ttm_bo_free_old_node()
   58  struct ttm_mem_reg *old_mem = &bo->mem;  in ttm_bo_move_ttm()
  134  struct ttm_mem_reg *mem)  in ttm_mem_io_reserve() argument
  136  struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];  in ttm_mem_io_reserve()
  142  return bdev->driver->io_mem_reserve(bdev, mem);  in ttm_mem_io_reserve()
  145  mem->bus.io_reserved_count++ == 0) {  in ttm_mem_io_reserve()
  147  ret = bdev->driver->io_mem_reserve(bdev, mem);  in ttm_mem_io_reserve()
  159  struct ttm_mem_reg *mem)  in ttm_mem_io_free() argument
  161  struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];  in ttm_mem_io_free()
  167  --mem->bus.io_reserved_count == 0 &&  in ttm_mem_io_free()
       [all …]

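ttm_mem_io_reserve()/ttm_mem_io_free() gate the driver's io_mem_reserve/io_mem_free hooks behind a counter: only the 0 to 1 transition reserves and only the 1 to 0 transition frees. A stripped-down sketch of that discipline with hypothetical callback types; callers are assumed to serialize, as TTM does with its own locking:

```c
/* Hypothetical reduction of the io_reserved_count pattern. */
struct io_region {
	int io_reserved_count;
};

static int io_reserve(struct io_region *r, int (*reserve_hw)(void))
{
	int ret = 0;

	if (r->io_reserved_count++ == 0) {
		ret = reserve_hw();             /* first user does the work */
		if (ret)
			r->io_reserved_count--; /* roll back on failure */
	}
	return ret;
}

static void io_free(struct io_region *r, void (*free_hw)(void))
{
	if (--r->io_reserved_count == 0)
		free_hw();                      /* last user undoes it */
}
```
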
/drivers/gpu/drm/ttm/ttm_agp_backend.c:
   47  struct agp_memory *mem;  member
   56  struct agp_memory *mem;  in ttm_agp_bind() local
   60  mem = agp_allocate_memory(agp_be->bridge, ttm->num_pages, AGP_USER_MEMORY);  in ttm_agp_bind()
   61  if (unlikely(mem == NULL))  in ttm_agp_bind()
   64  mem->page_count = 0;  in ttm_agp_bind()
   71  mem->pages[mem->page_count++] = page;  in ttm_agp_bind()
   73  agp_be->mem = mem;  in ttm_agp_bind()
   75  mem->is_flushed = 1;  in ttm_agp_bind()
   76  mem->type = (cached) ? AGP_USER_CACHED_MEMORY : AGP_USER_MEMORY;  in ttm_agp_bind()
   78  ret = agp_bind_memory(mem, node->start);  in ttm_agp_bind()
       [all …]

/drivers/gpu/drm/ttm/ttm_bo.c:
  102  bo, bo->mem.num_pages, bo->mem.size >> 10,  in ttm_bo_mem_space_debug()
  103  bo->mem.size >> 20);  in ttm_bo_mem_space_debug()
  157  BUG_ON(bo->mem.mm_node != NULL);  in ttm_bo_release_list()
  171  struct ttm_mem_reg *mem)  in ttm_bo_add_mem_to_lru() argument
  181  if (mem->placement & TTM_PL_FLAG_NO_EVICT)  in ttm_bo_add_mem_to_lru()
  184  man = &bdev->man[mem->mem_type];  in ttm_bo_add_mem_to_lru()
  198  ttm_bo_add_mem_to_lru(bo, &bo->mem);  in ttm_bo_add_to_lru()
  253  if (bulk && !(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {  in ttm_bo_move_to_lru_tail()
  254  switch (bo->mem.mem_type) {  in ttm_bo_move_to_lru_tail()
  321  struct ttm_mem_reg *mem, bool evict,  in ttm_bo_handle_move_mem() argument
       [all …]

/drivers/gpu/drm/nouveau/nvif/mem.c:
   28  nvif_mem_init_map(struct nvif_mmu *mmu, u8 type, u64 size, struct nvif_mem *mem)  in nvif_mem_init_map() argument
   30  int ret = nvif_mem_init(mmu, mmu->mem, NVIF_MEM_MAPPABLE | type, 0,  in nvif_mem_init_map()
   31  size, NULL, 0, mem);  in nvif_mem_init_map()
   33  ret = nvif_object_map(&mem->object, NULL, 0);  in nvif_mem_init_map()
   35  nvif_mem_fini(mem);  in nvif_mem_init_map()
   41  nvif_mem_fini(struct nvif_mem *mem)  in nvif_mem_fini() argument
   43  nvif_object_fini(&mem->object);  in nvif_mem_fini()
   48  u64 size, void *argv, u32 argc, struct nvif_mem *mem)  in nvif_mem_init_type() argument
   54  mem->object.client = NULL;  in nvif_mem_init_type()
   71  sizeof(*args) + argc, &mem->object);  in nvif_mem_init_type()
       [all …]

/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c:
   38  struct mtk_vcodec_mem *mem)  in mtk_vcodec_mem_alloc() argument
   40  unsigned long size = mem->size;  in mtk_vcodec_mem_alloc()
   44  mem->va = dma_alloc_coherent(dev, size, &mem->dma_addr, GFP_KERNEL);  in mtk_vcodec_mem_alloc()
   45  if (!mem->va) {  in mtk_vcodec_mem_alloc()
   51  mtk_v4l2_debug(3, "[%d] - va = %p", ctx->id, mem->va);  in mtk_vcodec_mem_alloc()
   53  (unsigned long)mem->dma_addr);  in mtk_vcodec_mem_alloc()
   61  struct mtk_vcodec_mem *mem)  in mtk_vcodec_mem_free() argument
   63  unsigned long size = mem->size;  in mtk_vcodec_mem_free()
   67  if (!mem->va) {  in mtk_vcodec_mem_free()
   73  mtk_v4l2_debug(3, "[%d] - va = %p", ctx->id, mem->va);  in mtk_vcodec_mem_free()
       [all …]

/drivers/uio/uio_mf624.c:
   35  void __iomem *INTCSR_reg = info->mem[0].internal_addr + INTCSR;  in mf624_disable_interrupt()
   63  void __iomem *INTCSR_reg = info->mem[0].internal_addr + INTCSR;  in mf624_enable_interrupt()
   90  void __iomem *INTCSR_reg = info->mem[0].internal_addr + INTCSR;  in mf624_irq_handler()
  117  static int mf624_setup_mem(struct pci_dev *dev, int bar, struct uio_mem *mem, const char *name)  in mf624_setup_mem() argument
  122  mem->name = name;  in mf624_setup_mem()
  123  mem->addr = start & PAGE_MASK;  in mf624_setup_mem()
  124  mem->offs = start & ~PAGE_MASK;  in mf624_setup_mem()
  125  if (!mem->addr)  in mf624_setup_mem()
  127  mem->size = ((start & ~PAGE_MASK) + len + PAGE_SIZE - 1) & PAGE_MASK;  in mf624_setup_mem()
  128  mem->memtype = UIO_MEM_PHYS;  in mf624_setup_mem()
       [all …]

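mf624_setup_mem() (lines 122-128) splits a PCI BAR into the page-aligned pieces UIO can mmap: a base rounded down to a page boundary, the register offset inside that first page, and a size rounded up so the last byte is still covered. The same arithmetic in isolation (function name hypothetical):

```c
#include <linux/mm.h> /* PAGE_SIZE, PAGE_MASK */
#include <linux/types.h>

static void split_bar(phys_addr_t start, size_t len,
		      phys_addr_t *addr, unsigned long *offs, size_t *size)
{
	*addr = start & PAGE_MASK;  /* mappable, page-aligned base */
	*offs = start & ~PAGE_MASK; /* where the registers sit in page 0 */
	/* Round (offset-in-page + len) up to whole pages. */
	*size = ((start & ~PAGE_MASK) + len + PAGE_SIZE - 1) & PAGE_MASK;
}
```
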
/drivers/gpu/drm/nouveau/dispnv50/headc57d.c:
  102  headc57d_olut_load_8(struct drm_color_lut *in, int size, void __iomem *mem)  in headc57d_olut_load_8() argument
  104  memset_io(mem, 0x00, 0x20); /* VSS header. */  in headc57d_olut_load_8()
  105  mem += 0x20;  in headc57d_olut_load_8()
  119  for (i = 0; i < 4; i++, mem += 8) {  in headc57d_olut_load_8()
  120  writew(r + ri * i, mem + 0);  in headc57d_olut_load_8()
  121  writew(g + gi * i, mem + 2);  in headc57d_olut_load_8()
  122  writew(b + bi * i, mem + 4);  in headc57d_olut_load_8()
  129  writew(readw(mem - 8), mem + 0);  in headc57d_olut_load_8()
  130  writew(readw(mem - 6), mem + 2);  in headc57d_olut_load_8()
  131  writew(readw(mem - 4), mem + 4);  in headc57d_olut_load_8()
       [all …]

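headc57d_olut_load_8() expands an 8-bit LUT for hardware that wants more entries: after a zeroed VSS header it writes, for each input entry, four interpolated steps (r/g/b appear to be the base values and ri/gi/bi the per-step increments), then duplicates the final entry by reading it back. A sketch of just the interpolation step, with the surrounding setup elided:

```c
#include <linux/io.h>
#include <linux/types.h>

/*
 * Write four hardware LUT entries interpolating from (r,g,b) with
 * per-step increments (ri,gi,bi), 8 bytes per entry as in headc57d.c.
 * The parameter names mirror the locals of the original function.
 */
static void __iomem *lut_interp(void __iomem *mem, u16 r, u16 g, u16 b,
				int ri, int gi, int bi)
{
	int i;

	for (i = 0; i < 4; i++, mem += 8) {
		writew(r + ri * i, mem + 0);
		writew(g + gi * i, mem + 2);
		writew(b + bi * i, mem + 4);
	}
	return mem; /* caller continues at the next entry */
}
```
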
/drivers/leds/leds-bcm6328.c:
   65  void __iomem *mem;  member
  112  mode = led->mem + BCM6328_REG_MODE_HI;  in bcm6328_led_mode()
  114  mode = led->mem + BCM6328_REG_MODE_LO;  in bcm6328_led_mode()
  187  val = bcm6328_led_read(led->mem + BCM6328_REG_INIT);  in bcm6328_blink_set()
  190  bcm6328_led_write(led->mem + BCM6328_REG_INIT, val);  in bcm6328_blink_set()
  205  void __iomem *mem, spinlock_t *lock)  in bcm6328_hwled() argument
  211  val = bcm6328_led_read(mem + BCM6328_REG_HWDIS);  in bcm6328_hwled()
  213  bcm6328_led_write(mem + BCM6328_REG_HWDIS, val);  in bcm6328_hwled()
  227  addr = mem + BCM6328_REG_LNKACTSEL_LO;  in bcm6328_hwled()
  229  addr = mem + BCM6328_REG_LNKACTSEL_HI;  in bcm6328_hwled()
       [all …]

/drivers/leds/leds-bcm6358.c:
   40  void __iomem *mem;  member
   64  static unsigned long bcm6358_led_busy(void __iomem *mem)  in bcm6358_led_busy() argument
   68  while ((val = bcm6358_led_read(mem + BCM6358_REG_CTRL)) &  in bcm6358_led_busy()
   83  bcm6358_led_busy(led->mem);  in bcm6358_led_set()
   84  val = bcm6358_led_read(led->mem + BCM6358_REG_MODE);  in bcm6358_led_set()
   90  bcm6358_led_write(led->mem + BCM6358_REG_MODE, val);  in bcm6358_led_set()
   95  void __iomem *mem, spinlock_t *lock)  in bcm6358_led() argument
  106  led->mem = mem;  in bcm6358_led()
  122  val = bcm6358_led_read(led->mem + BCM6358_REG_MODE);  in bcm6358_led()
  155  void __iomem *mem;  in bcm6358_leds_probe() local
       [all …]