/drivers/gpu/drm/ttm/

D | ttm_memory.c
      49  struct ttm_mem_global *glob;    member
      97  spin_lock(&zone->glob->lock);    in ttm_mem_zone_show()
     108  spin_unlock(&zone->glob->lock);    in ttm_mem_zone_show()
     114  static void ttm_check_swapping(struct ttm_mem_global *glob);
     134  spin_lock(&zone->glob->lock);    in ttm_mem_zone_store()
     147  spin_unlock(&zone->glob->lock);    in ttm_mem_zone_store()
     149  ttm_check_swapping(zone->glob);    in ttm_mem_zone_store()
     183  struct ttm_mem_global *glob =    in ttm_mem_global_show()  local
     187  spin_lock(&glob->lock);    in ttm_mem_global_show()
     188  val = glob->lower_mem_limit;    in ttm_mem_global_show()
     [all …]

D | ttm_execbuf_util.c
      60  struct ttm_bo_global *glob;    in ttm_eu_backoff_reservation()  local
      66  glob = entry->bo->bdev->glob;    in ttm_eu_backoff_reservation()
      68  spin_lock(&glob->lru_lock);    in ttm_eu_backoff_reservation()
      76  spin_unlock(&glob->lru_lock);    in ttm_eu_backoff_reservation()
      99  struct ttm_bo_global *glob;    in ttm_eu_reserve_buffers()  local
     107  glob = entry->bo->bdev->glob;    in ttm_eu_reserve_buffers()
     177  spin_lock(&glob->lru_lock);    in ttm_eu_reserve_buffers()
     179  spin_unlock(&glob->lru_lock);    in ttm_eu_reserve_buffers()
     191  struct ttm_bo_global *glob;    in ttm_eu_fence_buffer_objects()  local
     197  glob = bo->bdev->glob;    in ttm_eu_fence_buffer_objects()
     [all …]

D | ttm_bo.c
     119  struct ttm_bo_global *glob =    in ttm_bo_global_show()  local
     123  atomic_read(&glob->bo_count));    in ttm_bo_global_show()
     161  atomic_dec(&bo->bdev->glob->bo_count);    in ttm_bo_release_list()
     167  ttm_mem_global_free(bdev->glob->mem_glob, acc_size);    in ttm_bo_release_list()
     191  list_add_tail(&bo->swap, &bdev->glob->swap_lru[bo->priority]);    in ttm_bo_add_mem_to_lru()
     229  struct ttm_bo_global *glob = bo->bdev->glob;    in ttm_bo_del_sub_from_lru()  local
     231  spin_lock(&glob->lru_lock);    in ttm_bo_del_sub_from_lru()
     233  spin_unlock(&glob->lru_lock);    in ttm_bo_del_sub_from_lru()
     314  lru = &pos->first->bdev->glob->swap_lru[i];    in ttm_bo_bulk_move_lru_tail()
     478  struct ttm_bo_global *glob = bdev->glob;    in ttm_bo_cleanup_refs_or_queue()  local
     [all …]

D | ttm_page_alloc.c
     954  int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)    in ttm_page_alloc_init()  argument
     998  &glob->kobj, "pool");    in ttm_page_alloc_init()
    1031  struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob;    in ttm_pool_unpopulate_helper()
    1052  struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob;    in ttm_pool_populate()

D | ttm_agp_backend.c
      54  struct page *dummy_read_page = ttm->bdev->glob->dummy_read_page;    in ttm_agp_bind()

D | ttm_bo_vm.c
     180  spin_lock(&bdev->glob->lru_lock);    in ttm_bo_vm_fault()
     182  spin_unlock(&bdev->glob->lru_lock);    in ttm_bo_vm_fault()

D | ttm_page_alloc_dma.c
     890  struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob;    in ttm_dma_populate()
     995  struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob;    in ttm_dma_unpopulate()
    1161  int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)    in ttm_dma_page_alloc_init()  argument
    1182  &glob->kobj, "dma_pool");    in ttm_dma_page_alloc_init()

D | ttm_bo_util.c
     506  atomic_inc(&bo->bdev->glob->bo_count);    in ttm_buffer_object_transfer()

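The TTM hits above all follow one shape: resolve the shared global through the device (bo->bdev->glob or bdev->glob), take its lock (lock or lru_lock), touch the shared LRU/accounting state, and drop the lock. A minimal userspace sketch of that shape follows; the struct names, the pthread mutex standing in for the kernel spinlock, and the bo_release() helper are all invented for illustration and are not TTM's actual API.

#include <pthread.h>
#include <stdio.h>

struct bo_global {                      /* stand-in for struct ttm_bo_global */
	pthread_mutex_t lru_lock;       /* plays the role of glob->lru_lock  */
	int bo_count;                   /* plays the role of glob->bo_count  */
};

struct bo_device { struct bo_global *glob; };       /* stand-in for ttm_bo_device     */
struct buffer_object { struct bo_device *bdev; };   /* stand-in for ttm_buffer_object */

static void bo_release(struct buffer_object *bo)
{
	struct bo_global *glob = bo->bdev->glob;    /* same indirection as the hits above */

	pthread_mutex_lock(&glob->lru_lock);        /* spin_lock(&glob->lru_lock)   */
	glob->bo_count--;                           /* shared state, touched only under the lock */
	pthread_mutex_unlock(&glob->lru_lock);      /* spin_unlock(&glob->lru_lock) */
}

int main(void)
{
	struct bo_global glob = { PTHREAD_MUTEX_INITIALIZER, 1 };
	struct bo_device bdev = { &glob };
	struct buffer_object bo = { &bdev };

	bo_release(&bo);
	printf("bo_count is now %d\n", glob.bo_count);
	return 0;
}

Build with cc -pthread; the only point is the resolve-glob-then-lock ordering that the listing repeats.
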
/drivers/staging/comedi/drivers/ni_routing/tools/

D | csv_collection.py
       4  import os, csv, glob
      27  for fname in glob.glob(pattern):

/drivers/gpu/drm/nouveau/nvkm/engine/gr/

D | gv100.c
      35  char glob[128];    in gv100_gr_trap_sm()  local
      37  nvkm_snprintbf(glob, sizeof(glob), gf100_mp_global_error, gerr);    in gv100_gr_trap_sm()
      42  gpc, tpc, sm, gerr, glob, werr, warp ? warp->name : "");    in gv100_gr_trap_sm()

D | gf100.c
    1242  char glob[128];    in gf100_gr_trap_mp()  local
    1244  nvkm_snprintbf(glob, sizeof(glob), gf100_mp_global_error, gerr);    in gf100_gr_trap_mp()
    1249  gpc, tpc, gerr, glob, werr, warp ? warp->name : "");    in gf100_gr_trap_mp()

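In both nouveau files, glob is just a 128-byte local buffer: nvkm_snprintbf() expands the gerr error bitfield into readable flag names before the trap message is printed. The sketch below shows that decode step in plain userspace C; snprintbf(), struct bitname, and the two error names are assumptions made for the example, not the real nvkm helper or the real gf100_mp_global_error table.

#include <stdio.h>
#include <string.h>

struct bitname {
	unsigned int bit;
	const char *name;
};

/* Append the name of every bit set in val, space separated, into buf. */
static void snprintbf(char *buf, size_t size,
		      const struct bitname *tbl, unsigned int val)
{
	size_t len = 0;

	buf[0] = '\0';
	for (; tbl->name; tbl++) {
		if (!(val & (1u << tbl->bit)))
			continue;
		len += snprintf(buf + len, size - len, "%s%s",
				len ? " " : "", tbl->name);
		if (len >= size)
			break;          /* buffer full, output truncated */
	}
}

int main(void)
{
	/* Hypothetical error names; the real table is gf100_mp_global_error. */
	static const struct bitname errs[] = {
		{ 0, "SM_TO_SM_FAULT" },
		{ 2, "MULTIPLE_WARP_ERRORS" },
		{ 0, NULL },
	};
	char glob[128];

	snprintbf(glob, sizeof(glob), errs, 0x5);
	printf("glob = [%s]\n", glob);  /* -> [SM_TO_SM_FAULT MULTIPLE_WARP_ERRORS] */
	return 0;
}
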
/drivers/gpu/drm/vmwgfx/

D | vmwgfx_ttm_buffer.c
     411  struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);    in vmw_ttm_map_dma()  local
     440  ret = ttm_mem_global_alloc(glob, vmw_tt->sg_alloc_size, &ctx);    in vmw_ttm_map_dma()
     457  ttm_mem_global_free(glob, over_alloc);    in vmw_ttm_map_dma()
     487  ttm_mem_global_free(glob, vmw_tt->sg_alloc_size);    in vmw_ttm_map_dma()
     659  struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);    in vmw_ttm_populate()  local
     668  ret = ttm_mem_global_alloc(glob, size, ctx);    in vmw_ttm_populate()
     675  ttm_mem_global_free(glob, size);    in vmw_ttm_populate()
     687  struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);    in vmw_ttm_unpopulate()  local
     701  ttm_mem_global_free(glob, size);    in vmw_ttm_unpopulate()

/drivers/gpu/drm/qxl/

D | qxl_release.c
     432  struct ttm_bo_global *glob;    in qxl_release_fence_buffer_objects()  local
     454  glob = bdev->glob;    in qxl_release_fence_buffer_objects()
     456  spin_lock(&glob->lru_lock);    in qxl_release_fence_buffer_objects()
     465  spin_unlock(&glob->lru_lock);    in qxl_release_fence_buffer_objects()

D | qxl_ttm.c
     373  struct ttm_bo_global *glob = rdev->mman.bdev.glob;    in qxl_mm_dump_table()  local
     376  spin_lock(&glob->lru_lock);    in qxl_mm_dump_table()
     378  spin_unlock(&glob->lru_lock);    in qxl_mm_dump_table()

/drivers/gpu/drm/amd/amdgpu/

D | amdgpu_gart.c
      74  struct page *dummy_page = adev->mman.bdev.glob->dummy_read_page;    in amdgpu_gart_dummy_page_init()

D | amdgpu_vm.c
     603  struct ttm_bo_global *glob = adev->mman.bdev.glob;    in amdgpu_vm_move_to_lru_tail()  local
     607  spin_lock(&glob->lru_lock);    in amdgpu_vm_move_to_lru_tail()
     609  spin_unlock(&glob->lru_lock);    in amdgpu_vm_move_to_lru_tail()
     615  spin_lock(&glob->lru_lock);    in amdgpu_vm_move_to_lru_tail()
     627  spin_unlock(&glob->lru_lock);    in amdgpu_vm_move_to_lru_tail()

/drivers/net/ethernet/qlogic/qed/

D | qed_dev.c
    3961  offsetof(struct nvm_cfg1, glob) +    in qed_hw_get_nvm_info()
    4111  offsetof(struct nvm_cfg1, glob) +    in qed_hw_get_nvm_info()
    4160  offsetof(struct nvm_cfg1, glob) +    in qed_hw_get_nvm_info()

D | qed_mcp.c
    1943  offsetof(struct nvm_cfg1, glob) +    in qed_mcp_get_mbi_ver()

D | qed_hsi.h
   13340  struct nvm_cfg1_glob glob;    member

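In the qed driver, glob is the nested struct nvm_cfg1_glob member of struct nvm_cfg1 (qed_hsi.h:13340), and the hits in qed_dev.c / qed_mcp.c locate NVM options by composing offsetof() through it. A minimal sketch of that addressing arithmetic, using invented placeholder fields rather than the real qed_hsi.h layout:

#include <stddef.h>
#include <stdio.h>

/* Placeholder layouts; the real definitions live in qed_hsi.h. */
struct nvm_cfg1_glob {
	unsigned int core_cfg;      /* hypothetical field */
	unsigned int mbi_version;   /* hypothetical field */
};

struct nvm_cfg1 {
	unsigned int dummy;         /* hypothetical leading member */
	struct nvm_cfg1_glob glob;  /* the "glob" member from qed_hsi.h:13340 */
};

int main(void)
{
	/* Same arithmetic as the qed_dev.c / qed_mcp.c hits: the offset of the
	 * outer glob member plus the offset of a field inside it gives the
	 * byte position of that option within the whole nvm_cfg1 image. */
	size_t addr = offsetof(struct nvm_cfg1, glob) +
		      offsetof(struct nvm_cfg1_glob, mbi_version);

	printf("option lives %zu bytes into struct nvm_cfg1\n", addr);
	return 0;
}
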