/drivers/virtio/
D | virtio_mem.c
    262  static void virtio_mem_retry(struct virtio_mem *vm);
    268  static int register_virtio_mem_device(struct virtio_mem *vm)  [in register_virtio_mem_device() argument]
    277  list_add_rcu(&vm->next, &virtio_mem_devices);  [in register_virtio_mem_device()]
    287  static void unregister_virtio_mem_device(struct virtio_mem *vm)  [in unregister_virtio_mem_device() argument]
    291  list_del_rcu(&vm->next);  [in unregister_virtio_mem_device()]
    318  static unsigned long virtio_mem_phys_to_bb_id(struct virtio_mem *vm,  [in virtio_mem_phys_to_bb_id() argument]
    321  return addr / vm->bbm.bb_size;  [in virtio_mem_phys_to_bb_id()]
    327  static uint64_t virtio_mem_bb_id_to_phys(struct virtio_mem *vm,  [in virtio_mem_bb_id_to_phys() argument]
    330  return bb_id * vm->bbm.bb_size;  [in virtio_mem_bb_id_to_phys()]
    336  static unsigned long virtio_mem_phys_to_sb_id(struct virtio_mem *vm,  [in virtio_mem_phys_to_sb_id() argument]
    [all …]

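The two helpers visible in this excerpt are plain address arithmetic: a physical address belongs to the big block obtained by dividing by the device's big-block size, and the block id maps back to its first byte by multiplying. A minimal userspace sketch of the same conversions follows; the function names and the 2 GiB block size are illustrative, not taken from the driver, which negotiates bb_size with the device.

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative stand-in for vm->bbm.bb_size (assumed 2 GiB here). */
    static const uint64_t bb_size = 2ULL << 30;

    static unsigned long phys_to_bb_id(uint64_t addr)
    {
            return addr / bb_size;          /* round down to the owning block */
    }

    static uint64_t bb_id_to_phys(unsigned long bb_id)
    {
            return bb_id * bb_size;         /* first byte of that block */
    }

    int main(void)
    {
            uint64_t addr = 5ULL << 30;     /* an address 5 GiB into the region */
            unsigned long id = phys_to_bb_id(addr);

            printf("addr %#llx -> block %lu starting at %#llx\n",
                   (unsigned long long)addr, id,
                   (unsigned long long)bb_id_to_phys(id));
            return 0;
    }
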
/drivers/gpu/drm/lima/
D | lima_vm.c
    18   struct lima_vm *vm;  [member]
    35   static void lima_vm_unmap_range(struct lima_vm *vm, u32 start, u32 end)  [in lima_vm_unmap_range() argument]
    43   vm->bts[pbe].cpu[bte] = 0;  [in lima_vm_unmap_range()]
    47   static int lima_vm_map_page(struct lima_vm *vm, dma_addr_t pa, u32 va)  [in lima_vm_map_page() argument]
    52   if (!vm->bts[pbe].cpu) {  [in lima_vm_map_page()]
    57   vm->bts[pbe].cpu = dma_alloc_wc(  [in lima_vm_map_page()]
    58   vm->dev->dev, LIMA_PAGE_SIZE << LIMA_VM_NUM_PT_PER_BT_SHIFT,  [in lima_vm_map_page()]
    59   &vm->bts[pbe].dma, GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);  [in lima_vm_map_page()]
    60   if (!vm->bts[pbe].cpu)  [in lima_vm_map_page()]
    63   pts = vm->bts[pbe].dma;  [in lima_vm_map_page()]
    [all …]

/drivers/gpu/drm/i915/gt/
D | intel_gtt.c
    15   struct drm_i915_gem_object *alloc_pt_lmem(struct i915_address_space *vm, int sz)  [in alloc_pt_lmem() argument]
    31   obj = __i915_gem_object_create_lmem_with_ps(vm->i915, sz, sz, 0);  [in alloc_pt_lmem()]
    38   obj->base.resv = i915_vm_resv_get(vm);  [in alloc_pt_lmem()]
    39   obj->shares_resv_from = vm;  [in alloc_pt_lmem()]
    45   struct drm_i915_gem_object *alloc_pt_dma(struct i915_address_space *vm, int sz)  [in alloc_pt_dma() argument]
    49   if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))  [in alloc_pt_dma()]
    50   i915_gem_shrink_all(vm->i915);  [in alloc_pt_dma()]
    52   obj = i915_gem_object_create_internal(vm->i915, sz);  [in alloc_pt_dma()]
    59   obj->base.resv = i915_vm_resv_get(vm);  [in alloc_pt_dma()]
    60   obj->shares_resv_from = vm;  [in alloc_pt_dma()]
    [all …]

D | intel_gtt.h
    61   #define ggtt_total_entries(ggtt) ((ggtt)->vm.total >> PAGE_SHIFT)
    198  void (*bind_vma)(struct i915_address_space *vm,
    207  void (*unbind_vma)(struct i915_address_space *vm,
    264  (*alloc_pt_dma)(struct i915_address_space *vm, int sz);
    272  void (*allocate_va_range)(struct i915_address_space *vm,
    275  void (*clear_range)(struct i915_address_space *vm,
    277  void (*insert_page)(struct i915_address_space *vm,
    282  void (*insert_entries)(struct i915_address_space *vm,
    286  void (*cleanup)(struct i915_address_space *vm);
    288  void (*foreach)(struct i915_address_space *vm,
    [all …]

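What this header shows is a set of per-backend function pointers embedded in struct i915_address_space: each GGTT/PPGTT flavour fills in bind_vma, clear_range, insert_page and so on, and common code drives mappings purely through those hooks (the mock_gtt.c entry under selftests below wires no-op callbacks into the same slots). A cut-down, hypothetical userspace sketch of the pattern; the struct and function names here are invented stand-ins, not the real i915 types:

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified vtable-in-the-object pattern; the real struct has many
     * more fields and uses kernel types. */
    struct addr_space {
            uint64_t total;                              /* size of the VA window */
            void (*clear_range)(struct addr_space *vm,
                                uint64_t start, uint64_t length);
            void (*insert_page)(struct addr_space *vm,
                                uint64_t phys, uint64_t offset);
            void (*cleanup)(struct addr_space *vm);
    };

    /* A "mock" backend in the spirit of mock_gtt.c: every hook is a no-op,
     * which is enough for code that only exercises VA management. */
    static void mock_clear_range(struct addr_space *vm, uint64_t s, uint64_t l)
    { (void)vm; (void)s; (void)l; }
    static void mock_insert_page(struct addr_space *vm, uint64_t p, uint64_t o)
    { (void)vm; (void)p; (void)o; }
    static void mock_cleanup(struct addr_space *vm) { (void)vm; }

    int main(void)
    {
            struct addr_space vm = {
                    .total       = 1ULL << 32,
                    .clear_range = mock_clear_range,
                    .insert_page = mock_insert_page,
                    .cleanup     = mock_cleanup,
            };

            /* Generation-agnostic caller: the same call works for any
             * backend because it only goes through the function pointers. */
            vm.clear_range(&vm, 0, vm.total);
            vm.cleanup(&vm);
            printf("cleared %llu bytes of (mock) address space\n",
                   (unsigned long long)vm.total);
            return 0;
    }
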
D | intel_ggtt.c
    47   struct drm_i915_private *i915 = ggtt->vm.i915;  [in ggtt_init_hw()]
    49   i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT);  [in ggtt_init_hw()]
    51   ggtt->vm.is_ggtt = true;  [in ggtt_init_hw()]
    54   ggtt->vm.has_read_only = IS_VALLEYVIEW(i915);  [in ggtt_init_hw()]
    57   ggtt->vm.mm.color_adjust = i915_ggtt_color_adjust;  [in ggtt_init_hw()]
    63   ggtt->vm.cleanup(&ggtt->vm);  [in ggtt_init_hw()]
    124  mutex_lock(&ggtt->vm.mutex);  [in i915_ggtt_suspend()]
    127  open = atomic_xchg(&ggtt->vm.open, 0);  [in i915_ggtt_suspend()]
    129  list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) {  [in i915_ggtt_suspend()]
    142  ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);  [in i915_ggtt_suspend()]
    [all …]

D | gen8_ppgtt.c
    60   struct drm_i915_private *i915 = ppgtt->vm.i915;  [in gen8_ppgtt_notify_vgt()]
    61   struct intel_uncore *uncore = ppgtt->vm.gt->uncore;  [in gen8_ppgtt_notify_vgt()]
    72   if (i915_vm_is_4lvl(&ppgtt->vm)) {  [in gen8_ppgtt_notify_vgt()]
    150  static unsigned int gen8_pd_top_count(const struct i915_address_space *vm)  [in gen8_pd_top_count() argument]
    152  unsigned int shift = __gen8_pte_shift(vm->top);  [in gen8_pd_top_count()]
    154  return (vm->total + (1ull << shift) - 1) >> shift;  [in gen8_pd_top_count()]
    158  gen8_pdp_for_page_index(struct i915_address_space * const vm, const u64 idx)  [in gen8_pdp_for_page_index() argument]
    160  struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm);  [in gen8_pdp_for_page_index()]
    162  if (vm->top == 2)  [in gen8_pdp_for_page_index()]
    165  return i915_pd_entry(ppgtt->pd, gen8_pd_index(idx, vm->top));  [in gen8_pdp_for_page_index()]
    [all …]

D | gen6_ppgtt.c
    19   dma_addr_t addr = pt ? px_dma(pt) : px_dma(ppgtt->base.vm.scratch[1]);  [in gen6_write_pde()]
    70   static void gen6_ppgtt_clear_range(struct i915_address_space *vm,  [in gen6_ppgtt_clear_range() argument]
    73   struct gen6_ppgtt * const ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));  [in gen6_ppgtt_clear_range()]
    75   const gen6_pte_t scratch_pte = vm->scratch[0]->encode;  [in gen6_ppgtt_clear_range()]
    106  static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,  [in gen6_ppgtt_insert_entries() argument]
    111  struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);  [in gen6_ppgtt_insert_entries()]
    116  const u32 pte_encode = vm->pte_encode(0, cache_level, flags);  [in gen6_ppgtt_insert_entries()]
    162  gen6_ggtt_invalidate(ppgtt->base.vm.gt->ggtt);  [in gen6_flush_pd()]
    168  static void gen6_alloc_va_range(struct i915_address_space *vm,  [in gen6_alloc_va_range() argument]
    172  struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));  [in gen6_alloc_va_range()]
    [all …]

D | intel_ppgtt.c
    15   struct i915_page_table *alloc_pt(struct i915_address_space *vm)  [in alloc_pt() argument]
    23   pt->base = vm->alloc_pt_dma(vm, I915_GTT_PAGE_SIZE_4K);  [in alloc_pt()]
    51   struct i915_page_directory *alloc_pd(struct i915_address_space *vm)  [in alloc_pd() argument]
    59   pd->pt.base = vm->alloc_pt_dma(vm, I915_GTT_PAGE_SIZE_4K);  [in alloc_pd()]
    69   void free_px(struct i915_address_space *vm, struct i915_page_table *pt, int lvl)  [in free_px() argument]
    174  trace_i915_ppgtt_create(&ppgtt->vm);  [in i915_ppgtt_create()]
    179  void ppgtt_bind_vma(struct i915_address_space *vm,  [in ppgtt_bind_vma() argument]
    188  vm->allocate_va_range(vm, stash, vma->node.start, vma->size);  [in ppgtt_bind_vma()]
    199  vm->insert_entries(vm, vma, cache_level, pte_flags);  [in ppgtt_bind_vma()]
    203  void ppgtt_unbind_vma(struct i915_address_space *vm, struct i915_vma *vma)  [in ppgtt_unbind_vma() argument]
    [all …]

/drivers/gpu/drm/i915/display/
D | intel_dpt.c
    13   struct i915_address_space vm;  [member]
    20   #define i915_is_dpt(vm) ((vm)->is_dpt)  [argument]
    23   i915_vm_to_dpt(struct i915_address_space *vm)  [in i915_vm_to_dpt() argument]
    25   BUILD_BUG_ON(offsetof(struct i915_dpt, vm));  [in i915_vm_to_dpt()]
    26   GEM_BUG_ON(!i915_is_dpt(vm));  [in i915_vm_to_dpt()]
    27   return container_of(vm, struct i915_dpt, vm);  [in i915_vm_to_dpt()]
    30   #define dpt_total_entries(dpt) ((dpt)->vm.total >> PAGE_SHIFT)
    37   static void dpt_insert_page(struct i915_address_space *vm,  [in dpt_insert_page() argument]
    43   struct i915_dpt *dpt = i915_vm_to_dpt(vm);  [in dpt_insert_page()]
    47   vm->pte_encode(addr, level, flags));  [in dpt_insert_page()]
    [all …]

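The i915_vm_to_dpt() lines show the usual kernel embedding idiom: struct i915_dpt embeds the generic address space as a member, a vm pointer is converted back to its container with container_of(), and BUILD_BUG_ON(offsetof(struct i915_dpt, vm)) asserts at compile time that the member sits at offset zero. A small userspace sketch of the same idiom, with container_of spelled out and simplified stand-in types:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct addr_space { unsigned long total; };

    struct dpt {
            struct addr_space vm;   /* kept as the first member (offset 0) */
            void *obj;
    };

    static struct dpt *vm_to_dpt(struct addr_space *vm)
    {
            /* Compile-time check standing in for BUILD_BUG_ON(offsetof(...)). */
            _Static_assert(offsetof(struct dpt, vm) == 0, "vm must be first");
            return container_of(vm, struct dpt, vm);
    }

    int main(void)
    {
            struct dpt d = { .vm = { .total = 4096 } };
            struct dpt *back = vm_to_dpt(&d.vm);

            printf("round-trip ok: %d\n", back == &d);
            return 0;
    }
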
/drivers/virt/acrn/
D | vm.c
    25   struct acrn_vm *acrn_vm_create(struct acrn_vm *vm,  [in acrn_vm_create() argument]
    37   mutex_init(&vm->regions_mapping_lock);  [in acrn_vm_create()]
    38   INIT_LIST_HEAD(&vm->ioreq_clients);  [in acrn_vm_create()]
    39   spin_lock_init(&vm->ioreq_clients_lock);  [in acrn_vm_create()]
    40   vm->vmid = vm_param->vmid;  [in acrn_vm_create()]
    41   vm->vcpu_num = vm_param->vcpu_num;  [in acrn_vm_create()]
    43   if (acrn_ioreq_init(vm, vm_param->ioreq_buf) < 0) {  [in acrn_vm_create()]
    45   vm->vmid = ACRN_INVALID_VMID;  [in acrn_vm_create()]
    50   list_add(&vm->list, &acrn_vm_list);  [in acrn_vm_create()]
    53   acrn_ioeventfd_init(vm);  [in acrn_vm_create()]
    [all …]

D | irqfd.c
    33   struct acrn_vm *vm;  [member]
    44   struct acrn_vm *vm = irqfd->vm;  [in acrn_irqfd_inject() local]
    46   acrn_msi_inject(vm, irqfd->msi.msi_addr,  [in acrn_irqfd_inject()]
    54   lockdep_assert_held(&irqfd->vm->irqfds_lock);  [in hsm_irqfd_shutdown()]
    66   struct acrn_vm *vm;  [in hsm_irqfd_shutdown_work() local]
    69   vm = irqfd->vm;  [in hsm_irqfd_shutdown_work()]
    70   mutex_lock(&vm->irqfds_lock);  [in hsm_irqfd_shutdown_work()]
    73   mutex_unlock(&vm->irqfds_lock);  [in hsm_irqfd_shutdown_work()]
    82   struct acrn_vm *vm;  [in hsm_irqfd_wakeup() local]
    85   vm = irqfd->vm;  [in hsm_irqfd_wakeup()]
    [all …]

D | ioreq.c
    39   static int ioreq_complete_request(struct acrn_vm *vm, u16 vcpu,  [in ioreq_complete_request() argument]
    64   ret = hcall_notify_req_finish(vm->vmid, vcpu);  [in ioreq_complete_request()]
    79   if (vcpu >= client->vm->vcpu_num)  [in acrn_ioreq_complete_request()]
    84   acrn_req = (struct acrn_io_request *)client->vm->ioreq_buf;  [in acrn_ioreq_complete_request()]
    88   ret = ioreq_complete_request(client->vm, vcpu, acrn_req);  [in acrn_ioreq_complete_request()]
    93   int acrn_ioreq_request_default_complete(struct acrn_vm *vm, u16 vcpu)  [in acrn_ioreq_request_default_complete() argument]
    97   spin_lock_bh(&vm->ioreq_clients_lock);  [in acrn_ioreq_request_default_complete()]
    98   if (vm->default_client)  [in acrn_ioreq_request_default_complete()]
    99   ret = acrn_ioreq_complete_request(vm->default_client,  [in acrn_ioreq_request_default_complete()]
    101  spin_unlock_bh(&vm->ioreq_clients_lock);  [in acrn_ioreq_request_default_complete()]
    [all …]

D | ioeventfd.c
    43   static void acrn_ioeventfd_shutdown(struct acrn_vm *vm, struct hsm_ioeventfd *p)  [in acrn_ioeventfd_shutdown() argument]
    45   lockdep_assert_held(&vm->ioeventfds_lock);  [in acrn_ioeventfd_shutdown()]
    52   static bool hsm_ioeventfd_is_conflict(struct acrn_vm *vm,  [in hsm_ioeventfd_is_conflict() argument]
    57   lockdep_assert_held(&vm->ioeventfds_lock);  [in hsm_ioeventfd_is_conflict()]
    60   list_for_each_entry(p, &vm->ioeventfds, list)  [in hsm_ioeventfd_is_conflict()]
    76   static int acrn_ioeventfd_assign(struct acrn_vm *vm,  [in acrn_ioeventfd_assign() argument]
    121  mutex_lock(&vm->ioeventfds_lock);  [in acrn_ioeventfd_assign()]
    123  if (hsm_ioeventfd_is_conflict(vm, p)) {  [in acrn_ioeventfd_assign()]
    129  ret = acrn_ioreq_range_add(vm->ioeventfd_client, p->type,  [in acrn_ioeventfd_assign()]
    134  list_add_tail(&p->list, &vm->ioeventfds);  [in acrn_ioeventfd_assign()]
    [all …]

D | hsm.c
    31   struct acrn_vm *vm;  [in acrn_dev_open() local]
    33   vm = kzalloc(sizeof(*vm), GFP_KERNEL);  [in acrn_dev_open()]
    34   if (!vm)  [in acrn_dev_open()]
    37   vm->vmid = ACRN_INVALID_VMID;  [in acrn_dev_open()]
    38   filp->private_data = vm;  [in acrn_dev_open()]
    110  struct acrn_vm *vm = filp->private_data;  [in acrn_dev_ioctl() local]
    124  if (vm->vmid == ACRN_INVALID_VMID && cmd != ACRN_IOCTL_CREATE_VM) {  [in acrn_dev_ioctl()]
    142  vm = acrn_vm_create(vm, vm_param);  [in acrn_dev_ioctl()]
    143  if (!vm) {  [in acrn_dev_ioctl()]
    151  acrn_vm_destroy(vm);  [in acrn_dev_ioctl()]
    [all …]

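The hsm.c lines show the common char-device lifecycle: open() allocates the per-open VM context and parks it in filp->private_data, every ioctl fetches it back, and a guard rejects all commands except VM creation while vmid is still ACRN_INVALID_VMID. A simplified kernel-style fragment of that pattern is below; it assumes the usual <linux/fs.h>/<linux/slab.h> context and the driver's own acrn types, and it is a sketch of the shape of the code, not the real driver functions.

    /* Sketch only: per-open state in filp->private_data plus a "VM exists"
     * guard in the ioctl path. */
    static int acrn_dev_open(struct inode *inode, struct file *filp)
    {
            struct acrn_vm *vm = kzalloc(sizeof(*vm), GFP_KERNEL);

            if (!vm)
                    return -ENOMEM;

            vm->vmid = ACRN_INVALID_VMID;   /* "no VM created yet" marker */
            filp->private_data = vm;
            return 0;
    }

    static long acrn_dev_ioctl(struct file *filp, unsigned int cmd,
                               unsigned long arg)
    {
            struct acrn_vm *vm = filp->private_data;

            /* Everything except VM creation needs an existing VM behind
             * the fd; the exact errno is not shown in the excerpt. */
            if (vm->vmid == ACRN_INVALID_VMID && cmd != ACRN_IOCTL_CREATE_VM)
                    return -EFAULT;

            /* ... dispatch on cmd, e.g. create/destroy the VM ... */
            return 0;
    }
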
D | mm.c
    18   static int modify_region(struct acrn_vm *vm, struct vm_memory_region_op *region)  [in modify_region() argument]
    27   regions->vmid = vm->vmid;  [in modify_region()]
    34   "Failed to set memory region for VM[%u]!\n", vm->vmid);  [in modify_region()]
    51   int acrn_mm_region_add(struct acrn_vm *vm, u64 user_gpa, u64 service_gpa,  [in acrn_mm_region_add() argument]
    67   ret = modify_region(vm, region);  [in acrn_mm_region_add()]
    84   int acrn_mm_region_del(struct acrn_vm *vm, u64 user_gpa, u64 size)  [in acrn_mm_region_del() argument]
    99   ret = modify_region(vm, region);  [in acrn_mm_region_del()]
    107  int acrn_vm_memseg_map(struct acrn_vm *vm, struct acrn_vm_memmap *memmap)  [in acrn_vm_memseg_map() argument]
    112  return acrn_vm_ram_map(vm, memmap);  [in acrn_vm_memseg_map()]
    120  ret = acrn_mm_region_add(vm, memmap->user_vm_pa,  [in acrn_vm_memseg_map()]
    [all …]

D | acrn_drv.h
    121  struct acrn_vm *vm;  [member]
    189  struct acrn_vm *acrn_vm_create(struct acrn_vm *vm,
    191  int acrn_vm_destroy(struct acrn_vm *vm);
    192  int acrn_mm_region_add(struct acrn_vm *vm, u64 user_gpa, u64 service_gpa,
    194  int acrn_mm_region_del(struct acrn_vm *vm, u64 user_gpa, u64 size);
    195  int acrn_vm_memseg_map(struct acrn_vm *vm, struct acrn_vm_memmap *memmap);
    196  int acrn_vm_memseg_unmap(struct acrn_vm *vm, struct acrn_vm_memmap *memmap);
    197  int acrn_vm_ram_map(struct acrn_vm *vm, struct acrn_vm_memmap *memmap);
    198  void acrn_vm_all_ram_unmap(struct acrn_vm *vm);
    200  int acrn_ioreq_init(struct acrn_vm *vm, u64 buf_vma);
    [all …]

/drivers/gpu/drm/i915/selftests/
D | mock_gtt.c
    27   static void mock_insert_page(struct i915_address_space *vm,  [in mock_insert_page() argument]
    35   static void mock_insert_entries(struct i915_address_space *vm,  [in mock_insert_entries() argument]
    41   static void mock_bind_ppgtt(struct i915_address_space *vm,  [in mock_bind_ppgtt() argument]
    51   static void mock_unbind_ppgtt(struct i915_address_space *vm,  [in mock_unbind_ppgtt() argument]
    56   static void mock_cleanup(struct i915_address_space *vm)  [in mock_cleanup() argument]
    60   static void mock_clear_range(struct i915_address_space *vm,  [in mock_clear_range() argument]
    73   ppgtt->vm.gt = &i915->gt;  [in mock_ppgtt()]
    74   ppgtt->vm.i915 = i915;  [in mock_ppgtt()]
    75   ppgtt->vm.total = round_down(U64_MAX, PAGE_SIZE);  [in mock_ppgtt()]
    76   ppgtt->vm.dma = i915->drm.dev;  [in mock_ppgtt()]
    [all …]

D | i915_gem_gtt.c
    162  if (!ppgtt->vm.allocate_va_range)  [in igt_ppgtt_alloc()]
    173  limit = min(ppgtt->vm.total, limit);  [in igt_ppgtt_alloc()]
    177  err = i915_vm_lock_objects(&ppgtt->vm, &ww);  [in igt_ppgtt_alloc()]
    185  err = i915_vm_alloc_pt_stash(&ppgtt->vm, &stash, size);  [in igt_ppgtt_alloc()]
    189  err = i915_vm_map_pt_stash(&ppgtt->vm, &stash);  [in igt_ppgtt_alloc()]
    191  i915_vm_free_pt_stash(&ppgtt->vm, &stash);  [in igt_ppgtt_alloc()]
    195  ppgtt->vm.allocate_va_range(&ppgtt->vm, &stash, 0, size);  [in igt_ppgtt_alloc()]
    198  ppgtt->vm.clear_range(&ppgtt->vm, 0, size);  [in igt_ppgtt_alloc()]
    200  i915_vm_free_pt_stash(&ppgtt->vm, &stash);  [in igt_ppgtt_alloc()]
    207  err = i915_vm_alloc_pt_stash(&ppgtt->vm, &stash, size - last);  [in igt_ppgtt_alloc()]
    [all …]

D | i915_gem_evict.c
    55   obj = i915_gem_object_create_internal(ggtt->vm.i915,  [in populate_ggtt()]
    73   count, ggtt->vm.total / PAGE_SIZE);  [in populate_ggtt()]
    75   if (list_empty(&ggtt->vm.bound_list)) {  [in populate_ggtt()]
    87   list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)  [in unpin_ggtt()]
    102  i915_gem_drain_freed_objects(ggtt->vm.i915);  [in cleanup_objects()]
    119  mutex_lock(&ggtt->vm.mutex);  [in igt_evict_something()]
    120  err = i915_gem_evict_something(&ggtt->vm,  [in igt_evict_something()]
    124  mutex_unlock(&ggtt->vm.mutex);  [in igt_evict_something()]
    134  mutex_lock(&ggtt->vm.mutex);  [in igt_evict_something()]
    135  err = i915_gem_evict_something(&ggtt->vm,  [in igt_evict_something()]
    [all …]

/drivers/gpu/drm/amd/amdgpu/
D | amdgpu_vm.c
    102  int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm,  [in amdgpu_vm_set_pasid() argument]
    107  if (vm->pasid == pasid)  [in amdgpu_vm_set_pasid()]
    110  if (vm->pasid) {  [in amdgpu_vm_set_pasid()]
    111  r = xa_err(xa_erase_irq(&adev->vm_manager.pasids, vm->pasid));  [in amdgpu_vm_set_pasid()]
    115  vm->pasid = 0;  [in amdgpu_vm_set_pasid()]
    119  r = xa_err(xa_store_irq(&adev->vm_manager.pasids, pasid, vm,  [in amdgpu_vm_set_pasid()]
    124  vm->pasid = pasid;  [in amdgpu_vm_set_pasid()]
    136  static inline void amdgpu_vm_eviction_lock(struct amdgpu_vm *vm)  [in amdgpu_vm_eviction_lock() argument]
    138  mutex_lock(&vm->eviction_lock);  [in amdgpu_vm_eviction_lock()]
    139  vm->saved_flags = memalloc_noreclaim_save();  [in amdgpu_vm_eviction_lock()]
    [all …]

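amdgpu_vm_set_pasid() keeps a pasid-to-vm lookup table in an xarray: re-binding first erases the old entry, then stores the vm under the new index, with xa_err() turning a stored error entry into a negative errno. A hedged kernel-style fragment of that re-binding pattern follows; it passes the xarray as a parameter instead of reaching into adev->vm_manager, and the error unwinding in the real function is more careful.

    /* Sketch of the xarray re-binding pattern (assumes <linux/xarray.h> and
     * the amdgpu types are in scope). */
    static int vm_set_pasid(struct xarray *pasids, struct amdgpu_vm *vm, u32 pasid)
    {
            int r;

            if (vm->pasid == pasid)
                    return 0;                       /* nothing to change */

            if (vm->pasid) {
                    /* Drop the old pasid -> vm entry. */
                    r = xa_err(xa_erase_irq(pasids, vm->pasid));
                    if (r < 0)
                            return r;
                    vm->pasid = 0;
            }

            if (pasid) {
                    /* Install the new binding; xa_store_irq() may allocate. */
                    r = xa_err(xa_store_irq(pasids, pasid, vm, GFP_KERNEL));
                    if (r < 0)
                            return r;
            }

            vm->pasid = pasid;
            return 0;
    }
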
/drivers/gpu/drm/radeon/
D | radeon_vm.c
    130  struct radeon_vm *vm,  [in radeon_vm_get_bos() argument]
    136  list = kvmalloc_array(vm->max_pde_used + 2,  [in radeon_vm_get_bos()]
    142  list[0].robj = vm->page_directory;  [in radeon_vm_get_bos()]
    145  list[0].tv.bo = &vm->page_directory->tbo;  [in radeon_vm_get_bos()]
    150  for (i = 0, idx = 1; i <= vm->max_pde_used; i++) {  [in radeon_vm_get_bos()]
    151  if (!vm->page_tables[i].bo)  [in radeon_vm_get_bos()]
    154  list[idx].robj = vm->page_tables[i].bo;  [in radeon_vm_get_bos()]
    179  struct radeon_vm *vm, int ring)  [in radeon_vm_grab_id() argument]
    182  struct radeon_vm_id *vm_id = &vm->ids[ring];  [in radeon_vm_grab_id()]
    238  struct radeon_vm *vm,  [in radeon_vm_flush() argument]
    [all …]

/drivers/gpu/drm/imx/dcss/
D | dcss-ss.c
    126  void dcss_ss_sync_set(struct dcss_ss *ss, struct videomode *vm,  [in dcss_ss_sync_set() argument]
    135  lrc_x = vm->hfront_porch + vm->hback_porch + vm->hsync_len +  [in dcss_ss_sync_set()]
    136  vm->hactive - 1;  [in dcss_ss_sync_set()]
    137  lrc_y = vm->vfront_porch + vm->vback_porch + vm->vsync_len +  [in dcss_ss_sync_set()]
    138  vm->vactive - 1;  [in dcss_ss_sync_set()]
    142  hsync_start = vm->hfront_porch + vm->hback_porch + vm->hsync_len +  [in dcss_ss_sync_set()]
    143  vm->hactive - 1;  [in dcss_ss_sync_set()]
    144  hsync_end = vm->hsync_len - 1;  [in dcss_ss_sync_set()]
    150  vsync_start = vm->vfront_porch - 1;  [in dcss_ss_sync_set()]
    151  vsync_end = vm->vfront_porch + vm->vsync_len - 1;  [in dcss_ss_sync_set()]
    [all …]

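dcss_ss_sync_set() derives its counters directly from struct videomode: what looks like the last column/row index is the sum of front porch, back porch, sync length and active size minus one (the counters are 0-based), and the vsync window is expressed as offsets inside the front porch. A small userspace sketch of the same arithmetic; the struct is redeclared locally and the 1080p-style numbers are illustrative, not taken from the driver.

    #include <stdio.h>

    /* Field names follow the kernel's struct videomode; values are made up. */
    struct videomode {
            unsigned int hactive, hfront_porch, hback_porch, hsync_len;
            unsigned int vactive, vfront_porch, vback_porch, vsync_len;
    };

    int main(void)
    {
            struct videomode vm = {
                    .hactive = 1920, .hfront_porch = 88, .hback_porch = 148, .hsync_len = 44,
                    .vactive = 1080, .vfront_porch = 4,  .vback_porch = 36,  .vsync_len = 5,
            };

            /* Same sums as the excerpt: total blanking plus active size,
             * minus one because the hardware counters start at zero. */
            unsigned int lrc_x = vm.hfront_porch + vm.hback_porch +
                                 vm.hsync_len + vm.hactive - 1;
            unsigned int lrc_y = vm.vfront_porch + vm.vback_porch +
                                 vm.vsync_len + vm.vactive - 1;
            unsigned int vsync_start = vm.vfront_porch - 1;
            unsigned int vsync_end   = vm.vfront_porch + vm.vsync_len - 1;

            printf("last column %u, last row %u, vsync %u..%u\n",
                   lrc_x, lrc_y, vsync_start, vsync_end);
            return 0;
    }
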
/drivers/irqchip/
D | irq-gic-v4.c
    159  int its_alloc_vcpu_irqs(struct its_vm *vm)  [in its_alloc_vcpu_irqs() argument]
    163  vm->fwnode = irq_domain_alloc_named_id_fwnode("GICv4-vpe",  [in its_alloc_vcpu_irqs()]
    165  if (!vm->fwnode)  [in its_alloc_vcpu_irqs()]
    168  vm->domain = irq_domain_create_hierarchy(gic_domain, 0, vm->nr_vpes,  [in its_alloc_vcpu_irqs()]
    169  vm->fwnode, vpe_domain_ops,  [in its_alloc_vcpu_irqs()]
    170  vm);  [in its_alloc_vcpu_irqs()]
    171  if (!vm->domain)  [in its_alloc_vcpu_irqs()]
    174  for (i = 0; i < vm->nr_vpes; i++) {  [in its_alloc_vcpu_irqs()]
    175  vm->vpes[i]->its_vm = vm;  [in its_alloc_vcpu_irqs()]
    176  vm->vpes[i]->idai = true;  [in its_alloc_vcpu_irqs()]
    [all …]

/drivers/video/fbdev/omap2/omapfb/dss/
D | display.c
    259  void videomode_to_omap_video_timings(const struct videomode *vm,  [in videomode_to_omap_video_timings() argument]
    264  ovt->pixelclock = vm->pixelclock;  [in videomode_to_omap_video_timings()]
    265  ovt->x_res = vm->hactive;  [in videomode_to_omap_video_timings()]
    266  ovt->hbp = vm->hback_porch;  [in videomode_to_omap_video_timings()]
    267  ovt->hfp = vm->hfront_porch;  [in videomode_to_omap_video_timings()]
    268  ovt->hsw = vm->hsync_len;  [in videomode_to_omap_video_timings()]
    269  ovt->y_res = vm->vactive;  [in videomode_to_omap_video_timings()]
    270  ovt->vbp = vm->vback_porch;  [in videomode_to_omap_video_timings()]
    271  ovt->vfp = vm->vfront_porch;  [in videomode_to_omap_video_timings()]
    272  ovt->vsw = vm->vsync_len;  [in videomode_to_omap_video_timings()]
    [all …]

/drivers/gpu/drm/omapdrm/dss/
D | hdmi_wp.c
    144  const struct videomode *vm)  [in hdmi_wp_video_config_interface() argument]
    150  vsync_inv = !!(vm->flags & DISPLAY_FLAGS_VSYNC_LOW);  [in hdmi_wp_video_config_interface()]
    151  hsync_inv = !!(vm->flags & DISPLAY_FLAGS_HSYNC_LOW);  [in hdmi_wp_video_config_interface()]
    158  r = FLD_MOD(r, !!(vm->flags & DISPLAY_FLAGS_INTERLACED), 3, 3);  [in hdmi_wp_video_config_interface()]
    164  const struct videomode *vm)  [in hdmi_wp_video_config_timing() argument]
    181  timing_h |= FLD_VAL(vm->hback_porch, 31, 20);  [in hdmi_wp_video_config_timing()]
    182  timing_h |= FLD_VAL(vm->hfront_porch, 19, 8);  [in hdmi_wp_video_config_timing()]
    183  timing_h |= FLD_VAL(vm->hsync_len - hsync_len_offset, 7, 0);  [in hdmi_wp_video_config_timing()]
    186  timing_v |= FLD_VAL(vm->vback_porch, 31, 20);  [in hdmi_wp_video_config_timing()]
    187  timing_v |= FLD_VAL(vm->vfront_porch, 19, 8);  [in hdmi_wp_video_config_timing()]
    [all …]

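hdmi_wp_video_config_timing() packs the videomode porches and sync length into 32-bit timing registers with the driver's FLD_VAL(value, high, low) helper, which places a value into a bit range of the register word. A generic userspace stand-in for that packing is shown below; fld_val() here is a hypothetical re-implementation of the idea (mask then shift), not the driver's macro, the hsync_len offset from the excerpt is omitted, and the porch values are made up.

    #include <stdint.h>
    #include <stdio.h>

    /* Place `val` into bits [high:low] of a register word (illustrative). */
    static uint32_t fld_val(uint32_t val, unsigned int high, unsigned int low)
    {
            uint32_t mask = (high - low == 31) ? ~0u
                                               : ((1u << (high - low + 1)) - 1);

            return (val & mask) << low;
    }

    int main(void)
    {
            /* Build a horizontal timing word the way the excerpt does:
             * back porch in bits 31..20, front porch in 19..8, sync in 7..0. */
            uint32_t timing_h = 0;

            timing_h |= fld_val(148, 31, 20);   /* hback_porch  */
            timing_h |= fld_val(88, 19, 8);     /* hfront_porch */
            timing_h |= fld_val(44, 7, 0);      /* hsync_len    */

            printf("TIMING_H = 0x%08x\n", timing_h);
            return 0;
    }
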