Searched refs:qdev (Results 1 – 25 of 27) sorted by relevance

/drivers/net/ethernet/qlogic/
qla3xxx.c
104 static int ql_sem_spinlock(struct ql3_adapter *qdev, in ql_sem_spinlock() argument
108 qdev->mem_map_registers; in ql_sem_spinlock()
123 static void ql_sem_unlock(struct ql3_adapter *qdev, u32 sem_mask) in ql_sem_unlock() argument
126 qdev->mem_map_registers; in ql_sem_unlock()
131 static int ql_sem_lock(struct ql3_adapter *qdev, u32 sem_mask, u32 sem_bits) in ql_sem_lock() argument
134 qdev->mem_map_registers; in ql_sem_lock()
145 static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev) in ql_wait_for_drvr_lock() argument
150 if (ql_sem_lock(qdev, in ql_wait_for_drvr_lock()
152 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) in ql_wait_for_drvr_lock()
154 netdev_printk(KERN_DEBUG, qdev->ndev, in ql_wait_for_drvr_lock()
[all …]
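
Note: the qla3xxx hits above are all one idiom -- request a hardware semaphore by writing bits into an MMIO register and reading them back to see whether the adapter granted the lock, spinning until it does. A minimal user-space sketch of that try/spin/unlock shape, with a C11 atomic standing in for the semaphore register (all names here are illustrative, not the driver's):

#include <sched.h>
#include <stdatomic.h>
#include <stdbool.h>

static atomic_uint sem_reg;             /* stands in for the MMIO semaphore */

/* One attempt: write the request bits, "read back" the grant.  The CAS
 * models the hardware's write-then-readback handshake. */
static bool sem_trylock(unsigned int sem_bits)
{
        unsigned int expected = 0;
        return atomic_compare_exchange_strong(&sem_reg, &expected, sem_bits);
}

/* Spin until acquired, as ql_sem_spinlock() does against real hardware. */
static void sem_spinlock(unsigned int sem_bits)
{
        while (!sem_trylock(sem_bits))
                sched_yield();          /* the driver delays briefly instead */
}

/* Release by writing the mask back, as ql_sem_unlock() does. */
static void sem_unlock(void)
{
        atomic_store(&sem_reg, 0);
}
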
/drivers/gpu/drm/qxl/
qxl_kms.c
34 static void qxl_dump_mode(struct qxl_device *qdev, void *p) in qxl_dump_mode() argument
42 static bool qxl_check_device(struct qxl_device *qdev) in qxl_check_device() argument
44 struct qxl_rom *rom = qdev->rom; in qxl_check_device()
63 qdev->vram_size = rom->surface0_area_size; in qxl_check_device()
67 qdev->mode_info.num_modes = ((u32 *)rom)[mode_offset]; in qxl_check_device()
69 qdev->mode_info.num_modes); in qxl_check_device()
70 qdev->mode_info.modes = (void *)((uint32_t *)rom + mode_offset + 1); in qxl_check_device()
71 for (i = 0; i < qdev->mode_info.num_modes; i++) in qxl_check_device()
72 qxl_dump_mode(qdev, qdev->mode_info.modes + i); in qxl_check_device()
76 static void setup_hw_slot(struct qxl_device *qdev, int slot_index, in setup_hw_slot() argument
[all …]
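
Note: qxl_check_device() parses a count-prefixed table straight out of the device ROM -- a u32 mode count at mode_offset (in u32 units), with the mode array immediately after it. A hedged sketch of that layout and walk (demo_mode is invented; the real struct qxl_mode carries more fields):

#include <stdint.h>
#include <stdio.h>

/* Invented descriptor; the real struct qxl_mode has more fields. */
struct demo_mode {
        uint32_t x_res;
        uint32_t y_res;
};

/* Walk a count-prefixed mode table inside a ROM blob: one u32 count at
 * mode_offset, then the descriptors back to back. */
static void dump_modes(const void *rom, size_t mode_offset)
{
        const uint32_t *p = (const uint32_t *)rom + mode_offset;
        unsigned int num_modes = p[0];
        const struct demo_mode *modes = (const struct demo_mode *)(p + 1);

        for (unsigned int i = 0; i < num_modes; i++)
                printf("mode %u: %ux%u\n", i,
                       (unsigned int)modes[i].x_res,
                       (unsigned int)modes[i].y_res);
}
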
qxl_cmd.c
31 static int qxl_reap_surface_id(struct qxl_device *qdev, int max_to_reap);
178 qxl_push_command_ring_release(struct qxl_device *qdev, struct qxl_release *release, in qxl_push_command_ring_release() argument
185 cmd.data = qxl_bo_physical_address(qdev, to_qxl_bo(entry->tv.bo), release->release_offset); in qxl_push_command_ring_release()
187 return qxl_ring_push(qdev->command_ring, &cmd, interruptible); in qxl_push_command_ring_release()
191 qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *release, in qxl_push_cursor_ring_release() argument
198 cmd.data = qxl_bo_physical_address(qdev, to_qxl_bo(entry->tv.bo), release->release_offset); in qxl_push_cursor_ring_release()
200 return qxl_ring_push(qdev->cursor_ring, &cmd, interruptible); in qxl_push_cursor_ring_release()
203 bool qxl_queue_garbage_collect(struct qxl_device *qdev, bool flush) in qxl_queue_garbage_collect() argument
205 if (!qxl_check_idle(qdev->release_ring)) { in qxl_queue_garbage_collect()
206 queue_work(qdev->gc_queue, &qdev->gc_work); in qxl_queue_garbage_collect()
[all …]
qxl_display.c
39 void qxl_alloc_client_monitors_config(struct qxl_device *qdev, unsigned count) in qxl_alloc_client_monitors_config() argument
41 if (qdev->client_monitors_config && in qxl_alloc_client_monitors_config()
42 count > qdev->client_monitors_config->count) { in qxl_alloc_client_monitors_config()
43 kfree(qdev->client_monitors_config); in qxl_alloc_client_monitors_config()
44 qdev->client_monitors_config = NULL; in qxl_alloc_client_monitors_config()
46 if (!qdev->client_monitors_config) { in qxl_alloc_client_monitors_config()
47 qdev->client_monitors_config = kzalloc( in qxl_alloc_client_monitors_config()
50 if (!qdev->client_monitors_config) { in qxl_alloc_client_monitors_config()
51 qxl_io_log(qdev, in qxl_alloc_client_monitors_config()
57 qdev->client_monitors_config->count = count; in qxl_alloc_client_monitors_config()
[all …]
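
Note: qxl_alloc_client_monitors_config() uses a grow-only cache -- free the old buffer only when the new count will not fit, then reallocate zeroed on demand. The same shape in portable C (struct and sizing are placeholders):

#include <stdlib.h>

/* Placeholder layout; the real qxl_monitors_config has a heads[] array. */
struct monitors_config {
        unsigned int count;
};

static struct monitors_config *cfg;     /* cached across calls */
static unsigned int cfg_capacity;

static int alloc_monitors_config(unsigned int count)
{
        if (cfg && count > cfg_capacity) {      /* too small: drop the cache */
                free(cfg);
                cfg = NULL;
        }
        if (!cfg) {                             /* (re)allocate zeroed */
                cfg = calloc(1, sizeof(*cfg) + count * sizeof(unsigned int));
                if (!cfg)
                        return -1;              /* the driver logs via qxl_io_log() */
                cfg_capacity = count;
        }
        cfg->count = count;
        return 0;
}
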
qxl_irq.c
31 struct qxl_device *qdev = (struct qxl_device *)dev->dev_private; in qxl_irq_handler() local
34 pending = xchg(&qdev->ram_header->int_pending, 0); in qxl_irq_handler()
39 atomic_inc(&qdev->irq_received); in qxl_irq_handler()
42 atomic_inc(&qdev->irq_received_display); in qxl_irq_handler()
43 wake_up_all(&qdev->display_event); in qxl_irq_handler()
44 qxl_queue_garbage_collect(qdev, false); in qxl_irq_handler()
47 atomic_inc(&qdev->irq_received_cursor); in qxl_irq_handler()
48 wake_up_all(&qdev->cursor_event); in qxl_irq_handler()
51 atomic_inc(&qdev->irq_received_io_cmd); in qxl_irq_handler()
52 wake_up_all(&qdev->io_cmd_event); in qxl_irq_handler()
[all …]
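
Note: the interrupt handler claims all pending bits in one atomic xchg on the shared ram_header word, then bumps a per-source counter and wakes the matching waiters. A self-contained sketch of the claim-and-fan-out step (wakeups omitted; bit names invented):

#include <stdatomic.h>

#define PENDING_DISPLAY (1u << 0)       /* invented bit names */
#define PENDING_CURSOR  (1u << 1)
#define PENDING_IO_CMD  (1u << 2)

static atomic_uint int_pending;         /* stands in for ram_header->int_pending */
static atomic_uint irq_display, irq_cursor, irq_io_cmd;

/* Claim every pending bit in one shot so a concurrent setter is never
 * lost, then account each source -- the driver also wakes waiters here. */
static void irq_handler(void)
{
        unsigned int pending = atomic_exchange(&int_pending, 0);

        if (pending & PENDING_DISPLAY)
                atomic_fetch_add(&irq_display, 1);
        if (pending & PENDING_CURSOR)
                atomic_fetch_add(&irq_cursor, 1);
        if (pending & PENDING_IO_CMD)
                atomic_fetch_add(&irq_io_cmd, 1);
}
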
qxl_drv.h
74 #define QXL_INFO(qdev, fmt, ...) do { \ argument
76 qxl_io_log(qdev, fmt, __VA_ARGS__); \
79 #define QXL_DEBUG(qdev, fmt, ...) do { \ argument
81 qxl_io_log(qdev, fmt, __VA_ARGS__); \
84 #define QXL_INFO_ONCE(qdev, fmt, ...) do { \ argument
88 QXL_INFO(qdev, fmt, __VA_ARGS__); \
213 struct qxl_device *qdev; member
220 struct qxl_device *qdev; member
238 void qxl_debugfs_remove_files(struct qxl_device *qdev);
335 __printf(2,3) void qxl_io_log(struct qxl_device *qdev, const char *fmt, ...);
[all …]
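
Note: QXL_INFO and QXL_DEBUG are classic do { ... } while (0) logging macros built around qxl_io_log(). A minimal stand-alone version showing why the wrapper matters (the demo names are not the driver's):

#include <stdio.h>

struct demo_device { int verbose; };

/* The do { ... } while (0) wrapper makes the body one statement, so the
 * macro nests safely under an unbraced if/else.  Like QXL_INFO, this
 * form needs at least one argument after fmt. */
#define DEMO_INFO(dev, fmt, ...) do {                        \
        if ((dev)->verbose)                                  \
                fprintf(stderr, "demo: " fmt, __VA_ARGS__);  \
} while (0)

/* Usage: DEMO_INFO(&dev, "mode %dx%d\n", w, h); */
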
qxl_debugfs.c
43 struct qxl_device *qdev = node->minor->dev->dev_private; in qxl_debugfs_irq_received() local
45 seq_printf(m, "%d\n", atomic_read(&qdev->irq_received)); in qxl_debugfs_irq_received()
46 seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_display)); in qxl_debugfs_irq_received()
47 seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_cursor)); in qxl_debugfs_irq_received()
48 seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_io_cmd)); in qxl_debugfs_irq_received()
49 seq_printf(m, "%d\n", qdev->irq_received_error); in qxl_debugfs_irq_received()
57 struct qxl_device *qdev = node->minor->dev->dev_private; in qxl_debugfs_buffers_info() local
60 list_for_each_entry(bo, &qdev->gem.objects, list) { in qxl_debugfs_buffers_info()
102 int qxl_debugfs_add_files(struct qxl_device *qdev, in qxl_debugfs_add_files() argument
108 for (i = 0; i < qdev->debugfs_count; i++) { in qxl_debugfs_add_files()
[all …]
qxl_ttm.c
38 static int qxl_ttm_debugfs_init(struct qxl_device *qdev);
43 struct qxl_device *qdev; in qxl_get_qdev() local
46 qdev = container_of(mman, struct qxl_device, mman); in qxl_get_qdev()
47 return qdev; in qxl_get_qdev()
60 static int qxl_ttm_global_init(struct qxl_device *qdev) in qxl_ttm_global_init() argument
65 qdev->mman.mem_global_referenced = false; in qxl_ttm_global_init()
66 global_ref = &qdev->mman.mem_global_ref; in qxl_ttm_global_init()
79 qdev->mman.bo_global_ref.mem_glob = in qxl_ttm_global_init()
80 qdev->mman.mem_global_ref.object; in qxl_ttm_global_init()
81 global_ref = &qdev->mman.bo_global_ref.ref; in qxl_ttm_global_init()
[all …]
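
Note: qxl_get_qdev() is the container_of pattern -- TTM hands back a pointer to the embedded mman member, and the driver recovers the enclosing qxl_device from it. A self-contained rendering with a user-space container_of:

#include <stddef.h>

/* container_of as the kernel defines it (simplified): recover the
 * enclosing struct from a pointer to one of its members. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct demo_mman   { int placeholder; };
struct demo_device { int id; struct demo_mman mman; };

/* Mirrors qxl_get_qdev(): given &demo_device::mman, walk back out. */
static struct demo_device *device_from_mman(struct demo_mman *m)
{
        return container_of(m, struct demo_device, mman);
}
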
qxl_release.c
61 struct qxl_device *qdev; in qxl_fence_wait() local
67 qdev = container_of(fence->lock, struct qxl_device, release_lock); in qxl_fence_wait()
77 qxl_io_notify_oom(qdev); in qxl_fence_wait()
80 if (!qxl_queue_garbage_collect(qdev, true)) in qxl_fence_wait()
126 qxl_release_alloc(struct qxl_device *qdev, int type, in qxl_release_alloc() argument
145 spin_lock(&qdev->release_idr_lock); in qxl_release_alloc()
146 handle = idr_alloc(&qdev->release_idr, release, 1, 0, GFP_NOWAIT); in qxl_release_alloc()
147 release->base.seqno = ++qdev->release_seqno; in qxl_release_alloc()
148 spin_unlock(&qdev->release_idr_lock); in qxl_release_alloc()
156 QXL_INFO(qdev, "allocated release %d\n", handle); in qxl_release_alloc()
[all …]
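
Note: qxl_release_alloc() hands each release a small integer handle from an idr; the allocation runs under a spinlock, hence GFP_NOWAIT. A hedged kernel-style sketch of just that step (demo_dev is a cut-down stand-in for struct qxl_device):

#include <linux/idr.h>
#include <linux/spinlock.h>

/* Cut-down stand-in for struct qxl_device: just the fields used here. */
struct demo_dev {
        spinlock_t idr_lock;
        struct idr release_idr;
        unsigned int seqno;
};

/* Hand out a small integer handle for obj, as qxl_release_alloc() does.
 * GFP_NOWAIT because the allocation happens under the spinlock. */
static int demo_alloc_handle(struct demo_dev *d, void *obj)
{
        int handle;

        spin_lock(&d->idr_lock);
        handle = idr_alloc(&d->release_idr, obj, 1, 0, GFP_NOWAIT);
        d->seqno++;
        spin_unlock(&d->idr_lock);

        return handle;          /* >= 1 on success, negative errno on failure */
}
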
qxl_draw.c
26 static int alloc_clips(struct qxl_device *qdev, in alloc_clips() argument
33 return qxl_alloc_bo_reserved(qdev, release, size, clips_bo); in alloc_clips()
39 static struct qxl_rect *drawable_set_clipping(struct qxl_device *qdev, in drawable_set_clipping() argument
59 alloc_drawable(struct qxl_device *qdev, struct qxl_release **release) in alloc_drawable() argument
62 ret = qxl_alloc_release_reserved(qdev, sizeof(struct qxl_drawable), in alloc_drawable()
69 free_drawable(struct qxl_device *qdev, struct qxl_release *release) in free_drawable() argument
71 qxl_release_free(qdev, release); in free_drawable()
76 make_drawable(struct qxl_device *qdev, int surface, uint8_t type, in make_drawable() argument
83 drawable = (struct qxl_drawable *)qxl_release_map(qdev, release); in make_drawable()
111 drawable->mm_time = qdev->rom->mm_clock; in make_drawable()
[all …]
qxl_object.c
33 struct qxl_device *qdev; in qxl_ttm_bo_destroy() local
36 qdev = (struct qxl_device *)bo->gem_base.dev->dev_private; in qxl_ttm_bo_destroy()
38 qxl_surface_evict(qdev, bo, false); in qxl_ttm_bo_destroy()
39 mutex_lock(&qdev->gem.mutex); in qxl_ttm_bo_destroy()
41 mutex_unlock(&qdev->gem.mutex); in qxl_ttm_bo_destroy()
78 int qxl_bo_create(struct qxl_device *qdev, in qxl_bo_create() argument
96 r = drm_gem_object_init(qdev->ddev, &bo->gem_base, size); in qxl_bo_create()
111 r = ttm_bo_init(&qdev->mman.bdev, &bo->tbo, size, type, in qxl_bo_create()
116 dev_err(qdev->dev, in qxl_bo_create()
144 void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev, in qxl_bo_kmap_atomic_page() argument
[all …]
qxl_ioctl.c
36 struct qxl_device *qdev = dev->dev_private; in qxl_alloc_ioctl() local
47 ret = qxl_gem_object_create_with_handle(qdev, file_priv, in qxl_alloc_ioctl()
64 struct qxl_device *qdev = dev->dev_private; in qxl_map_ioctl() local
67 return qxl_mode_dumb_mmap(file_priv, qdev->ddev, qxl_map->handle, in qxl_map_ioctl()
85 apply_reloc(struct qxl_device *qdev, struct qxl_reloc_info *info) in apply_reloc() argument
88 reloc_page = qxl_bo_kmap_atomic_page(qdev, info->dst_bo, info->dst_offset & PAGE_MASK); in apply_reloc()
89 *(uint64_t *)(reloc_page + (info->dst_offset & ~PAGE_MASK)) = qxl_bo_physical_address(qdev, in apply_reloc()
92 qxl_bo_kunmap_atomic_page(qdev, info->dst_bo, reloc_page); in apply_reloc()
96 apply_surf_reloc(struct qxl_device *qdev, struct qxl_reloc_info *info) in apply_surf_reloc() argument
104 reloc_page = qxl_bo_kmap_atomic_page(qdev, info->dst_bo, info->dst_offset & PAGE_MASK); in apply_surf_reloc()
[all …]
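
Note: apply_reloc() patches a 64-bit device address into a buffer object that is mapped one page at a time, so the target offset is split into a page part (dst_offset & PAGE_MASK) and an in-page part (dst_offset & ~PAGE_MASK). The arithmetic in isolation, assuming the word is aligned and never straddles a page:

#include <stdint.h>

#define DEMO_PAGE_SHIFT 12
#define DEMO_PAGE_SIZE  (1ULL << DEMO_PAGE_SHIFT)
#define DEMO_PAGE_MASK  (~(DEMO_PAGE_SIZE - 1))

/* Split dst_offset into a page part and an in-page part and patch a
 * 64-bit word there.  In the driver the page part selects which page
 * qxl_bo_kmap_atomic_page() maps; here the buffer is fully mapped, so
 * the split is plain pointer math. */
static void patch_u64(uint8_t *buf, uint64_t dst_offset, uint64_t value)
{
        uint8_t *page = buf + (dst_offset & DEMO_PAGE_MASK);
        *(uint64_t *)(page + (dst_offset & ~DEMO_PAGE_MASK)) = value;
}
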
qxl_fb.c
44 struct qxl_device *qdev; member
62 struct qxl_device *qdev, struct fb_info *info, in qxl_fb_image_init() argument
65 qxl_fb_image->qdev = qdev; in qxl_fb_image_init()
89 struct qxl_device *qdev = qfbdev->qdev; in qxl_fb_dirty_flush() local
114 qxl_io_log(qdev, "dirty x[%d, %d], y[%d, %d]", x1, x2, y1, y2); in qxl_fb_dirty_flush()
131 qxl_fb_image_init(&qxl_fb_image, qdev, info, NULL); in qxl_fb_dirty_flush()
138 struct qxl_device *qdev = qfbdev->qdev; in qxl_dirty_update() local
166 schedule_work(&qdev->fb_work); in qxl_dirty_update()
230 struct qxl_device *qdev = container_of(work, struct qxl_device, fb_work); in qxl_fb_work() local
231 struct qxl_fbdev *qfbdev = qdev->mode_info.qfbdev; in qxl_fb_work()
[all …]
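
Note: qxl_dirty_update() grows a cached dirty rectangle to cover each new damage report and schedules one worker to flush it; qxl_fb_dirty_flush() later sends the union. The rectangle bookkeeping on its own (locking and the work queue omitted):

/* x2/y2 are exclusive; an empty rect has x1 >= x2. */
struct dirty_rect { int x1, y1, x2, y2; };

/* Grow the cached rectangle to cover new damage; the flush worker later
 * sends the union and resets the rect. */
static void dirty_union(struct dirty_rect *d, int x, int y, int w, int h)
{
        if (d->x1 >= d->x2) {           /* empty: adopt the new rect */
                d->x1 = x;
                d->y1 = y;
                d->x2 = x + w;
                d->y2 = y + h;
                return;
        }
        if (x < d->x1)     d->x1 = x;
        if (y < d->y1)     d->y1 = y;
        if (x + w > d->x2) d->x2 = x + w;
        if (y + h > d->y2) d->y2 = y + h;
}
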
qxl_gem.c
34 struct qxl_device *qdev; in qxl_gem_object_free() local
37 qdev = (struct qxl_device *)gobj->dev->dev_private; in qxl_gem_object_free()
39 qxl_surface_evict(qdev, qobj, false); in qxl_gem_object_free()
45 int qxl_gem_object_create(struct qxl_device *qdev, int size, in qxl_gem_object_create() argument
58 r = qxl_bo_create(qdev, size, kernel, false, initial_domain, surf, &qbo); in qxl_gem_object_create()
68 mutex_lock(&qdev->gem.mutex); in qxl_gem_object_create()
69 list_add_tail(&qbo->list, &qdev->gem.objects); in qxl_gem_object_create()
70 mutex_unlock(&qdev->gem.mutex); in qxl_gem_object_create()
75 int qxl_gem_object_create_with_handle(struct qxl_device *qdev, in qxl_gem_object_create_with_handle() argument
89 r = qxl_gem_object_create(qdev, size, 0, in qxl_gem_object_create_with_handle()
[all …]
qxl_image.c
33 qxl_allocate_chunk(struct qxl_device *qdev, in qxl_allocate_chunk() argument
45 ret = qxl_alloc_bo_reserved(qdev, release, chunk_size, &chunk->bo); in qxl_allocate_chunk()
56 qxl_image_alloc_objects(struct qxl_device *qdev, in qxl_image_alloc_objects() argument
70 ret = qxl_alloc_bo_reserved(qdev, release, sizeof(struct qxl_image), &image->bo); in qxl_image_alloc_objects()
76 ret = qxl_allocate_chunk(qdev, release, image, sizeof(struct qxl_data_chunk) + stride * height); in qxl_image_alloc_objects()
86 void qxl_image_free_objects(struct qxl_device *qdev, struct qxl_drm_image *dimage) in qxl_image_free_objects() argument
100 qxl_image_init_helper(struct qxl_device *qdev, in qxl_image_init_helper() argument
127 ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, 0); in qxl_image_init_helper()
132 qxl_bo_kunmap_atomic_page(qdev, chunk_bo, ptr); in qxl_image_init_helper()
145 ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, page << PAGE_SHIFT); in qxl_image_init_helper()
[all …]
qxl_drv.c
94 struct qxl_device *qdev = dev->dev_private; in qxl_drm_freeze() local
100 qxl_fbdev_set_suspend(qdev, 1); in qxl_drm_freeze()
110 qxl_destroy_monitors_object(qdev); in qxl_drm_freeze()
111 qxl_surf_evict(qdev); in qxl_drm_freeze()
112 qxl_vram_evict(qdev); in qxl_drm_freeze()
114 while (!qxl_check_idle(qdev->command_ring)); in qxl_drm_freeze()
115 while (!qxl_check_idle(qdev->release_ring)) in qxl_drm_freeze()
116 qxl_queue_garbage_collect(qdev, 1); in qxl_drm_freeze()
125 struct qxl_device *qdev = dev->dev_private; in qxl_drm_resume() local
127 qdev->ram_header->int_mask = QXL_INTERRUPT_MASK; in qxl_drm_resume()
[all …]
qxl_object.h
37 struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private; in qxl_bo_reserve() local
38 dev_err(qdev->dev, "%p reserve failed\n", bo); in qxl_bo_reserve()
73 struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private; in qxl_bo_wait() local
74 dev_err(qdev->dev, "%p reserve failed for wait\n", in qxl_bo_wait()
87 extern int qxl_bo_create(struct qxl_device *qdev,
94 void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev, struct qxl_bo *bo, int page_offset);
95 void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev, struct qxl_bo *bo, void *map);
qxl_dumb.c
35 struct qxl_device *qdev = dev->dev_private; in qxl_mode_dumb_create() local
62 r = qxl_gem_object_create_with_handle(qdev, file_priv, in qxl_mode_dumb_create()
/drivers/net/ethernet/qlogic/qlge/
qlge_mpi.c
3 int ql_unpause_mpi_risc(struct ql_adapter *qdev) in ql_unpause_mpi_risc() argument
8 tmp = ql_read32(qdev, CSR); in ql_unpause_mpi_risc()
12 ql_write32(qdev, CSR, CSR_CMD_CLR_PAUSE); in ql_unpause_mpi_risc()
16 int ql_pause_mpi_risc(struct ql_adapter *qdev) in ql_pause_mpi_risc() argument
22 ql_write32(qdev, CSR, CSR_CMD_SET_PAUSE); in ql_pause_mpi_risc()
24 tmp = ql_read32(qdev, CSR); in ql_pause_mpi_risc()
33 int ql_hard_reset_mpi_risc(struct ql_adapter *qdev) in ql_hard_reset_mpi_risc() argument
39 ql_write32(qdev, CSR, CSR_CMD_SET_RST); in ql_hard_reset_mpi_risc()
41 tmp = ql_read32(qdev, CSR); in ql_hard_reset_mpi_risc()
43 ql_write32(qdev, CSR, CSR_CMD_CLR_RST); in ql_hard_reset_mpi_risc()
[all …]
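
Note: the qlge MPI helpers share one shape -- write a command to CSR, then poll the readback for an acknowledge bit under a bounded retry budget. A self-contained sketch with a stubbed register standing in for ql_read32()/ql_write32():

#include <stdbool.h>
#include <stdint.h>
#include <unistd.h>

/* Stubbed register; the driver goes through ql_read32()/ql_write32()
 * against the mapped BAR instead. */
static uint32_t fake_csr;
static uint32_t read_csr(void)        { return fake_csr; }
static void     write_csr(uint32_t v) { fake_csr = v; }

/* Issue a command, then poll the readback for an acknowledge bit under a
 * bounded retry budget -- the shape of ql_pause_mpi_risc(). */
static bool csr_command(uint32_t cmd, uint32_t ack_bit)
{
        int retries = 10;

        write_csr(cmd);
        while (retries--) {
                if (read_csr() & ack_bit)
                        return true;    /* acknowledged */
                usleep(10 * 1000);      /* the driver waits ~10 ms per try */
        }
        return false;                   /* timed out */
}
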
qlge_main.c
107 static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask) in ql_sem_trylock() argument
137 netif_alert(qdev, probe, qdev->ndev, "bad Semaphore mask!.\n"); in ql_sem_trylock()
141 ql_write32(qdev, SEM, sem_bits | sem_mask); in ql_sem_trylock()
142 return !(ql_read32(qdev, SEM) & sem_bits); in ql_sem_trylock()
145 int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask) in ql_sem_spinlock() argument
149 if (!ql_sem_trylock(qdev, sem_mask)) in ql_sem_spinlock()
156 void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask) in ql_sem_unlock() argument
158 ql_write32(qdev, SEM, sem_mask); in ql_sem_unlock()
159 ql_read32(qdev, SEM); /* flush */ in ql_sem_unlock()
167 int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit) in ql_wait_reg_rdy() argument
[all …]
qlge_dbg.c
8 static u32 ql_read_other_func_reg(struct ql_adapter *qdev, in ql_read_other_func_reg() argument
17 | (qdev->alt_func << MPI_NIC_FUNCTION_SHIFT) in ql_read_other_func_reg()
19 status = ql_read_mpi_reg(qdev, register_to_read, &reg_val); in ql_read_other_func_reg()
27 static int ql_write_other_func_reg(struct ql_adapter *qdev, in ql_write_other_func_reg() argument
35 | (qdev->alt_func << MPI_NIC_FUNCTION_SHIFT) in ql_write_other_func_reg()
37 status = ql_write_mpi_reg(qdev, register_to_read, reg_val); in ql_write_other_func_reg()
42 static int ql_wait_other_func_reg_rdy(struct ql_adapter *qdev, u32 reg, in ql_wait_other_func_reg_rdy() argument
49 temp = ql_read_other_func_reg(qdev, reg); in ql_wait_other_func_reg_rdy()
62 static int ql_read_other_func_serdes_reg(struct ql_adapter *qdev, u32 reg, in ql_read_other_func_serdes_reg() argument
68 status = ql_wait_other_func_reg_rdy(qdev, XG_SERDES_ADDR / 4, in ql_read_other_func_serdes_reg()
[all …]
qlge_ethtool.c
185 static int ql_update_ring_coalescing(struct ql_adapter *qdev) in ql_update_ring_coalescing() argument
191 if (!netif_running(qdev->ndev)) in ql_update_ring_coalescing()
197 cqicb = (struct cqicb *)&qdev->rx_ring[qdev->rss_ring_count]; in ql_update_ring_coalescing()
198 if (le16_to_cpu(cqicb->irq_delay) != qdev->tx_coalesce_usecs || in ql_update_ring_coalescing()
200 qdev->tx_max_coalesced_frames) { in ql_update_ring_coalescing()
201 for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) { in ql_update_ring_coalescing()
202 rx_ring = &qdev->rx_ring[i]; in ql_update_ring_coalescing()
204 cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs); in ql_update_ring_coalescing()
206 cpu_to_le16(qdev->tx_max_coalesced_frames); in ql_update_ring_coalescing()
208 status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb), in ql_update_ring_coalescing()
[all …]
qlge.h
1380 #define QL_TXQ_IDX(qdev, skb) (smp_processor_id()%(qdev->tx_ring_count)) argument
1403 struct ql_adapter *qdev; member
1472 struct ql_adapter *qdev; member
1977 struct ql_adapter *qdev; member
2158 static inline u32 ql_read32(const struct ql_adapter *qdev, int reg) in ql_read32() argument
2160 return readl(qdev->reg_base + reg); in ql_read32()
2166 static inline void ql_write32(const struct ql_adapter *qdev, int reg, u32 val) in ql_write32() argument
2168 writel(val, qdev->reg_base + reg); in ql_write32()
2209 int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask);
2210 void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask);
[all …]
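
Note: ql_read32()/ql_write32() are the canonical MMIO accessor pair -- thin inlines over readl()/writel() keyed off the ioremap()ed BAR, so every register touch goes through one typed helper. A hedged kernel-style sketch of the same pattern (adapter reduced to the one field used):

#include <linux/io.h>
#include <linux/types.h>

/* Cut-down adapter: only the ioremap()ed BAR the accessors need. */
struct demo_adapter {
        void __iomem *reg_base;
};

static inline u32 demo_read32(const struct demo_adapter *a, int reg)
{
        return readl(a->reg_base + reg);
}

static inline void demo_write32(const struct demo_adapter *a, int reg, u32 val)
{
        writel(val, a->reg_base + reg);
}
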
/drivers/s390/cio/
qdio.h
325 struct qdio_irq *qdev = (__q)->irq_ptr; \
326 if (qdev->perf_stat_enabled) \
327 (qdev->perf_stat.__attr)++; \
/drivers/gpu/drm/virtio/
virtgpu_drv.h
357 int virtio_gpu_object_get_sg_table(struct virtio_gpu_device *qdev,
406 struct virtio_gpu_device *qdev = in virtio_gpu_object_reserve() local
408 dev_err(qdev->dev, "%p reserve failed\n", bo); in virtio_gpu_object_reserve()
