/drivers/net/ethernet/qlogic/ |
D | qla3xxx.c |
    103  static int ql_sem_spinlock(struct ql3_adapter *qdev,   in ql_sem_spinlock() argument
    107  qdev->mem_map_registers;   in ql_sem_spinlock()
    122  static void ql_sem_unlock(struct ql3_adapter *qdev, u32 sem_mask)   in ql_sem_unlock() argument
    125  qdev->mem_map_registers;   in ql_sem_unlock()
    130  static int ql_sem_lock(struct ql3_adapter *qdev, u32 sem_mask, u32 sem_bits)   in ql_sem_lock() argument
    133  qdev->mem_map_registers;   in ql_sem_lock()
    144  static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev)   in ql_wait_for_drvr_lock() argument
    149  if (ql_sem_lock(qdev,   in ql_wait_for_drvr_lock()
    151  (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)   in ql_wait_for_drvr_lock()
    153  netdev_printk(KERN_DEBUG, qdev->ndev,   in ql_wait_for_drvr_lock()
    [all …]
|
/drivers/gpu/drm/qxl/ |
D | qxl_kms.c |
    38  static bool qxl_check_device(struct qxl_device *qdev)   in qxl_check_device() argument
    40  struct qxl_rom *rom = qdev->rom;   in qxl_check_device()
    55  qdev->vram_size = rom->surface0_area_size;   in qxl_check_device()
    60  static void setup_hw_slot(struct qxl_device *qdev, struct qxl_memslot *slot)   in setup_hw_slot() argument
    62  qdev->ram_header->mem_slot.mem_start = slot->start_phys_addr;   in setup_hw_slot()
    63  qdev->ram_header->mem_slot.mem_end = slot->start_phys_addr + slot->size;   in setup_hw_slot()
    64  qxl_io_memslot_add(qdev, qdev->rom->slots_start + slot->index);   in setup_hw_slot()
    67  static void setup_slot(struct qxl_device *qdev,   in setup_slot() argument
    81  setup_hw_slot(qdev, slot);   in setup_slot()
    83  slot->generation = qdev->rom->slot_generation;   in setup_slot()
    [all …]
|
D | qxl_cmd.c |
    35  static int qxl_reap_surface_id(struct qxl_device *qdev, int max_to_reap);
    186  qxl_push_command_ring_release(struct qxl_device *qdev, struct qxl_release *release,   in qxl_push_command_ring_release() argument
    192  cmd.data = qxl_bo_physical_address(qdev, release->release_bo, release->release_offset);   in qxl_push_command_ring_release()
    194  return qxl_ring_push(qdev->command_ring, &cmd, interruptible);   in qxl_push_command_ring_release()
    198  qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *release,   in qxl_push_cursor_ring_release() argument
    204  cmd.data = qxl_bo_physical_address(qdev, release->release_bo, release->release_offset);   in qxl_push_cursor_ring_release()
    206  return qxl_ring_push(qdev->cursor_ring, &cmd, interruptible);   in qxl_push_cursor_ring_release()
    209  bool qxl_queue_garbage_collect(struct qxl_device *qdev, bool flush)   in qxl_queue_garbage_collect() argument
    211  if (!qxl_check_idle(qdev->release_ring)) {   in qxl_queue_garbage_collect()
    212  schedule_work(&qdev->gc_work);   in qxl_queue_garbage_collect()
    [all …]
|
D | qxl_display.c |
    46  static int qxl_alloc_client_monitors_config(struct qxl_device *qdev,   in qxl_alloc_client_monitors_config() argument
    49  if (qdev->client_monitors_config &&   in qxl_alloc_client_monitors_config()
    50  count > qdev->client_monitors_config->count) {   in qxl_alloc_client_monitors_config()
    51  kfree(qdev->client_monitors_config);   in qxl_alloc_client_monitors_config()
    52  qdev->client_monitors_config = NULL;   in qxl_alloc_client_monitors_config()
    54  if (!qdev->client_monitors_config) {   in qxl_alloc_client_monitors_config()
    55  qdev->client_monitors_config = kzalloc(   in qxl_alloc_client_monitors_config()
    56  struct_size(qdev->client_monitors_config,   in qxl_alloc_client_monitors_config()
    58  if (!qdev->client_monitors_config)   in qxl_alloc_client_monitors_config()
    61  qdev->client_monitors_config->count = count;   in qxl_alloc_client_monitors_config()
    [all …]
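
The excerpt above shows the flexible-array sizing idiom: the old client_monitors_config buffer is freed once the requested head count outgrows it, and a fresh one is zero-allocated with kzalloc(struct_size(...)) for the new count. Below is a minimal userspace C sketch of that idiom; struct monitor_head and ensure_monitor_cfg() are invented names, and calloc() with a manual size computation stands in for kzalloc()/struct_size().

#include <stdlib.h>
#include <stdio.h>

/* Hypothetical stand-in for a struct ending in a flexible array member,
 * like qxl's client monitors config. */
struct monitor_head {
        unsigned int count;
        struct { int x, y, w, h; } heads[];     /* flexible array member */
};

/* Reallocate only when the requested count no longer fits, mirroring the
 * "free if count grew, then zero-allocate for the new count" pattern. */
static struct monitor_head *ensure_monitor_cfg(struct monitor_head *cfg,
                                               unsigned int count)
{
        if (cfg && count > cfg->count) {
                free(cfg);
                cfg = NULL;
        }
        if (!cfg) {
                /* userspace equivalent of kzalloc(struct_size(cfg, heads, count)) */
                cfg = calloc(1, sizeof(*cfg) + count * sizeof(cfg->heads[0]));
                if (!cfg)
                        return NULL;
        }
        cfg->count = count;
        return cfg;
}

int main(void)
{
        struct monitor_head *cfg = ensure_monitor_cfg(NULL, 2);

        if (cfg)
                printf("allocated config for %u heads\n", cfg->count);
        free(cfg);
        return 0;
}
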
|
D | qxl_irq.c |
    35  struct qxl_device *qdev = to_qxl(dev);   in qxl_irq_handler() local
    38  pending = xchg(&qdev->ram_header->int_pending, 0);   in qxl_irq_handler()
    43  atomic_inc(&qdev->irq_received);   in qxl_irq_handler()
    46  atomic_inc(&qdev->irq_received_display);   in qxl_irq_handler()
    47  wake_up_all(&qdev->display_event);   in qxl_irq_handler()
    48  qxl_queue_garbage_collect(qdev, false);   in qxl_irq_handler()
    51  atomic_inc(&qdev->irq_received_cursor);   in qxl_irq_handler()
    52  wake_up_all(&qdev->cursor_event);   in qxl_irq_handler()
    55  atomic_inc(&qdev->irq_received_io_cmd);   in qxl_irq_handler()
    56  wake_up_all(&qdev->io_cmd_event);   in qxl_irq_handler()
    [all …]
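
qxl_irq_handler() above grabs and clears the device's pending-interrupt word atomically with xchg(), then fans out per bit: bump a counter, wake the matching waiters, and defer heavier work (garbage collection) to a work item. The sketch below shows the same fetch-and-clear dispatch using plain C11 atomics; the PENDING_* bit names and handle_pending() are made up for illustration, and simple counters stand in for the wait queues and work item.

#include <stdatomic.h>
#include <stdio.h>

/* Hypothetical pending-bit layout; the real driver defines its own masks. */
#define PENDING_DISPLAY (1u << 0)
#define PENDING_CURSOR  (1u << 1)
#define PENDING_IO_CMD  (1u << 2)

static _Atomic unsigned int int_pending;
static atomic_uint display_events, cursor_events, io_cmd_events;

/* Fetch-and-clear the pending word, then dispatch each set bit exactly once,
 * mirroring the xchg(&int_pending, 0) pattern in the excerpt. */
static void handle_pending(void)
{
        unsigned int pending = atomic_exchange(&int_pending, 0);

        if (!pending)
                return;         /* nothing pending (shared-IRQ case: not ours) */

        if (pending & PENDING_DISPLAY)
                atomic_fetch_add(&display_events, 1);   /* + wake display waiters */
        if (pending & PENDING_CURSOR)
                atomic_fetch_add(&cursor_events, 1);    /* + wake cursor waiters */
        if (pending & PENDING_IO_CMD)
                atomic_fetch_add(&io_cmd_events, 1);    /* + wake io-cmd waiters */
}

int main(void)
{
        atomic_fetch_or(&int_pending, PENDING_DISPLAY | PENDING_IO_CMD);
        handle_pending();
        printf("display=%u cursor=%u io_cmd=%u\n",
               atomic_load(&display_events), atomic_load(&cursor_events),
               atomic_load(&io_cmd_events));
        return 0;
}
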
|
D | qxl_drv.h |
    267  int qxl_device_init(struct qxl_device *qdev, struct pci_dev *pdev);
    268  void qxl_device_fini(struct qxl_device *qdev);
    270  int qxl_modeset_init(struct qxl_device *qdev);
    271  void qxl_modeset_fini(struct qxl_device *qdev);
    273  int qxl_bo_init(struct qxl_device *qdev);
    274  void qxl_bo_fini(struct qxl_device *qdev);
    276  void qxl_reinit_memslots(struct qxl_device *qdev);
    277  int qxl_surf_evict(struct qxl_device *qdev);
    278  int qxl_vram_evict(struct qxl_device *qdev);
    291  qxl_bo_physical_address(struct qxl_device *qdev, struct qxl_bo *bo,   in qxl_bo_physical_address() argument
    [all …]
|
D | qxl_release.c |
    60  struct qxl_device *qdev;   in qxl_fence_wait() local
    63  qdev = container_of(fence->lock, struct qxl_device, release_lock);   in qxl_fence_wait()
    65  if (!wait_event_timeout(qdev->release_event,   in qxl_fence_wait()
    67  (qxl_io_notify_oom(qdev), 0)),   in qxl_fence_wait()
    84  qxl_release_alloc(struct qxl_device *qdev, int type,   in qxl_release_alloc() argument
    103  spin_lock(&qdev->release_idr_lock);   in qxl_release_alloc()
    104  handle = idr_alloc(&qdev->release_idr, release, 1, 0, GFP_NOWAIT);   in qxl_release_alloc()
    105  release->base.seqno = ++qdev->release_seqno;   in qxl_release_alloc()
    106  spin_unlock(&qdev->release_idr_lock);   in qxl_release_alloc()
    137  qxl_release_free(struct qxl_device *qdev,   in qxl_release_free() argument
    [all …]
|
D | qxl_ttm.c |
    43  struct qxl_device *qdev;   in qxl_get_qdev() local
    46  qdev = container_of(mman, struct qxl_device, mman);   in qxl_get_qdev()
    47  return qdev;   in qxl_get_qdev()
    76  struct qxl_device *qdev = qxl_get_qdev(bdev);   in qxl_ttm_io_mem_reserve() local
    84  mem->bus.offset = (mem->start << PAGE_SHIFT) + qdev->vram_base;   in qxl_ttm_io_mem_reserve()
    90  qdev->surfaceram_base;   in qxl_ttm_io_mem_reserve()
    128  struct qxl_device *qdev;   in qxl_bo_move_notify() local
    133  qdev = to_qxl(qbo->tbo.base.dev);   in qxl_bo_move_notify()
    136  qxl_surface_evict(qdev, qbo, new_mem ? true : false);   in qxl_bo_move_notify()
    175  static int qxl_ttm_init_mem_type(struct qxl_device *qdev,   in qxl_ttm_init_mem_type() argument
    [all …]
|
D | qxl_debugfs.c |
    42  struct qxl_device *qdev = to_qxl(node->minor->dev);   in qxl_debugfs_irq_received() local
    44  seq_printf(m, "%d\n", atomic_read(&qdev->irq_received));   in qxl_debugfs_irq_received()
    45  seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_display));   in qxl_debugfs_irq_received()
    46  seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_cursor));   in qxl_debugfs_irq_received()
    47  seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_io_cmd));   in qxl_debugfs_irq_received()
    48  seq_printf(m, "%d\n", qdev->irq_received_error);   in qxl_debugfs_irq_received()
    56  struct qxl_device *qdev = to_qxl(node->minor->dev);   in qxl_debugfs_buffers_info() local
    59  list_for_each_entry(bo, &qdev->gem.objects, list) {   in qxl_debugfs_buffers_info()
    95  void qxl_debugfs_add_files(struct qxl_device *qdev,   in qxl_debugfs_add_files() argument
    101  for (i = 0; i < qdev->debugfs_count; i++) {   in qxl_debugfs_add_files()
    [all …]
|
D | qxl_drv.c |
    78  struct qxl_device *qdev;   in qxl_pci_probe() local
    87  qdev = devm_drm_dev_alloc(&pdev->dev, &qxl_driver,   in qxl_pci_probe()
    89  if (IS_ERR(qdev)) {   in qxl_pci_probe()
    110  ret = qxl_device_init(qdev, pdev);   in qxl_pci_probe()
    114  ret = qxl_modeset_init(qdev);   in qxl_pci_probe()
    118  drm_kms_helper_poll_init(&qdev->ddev);   in qxl_pci_probe()
    121  ret = drm_dev_register(&qdev->ddev, ent->driver_data);   in qxl_pci_probe()
    125  drm_fbdev_generic_setup(&qdev->ddev, 32);   in qxl_pci_probe()
    129  qxl_modeset_fini(qdev);   in qxl_pci_probe()
    131  qxl_device_fini(qdev);   in qxl_pci_probe()
    [all …]
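
The qxl_pci_probe() excerpt above follows the usual probe ladder: allocate, init the device, init modeset, register; the qxl_modeset_fini()/qxl_device_fini() calls at lines 129/131 read as the goto-based unwind path that tears down, in reverse order, only what already succeeded. A self-contained sketch of that shape, with step_*() functions as hypothetical stand-ins for the real init/fini calls.

#include <stdio.h>

/* Hypothetical init steps; each returns 0 on success and must be undone
 * in reverse order on failure. */
static int  step_device_init(void)  { puts("device init");  return 0; }
static void step_device_fini(void)  { puts("device fini"); }
static int  step_modeset_init(void) { puts("modeset init"); return 0; }
static void step_modeset_fini(void) { puts("modeset fini"); }
static int  step_register(void)     { puts("register");     return -1; /* pretend this fails */ }

/* Probe-style error unwinding: each failure jumps to the label that undoes
 * only the steps that already succeeded. */
static int probe(void)
{
        int ret;

        ret = step_device_init();
        if (ret)
                goto out;
        ret = step_modeset_init();
        if (ret)
                goto device_fini;
        ret = step_register();
        if (ret)
                goto modeset_fini;
        return 0;

modeset_fini:
        step_modeset_fini();
device_fini:
        step_device_fini();
out:
        return ret;
}

int main(void)
{
        printf("probe() = %d\n", probe());
        return 0;
}
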
|
D | qxl_object.c |
    38  struct qxl_device *qdev;   in qxl_ttm_bo_destroy() local
    41  qdev = to_qxl(bo->tbo.base.dev);   in qxl_ttm_bo_destroy()
    43  qxl_surface_evict(qdev, bo, false);   in qxl_ttm_bo_destroy()
    45  mutex_lock(&qdev->gem.mutex);   in qxl_ttm_bo_destroy()
    47  mutex_unlock(&qdev->gem.mutex);   in qxl_ttm_bo_destroy()
    109  int qxl_bo_create(struct qxl_device *qdev, unsigned long size,   in qxl_bo_create() argument
    128  r = drm_gem_object_init(&qdev->ddev, &bo->tbo.base, size);   in qxl_bo_create()
    144  r = ttm_bo_init_reserved(&qdev->mman.bdev, &bo->tbo, size, type,   in qxl_bo_create()
    149  dev_err(qdev->ddev.dev,   in qxl_bo_create()
    206  void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev,   in qxl_bo_kmap_atomic_page() argument
    [all …]
|
D | qxl_draw.c |
    30  static int alloc_clips(struct qxl_device *qdev,   in alloc_clips() argument
    37  return qxl_alloc_bo_reserved(qdev, release, size, clips_bo);   in alloc_clips()
    43  static struct qxl_rect *drawable_set_clipping(struct qxl_device *qdev,   in drawable_set_clipping() argument
    64  alloc_drawable(struct qxl_device *qdev, struct qxl_release **release)   in alloc_drawable() argument
    66  return qxl_alloc_release_reserved(qdev, sizeof(struct qxl_drawable),   in alloc_drawable()
    71  free_drawable(struct qxl_device *qdev, struct qxl_release *release)   in free_drawable() argument
    73  qxl_release_free(qdev, release);   in free_drawable()
    78  make_drawable(struct qxl_device *qdev, int surface, uint8_t type,   in make_drawable() argument
    85  drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);   in make_drawable()
    113  drawable->mm_time = qdev->rom->mm_clock;   in make_drawable()
    [all …]
|
D | qxl_gem.c |
    34  struct qxl_device *qdev;   in qxl_gem_object_free() local
    37  qdev = to_qxl(gobj->dev);   in qxl_gem_object_free()
    39  qxl_surface_evict(qdev, qobj, false);   in qxl_gem_object_free()
    45  int qxl_gem_object_create(struct qxl_device *qdev, int size,   in qxl_gem_object_create() argument
    58  r = qxl_bo_create(qdev, size, kernel, false, initial_domain, 0, surf, &qbo);   in qxl_gem_object_create()
    68  mutex_lock(&qdev->gem.mutex);   in qxl_gem_object_create()
    69  list_add_tail(&qbo->list, &qdev->gem.objects);   in qxl_gem_object_create()
    70  mutex_unlock(&qdev->gem.mutex);   in qxl_gem_object_create()
    81  int qxl_gem_object_create_with_handle(struct qxl_device *qdev,   in qxl_gem_object_create_with_handle() argument
    94  r = qxl_gem_object_create(qdev, size, 0,   in qxl_gem_object_create_with_handle()
    [all …]
|
D | qxl_ioctl.c |
    39  struct qxl_device *qdev = to_qxl(dev);   in qxl_alloc_ioctl() local
    49  ret = qxl_gem_object_create_with_handle(qdev, file_priv,   in qxl_alloc_ioctl()
    66  struct qxl_device *qdev = to_qxl(dev);   in qxl_map_ioctl() local
    69  return drm_gem_ttm_dumb_map_offset(file_priv, &qdev->ddev, qxl_map->handle,   in qxl_map_ioctl()
    87  apply_reloc(struct qxl_device *qdev, struct qxl_reloc_info *info)   in apply_reloc() argument
    91  reloc_page = qxl_bo_kmap_atomic_page(qdev, info->dst_bo, info->dst_offset & PAGE_MASK);   in apply_reloc()
    92  *(uint64_t *)(reloc_page + (info->dst_offset & ~PAGE_MASK)) = qxl_bo_physical_address(qdev,   in apply_reloc()
    95  qxl_bo_kunmap_atomic_page(qdev, info->dst_bo, reloc_page);   in apply_reloc()
    99  apply_surf_reloc(struct qxl_device *qdev, struct qxl_reloc_info *info)   in apply_surf_reloc() argument
    107  reloc_page = qxl_bo_kmap_atomic_page(qdev, info->dst_bo, info->dst_offset & PAGE_MASK);   in apply_surf_reloc()
    [all …]
|
D | qxl_image.c |
    33  qxl_allocate_chunk(struct qxl_device *qdev,   in qxl_allocate_chunk() argument
    45  ret = qxl_alloc_bo_reserved(qdev, release, chunk_size, &chunk->bo);   in qxl_allocate_chunk()
    56  qxl_image_alloc_objects(struct qxl_device *qdev,   in qxl_image_alloc_objects() argument
    70  ret = qxl_alloc_bo_reserved(qdev, release, sizeof(struct qxl_image), &image->bo);   in qxl_image_alloc_objects()
    76  ret = qxl_allocate_chunk(qdev, release, image, sizeof(struct qxl_data_chunk) + stride * height);   in qxl_image_alloc_objects()
    86  void qxl_image_free_objects(struct qxl_device *qdev, struct qxl_drm_image *dimage)   in qxl_image_free_objects() argument
    100  qxl_image_init_helper(struct qxl_device *qdev,   in qxl_image_init_helper() argument
    127  ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, 0);   in qxl_image_init_helper()
    132  qxl_bo_kunmap_atomic_page(qdev, chunk_bo, ptr);   in qxl_image_init_helper()
    146  ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, page << PAGE_SHIFT);   in qxl_image_init_helper()
    [all …]
|
D | qxl_object.h |
    56  extern int qxl_bo_create(struct qxl_device *qdev,
    66  void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev, struct qxl_bo *bo, int page_offset);
    67  void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev, struct qxl_bo *bo, void *map);
|
D | qxl_dumb.c |
    35  struct qxl_device *qdev = to_qxl(dev);   in qxl_mode_dumb_create() local
    64  r = qxl_gem_object_create_with_handle(qdev, file_priv,   in qxl_mode_dumb_create()
|
/drivers/staging/qlge/ |
D | qlge_mpi.c |
    4  int qlge_unpause_mpi_risc(struct qlge_adapter *qdev)   in qlge_unpause_mpi_risc() argument
    9  tmp = qlge_read32(qdev, CSR);   in qlge_unpause_mpi_risc()
    13  qlge_write32(qdev, CSR, CSR_CMD_CLR_PAUSE);   in qlge_unpause_mpi_risc()
    17  int qlge_pause_mpi_risc(struct qlge_adapter *qdev)   in qlge_pause_mpi_risc() argument
    23  qlge_write32(qdev, CSR, CSR_CMD_SET_PAUSE);   in qlge_pause_mpi_risc()
    25  tmp = qlge_read32(qdev, CSR);   in qlge_pause_mpi_risc()
    33  int qlge_hard_reset_mpi_risc(struct qlge_adapter *qdev)   in qlge_hard_reset_mpi_risc() argument
    39  qlge_write32(qdev, CSR, CSR_CMD_SET_RST);   in qlge_hard_reset_mpi_risc()
    41  tmp = qlge_read32(qdev, CSR);   in qlge_hard_reset_mpi_risc()
    43  qlge_write32(qdev, CSR, CSR_CMD_CLR_RST);   in qlge_hard_reset_mpi_risc()
    [all …]
|
D | qlge_main.c |
    102  static int qlge_sem_trylock(struct qlge_adapter *qdev, u32 sem_mask)   in qlge_sem_trylock() argument
    132  netif_alert(qdev, probe, qdev->ndev, "bad Semaphore mask!.\n");   in qlge_sem_trylock()
    136  qlge_write32(qdev, SEM, sem_bits | sem_mask);   in qlge_sem_trylock()
    137  return !(qlge_read32(qdev, SEM) & sem_bits);   in qlge_sem_trylock()
    140  int qlge_sem_spinlock(struct qlge_adapter *qdev, u32 sem_mask)   in qlge_sem_spinlock() argument
    145  if (!qlge_sem_trylock(qdev, sem_mask))   in qlge_sem_spinlock()
    152  void qlge_sem_unlock(struct qlge_adapter *qdev, u32 sem_mask)   in qlge_sem_unlock() argument
    154  qlge_write32(qdev, SEM, sem_mask);   in qlge_sem_unlock()
    155  qlge_read32(qdev, SEM); /* flush */   in qlge_sem_unlock()
    163  int qlge_wait_reg_rdy(struct qlge_adapter *qdev, u32 reg, u32 bit, u32 err_bit)   in qlge_wait_reg_rdy() argument
    [all …]
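
qlge_sem_trylock() above claims a hardware semaphore by writing the resource's set-bits together with its mask to the SEM register and reading the register back to see whether ownership stuck; qlge_sem_spinlock() retries the trylock, and qlge_sem_unlock() writes the bare mask back and reads once to flush. A toy C sketch of that write-then-read-back handshake follows; reg_read()/reg_write() are hypothetical stand-ins for the MMIO accessors, and a plain variable stands in for the device register.

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* A plain variable models the device's SEM register; real code would use
 * MMIO accessors such as readl()/writel(). Toy model: the last write wins. */
static uint32_t sem_reg;

static uint32_t reg_read(void)          { return sem_reg; }
static void     reg_write(uint32_t v)   { sem_reg = v; }

/* Try to claim the semaphore: write the "set" bits plus the resource mask,
 * then read back; ownership is confirmed only if the bits stuck. */
static bool sem_trylock(uint32_t sem_mask, uint32_t sem_bits)
{
        reg_write(sem_bits | sem_mask);
        return (reg_read() & sem_bits) != 0;
}

/* Retry a bounded number of times before giving up, like qlge_sem_spinlock(). */
static int sem_spinlock(uint32_t sem_mask, uint32_t sem_bits)
{
        for (int tries = 0; tries < 30; tries++) {
                if (sem_trylock(sem_mask, sem_bits))
                        return 0;
                /* real code delays here (udelay/msleep) before retrying */
        }
        return -1;      /* timed out */
}

/* Release: write the bare mask back; the driver also reads once to flush. */
static void sem_unlock(uint32_t sem_mask)
{
        reg_write(sem_mask);
        (void)reg_read();
}

int main(void)
{
        const uint32_t mask = 1u << 16, bits = 1u << 0;

        if (sem_spinlock(mask, bits) == 0) {
                puts("semaphore acquired");
                sem_unlock(mask);
        }
        return 0;
}
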
|
D | qlge_ethtool.c |
    186  static int qlge_update_ring_coalescing(struct qlge_adapter *qdev)   in qlge_update_ring_coalescing() argument
    192  if (!netif_running(qdev->ndev))   in qlge_update_ring_coalescing()
    198  cqicb = (struct cqicb *)&qdev->rx_ring[qdev->rss_ring_count];   in qlge_update_ring_coalescing()
    199  if (le16_to_cpu(cqicb->irq_delay) != qdev->tx_coalesce_usecs ||   in qlge_update_ring_coalescing()
    200  le16_to_cpu(cqicb->pkt_delay) != qdev->tx_max_coalesced_frames) {   in qlge_update_ring_coalescing()
    201  for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {   in qlge_update_ring_coalescing()
    202  rx_ring = &qdev->rx_ring[i];   in qlge_update_ring_coalescing()
    204  cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);   in qlge_update_ring_coalescing()
    206  cpu_to_le16(qdev->tx_max_coalesced_frames);   in qlge_update_ring_coalescing()
    208  status = qlge_write_cfg(qdev, cqicb, sizeof(*cqicb),   in qlge_update_ring_coalescing()
    [all …]
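
The coalescing update above only rewrites the completion-queue control blocks when the requested values actually changed, and because the CQICB fields are kept in device (little-endian) byte order it converts with le16_to_cpu()/cpu_to_le16() on every compare and store. Below is a small userspace sketch of that compare-then-update-in-device-byte-order idiom, using glibc's le16toh()/htole16() in place of the kernel helpers; struct cqicb_like and its field names are invented for the example.

/* Builds with glibc; le16toh()/htole16() come from <endian.h>. */
#define _DEFAULT_SOURCE
#include <endian.h>
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* Invented stand-in for a device control block with little-endian fields. */
struct cqicb_like {
        uint16_t irq_delay;     /* stored little-endian, as the device expects */
        uint16_t pkt_delay;     /* stored little-endian */
};

/* Rewrite the block only if the host-order settings differ from what is
 * already programmed, mirroring the compare in qlge_update_ring_coalescing(). */
static bool update_coalescing(struct cqicb_like *cb,
                              uint16_t usecs, uint16_t max_frames)
{
        if (le16toh(cb->irq_delay) == usecs &&
            le16toh(cb->pkt_delay) == max_frames)
                return false;                   /* nothing changed, skip the write */

        cb->irq_delay = htole16(usecs);
        cb->pkt_delay = htole16(max_frames);
        return true;                            /* caller would push this to the device */
}

int main(void)
{
        struct cqicb_like cb = { .irq_delay = htole16(10), .pkt_delay = htole16(8) };

        printf("changed: %d\n", update_coalescing(&cb, 10, 8));        /* 0: unchanged */
        printf("changed: %d\n", update_coalescing(&cb, 25, 16));       /* 1: rewritten */
        return 0;
}
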
|
D | qlge_dbg.c |
    9  static u32 qlge_read_other_func_reg(struct qlge_adapter *qdev,   in qlge_read_other_func_reg() argument
    18  | (qdev->alt_func << MPI_NIC_FUNCTION_SHIFT)   in qlge_read_other_func_reg()
    20  status = qlge_read_mpi_reg(qdev, register_to_read, &reg_val);   in qlge_read_other_func_reg()
    28  static int qlge_write_other_func_reg(struct qlge_adapter *qdev,   in qlge_write_other_func_reg() argument
    35  | (qdev->alt_func << MPI_NIC_FUNCTION_SHIFT)   in qlge_write_other_func_reg()
    38  return qlge_write_mpi_reg(qdev, register_to_read, reg_val);   in qlge_write_other_func_reg()
    41  static int qlge_wait_other_func_reg_rdy(struct qlge_adapter *qdev, u32 reg,   in qlge_wait_other_func_reg_rdy() argument
    48  temp = qlge_read_other_func_reg(qdev, reg);   in qlge_wait_other_func_reg_rdy()
    60  static int qlge_read_other_func_serdes_reg(struct qlge_adapter *qdev, u32 reg,   in qlge_read_other_func_serdes_reg() argument
    66  status = qlge_wait_other_func_reg_rdy(qdev, XG_SERDES_ADDR / 4,   in qlge_read_other_func_serdes_reg()
    [all …]
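
qlge_read_other_func_reg() above builds the MPI register address by OR-ing the target register with the other PCI function's number shifted into its field, then goes through the MPI read/write helpers. A minimal sketch of that build-a-command-word-from-bitfields step; the field widths, shift values, and decode() helper below are assumptions for illustration, not the driver's real register layout.

#include <stdint.h>
#include <stdio.h>

/* Assumed field layout, for illustration only: register index in the low
 * bits, function number in a small field above it, plus a read flag. */
#define FUNC_SHIFT      16
#define FUNC_MASK       0x3u
#define READ_CMD        (1u << 24)

static uint32_t make_other_func_read(uint32_t reg, uint32_t alt_func)
{
        return reg | ((alt_func & FUNC_MASK) << FUNC_SHIFT) | READ_CMD;
}

static void decode(uint32_t cmd)
{
        printf("cmd=0x%08x reg=0x%04x func=%u read=%u\n",
               cmd, cmd & 0xffffu, (cmd >> FUNC_SHIFT) & FUNC_MASK,
               (cmd & READ_CMD) ? 1 : 0);
}

int main(void)
{
        decode(make_other_func_read(0x0124, 1));
        return 0;
}
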
|
D | qlge.h |
    1368  #define QL_TXQ_IDX(qdev, skb) (smp_processor_id() % (qdev->tx_ring_count))   argument
    1391  struct qlge_adapter *qdev;   member
    1490  struct qlge_adapter *qdev;   member
    1994  struct qlge_adapter *qdev;   member
    2059  int (*get_flash)(struct qlge_adapter *qdev);
    2060  int (*port_initialize)(struct qlge_adapter *qdev);
    2064  struct qlge_adapter *qdev;   member
    2073  return ndev_priv->qdev;   in netdev_to_qdev()
    2180  static inline u32 qlge_read32(const struct qlge_adapter *qdev, int reg)   in qlge_read32() argument
    2182  return readl(qdev->reg_base + reg);   in qlge_read32()
    [all …]
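
Two small helpers stand out in the header above: QL_TXQ_IDX() spreads transmits across TX rings by taking the current CPU number modulo the ring count, and qlge_read32() is a thin readl() wrapper over reg_base + reg. A userspace sketch of the queue-spreading part follows, using glibc's sched_getcpu() where the kernel macro uses smp_processor_id(); the names are illustrative only.

#define _GNU_SOURCE
#include <sched.h>              /* sched_getcpu(), glibc */
#include <stdio.h>

/* Pick a TX ring for the current CPU the way QL_TXQ_IDX() does:
 * current processor id modulo the number of rings. */
static unsigned int pick_tx_ring(unsigned int tx_ring_count)
{
        int cpu = sched_getcpu();

        if (cpu < 0)
                cpu = 0;        /* fall back to ring 0 if the cpu is unknown */
        return (unsigned int)cpu % tx_ring_count;
}

int main(void)
{
        unsigned int ring = pick_tx_ring(4);

        printf("cpu %d -> tx ring %u of 4\n", sched_getcpu(), ring);
        return 0;
}
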
|
D | qlge_devlink.c |
    57  struct qlge_adapter *qdev = devlink_health_reporter_priv(reporter);   in qlge_reporter_coredump() local
    61  if (!netif_running(qdev->ndev))   in qlge_reporter_coredump()
    64  if (test_bit(QL_FRC_COREDUMP, &qdev->flags)) {   in qlge_reporter_coredump()
    65  if (qlge_own_firmware(qdev)) {   in qlge_reporter_coredump()
    66  qlge_queue_fw_error(qdev);   in qlge_reporter_coredump()
    70  netif_err(qdev, ifup, qdev->ndev,   in qlge_reporter_coredump()
    80  err = qlge_core_dump(qdev, dump);   in qlge_reporter_coredump()
    86  qlge_soft_reset_mpi_risc(qdev);   in qlge_reporter_coredump()
|
/drivers/s390/cio/ |
D | qdio.h |
    276  struct qdio_irq *qdev = __irq; \
    277  if (qdev->perf_stat_enabled) \
    278  (qdev->perf_stat.__attr)++; \
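
The fragment above is the body of a statistics macro: it bumps the named perf counter only when the irq's perf_stat_enabled flag is set, so the counters cost almost nothing when statistics are off. A standalone C sketch of the same guarded-increment macro shape; the struct and macro names here are invented, not the s390 qdio ones.

#include <stdio.h>

/* Invented stand-ins for the qdio structures. */
struct perf_counters {
        unsigned long handled;
        unsigned long polled;
};

struct irq_ctx {
        int perf_stat_enabled;
        struct perf_counters perf_stat;
};

/* Increment a named counter only when stats are enabled, like the qdio
 * macro in the excerpt. The do { } while (0) keeps it statement-safe. */
#define COUNT_EVENT(irq, attr)                          \
        do {                                            \
                struct irq_ctx *__ctx = (irq);          \
                if (__ctx->perf_stat_enabled)           \
                        (__ctx->perf_stat.attr)++;      \
        } while (0)

int main(void)
{
        struct irq_ctx ctx = { .perf_stat_enabled = 1 };

        COUNT_EVENT(&ctx, handled);
        COUNT_EVENT(&ctx, handled);
        ctx.perf_stat_enabled = 0;
        COUNT_EVENT(&ctx, polled);      /* skipped: stats disabled */

        printf("handled=%lu polled=%lu\n", ctx.perf_stat.handled, ctx.perf_stat.polled);
        return 0;
}
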
|
/drivers/md/ |
D | raid5.c |
    4883  struct r5dev *pdev, *qdev;   in handle_stripe() local
    5026  qdev = &sh->dev[sh->qd_idx];   in handle_stripe()
    5036  (s.q_failed || ((test_bit(R5_Insync, &qdev->flags)   in handle_stripe()
    5037  && !test_bit(R5_LOCKED, &qdev->flags)   in handle_stripe()
    5038  && (test_bit(R5_UPTODATE, &qdev->flags) ||   in handle_stripe()
    5039  test_bit(R5_Discard, &qdev->flags))))))   in handle_stripe()
|