/drivers/gpu/drm/vmwgfx/
vmwgfx_page_dirty.c
     85  struct vmw_bo_dirty *dirty = vbo->dirty;  in vmw_bo_dirty_scan_pagetable() local
     92  offset, dirty->bitmap_size,  in vmw_bo_dirty_scan_pagetable()
     93  offset, &dirty->bitmap[0],  in vmw_bo_dirty_scan_pagetable()
     94  &dirty->start, &dirty->end);  in vmw_bo_dirty_scan_pagetable()
     96  dirty->change_count++;  in vmw_bo_dirty_scan_pagetable()
     98  dirty->change_count = 0;  in vmw_bo_dirty_scan_pagetable()
    100  if (dirty->change_count > VMW_DIRTY_NUM_CHANGE_TRIGGERS) {  in vmw_bo_dirty_scan_pagetable()
    101  dirty->change_count = 0;  in vmw_bo_dirty_scan_pagetable()
    102  dirty->method = VMW_BO_DIRTY_MKWRITE;  in vmw_bo_dirty_scan_pagetable()
    104  offset, dirty->bitmap_size);  in vmw_bo_dirty_scan_pagetable()
  [all …]
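These vmw_bo_dirty_scan_pagetable() hits show vmwgfx counting consecutive scans that found changed pages and, once the streak passes VMW_DIRTY_NUM_CHANGE_TRIGGERS, switching the buffer from page-table scanning to write-notify (mkwrite) tracking. A minimal sketch of that heuristic, assuming an illustrative threshold value and type names (not the driver's definitions):

    #include <stdbool.h>

    #define NUM_CHANGE_TRIGGERS 4   /* assumed stand-in for VMW_DIRTY_NUM_CHANGE_TRIGGERS */

    enum dirty_method { DIRTY_PAGETABLE, DIRTY_MKWRITE };

    struct dirty_tracker {
        enum dirty_method method;
        unsigned int change_count;
    };

    /* Called once per scan; "changed" reports whether this scan saw dirty pages. */
    static void dirty_scan_tick(struct dirty_tracker *t, bool changed)
    {
        if (changed)
            t->change_count++;       /* extend the streak of busy scans */
        else
            t->change_count = 0;     /* streak broken: scanning stays cheap */

        if (t->change_count > NUM_CHANGE_TRIGGERS) {
            /* The buffer changes on almost every scan, so re-scanning page
             * tables is wasted work; fall back to write-notify tracking. */
            t->change_count = 0;
            t->method = DIRTY_MKWRITE;
        }
    }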
vmwgfx_fb.c
     65  } dirty;  member
    190  if (!READ_ONCE(par->dirty.active))  in vmw_fb_dirty_flush()
    203  spin_lock_irqsave(&par->dirty.lock, irq_flags);  in vmw_fb_dirty_flush()
    204  if (!par->dirty.active) {  in vmw_fb_dirty_flush()
    205  spin_unlock_irqrestore(&par->dirty.lock, irq_flags);  in vmw_fb_dirty_flush()
    217  dst_x1 = par->dirty.x1 - par->fb_x;  in vmw_fb_dirty_flush()
    218  dst_y1 = par->dirty.y1 - par->fb_y;  in vmw_fb_dirty_flush()
    222  dst_x2 = par->dirty.x2 - par->fb_x;  in vmw_fb_dirty_flush()
    223  dst_y2 = par->dirty.y2 - par->fb_y;  in vmw_fb_dirty_flush()
    231  par->dirty.x1 = par->dirty.x2 = 0;  in vmw_fb_dirty_flush()
  [all …]
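vmw_fb_dirty_flush() re-checks dirty.active under the spinlock, snapshots the accumulated rectangle, resets it, and drops the lock before doing the actual copy. A sketch of that snapshot-and-reset step, with a pthread mutex standing in for spin_lock_irqsave() and illustrative names:

    #include <pthread.h>
    #include <stdbool.h>

    struct fb_dirty {
        pthread_mutex_t lock;
        bool active;
        int x1, y1, x2, y2;   /* accumulated dirty rectangle */
    };

    /* Returns false if there was nothing to flush. */
    static bool fb_dirty_take(struct fb_dirty *d, int out[4])
    {
        pthread_mutex_lock(&d->lock);
        if (!d->active || (d->x1 == d->x2 && d->y1 == d->y2)) {
            pthread_mutex_unlock(&d->lock);
            return false;
        }
        out[0] = d->x1; out[1] = d->y1; out[2] = d->x2; out[3] = d->y2;
        d->x1 = d->x2 = 0;    /* reset the pending region, as the driver does */
        d->y1 = d->y2 = 0;
        pthread_mutex_unlock(&d->lock);
        return true;          /* caller blits the snapshot without the lock held */
    }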
vmwgfx_scrn.c
   1024  static void vmw_sou_surface_fifo_commit(struct vmw_kms_dirty *dirty)  in vmw_sou_surface_fifo_commit() argument
   1027  container_of(dirty, typeof(*sdirty), base);  in vmw_sou_surface_fifo_commit()
   1028  struct vmw_kms_sou_dirty_cmd *cmd = dirty->cmd;  in vmw_sou_surface_fifo_commit()
   1029  s32 trans_x = dirty->unit->crtc.x - sdirty->dst_x;  in vmw_sou_surface_fifo_commit()
   1030  s32 trans_y = dirty->unit->crtc.y - sdirty->dst_y;  in vmw_sou_surface_fifo_commit()
   1031  size_t region_size = dirty->num_hits * sizeof(SVGASignedRect);  in vmw_sou_surface_fifo_commit()
   1035  if (!dirty->num_hits) {  in vmw_sou_surface_fifo_commit()
   1036  vmw_cmd_commit(dirty->dev_priv, 0);  in vmw_sou_surface_fifo_commit()
   1058  cmd->body.destScreenId = dirty->unit->unit;  in vmw_sou_surface_fifo_commit()
   1061  for (i = 0; i < dirty->num_hits; ++i, ++blit) {  in vmw_sou_surface_fifo_commit()
  [all …]
vmwgfx_stdu.c
    458  static void vmw_stdu_bo_clip(struct vmw_kms_dirty *dirty)  in vmw_stdu_bo_clip() argument
    461  container_of(dirty, struct vmw_stdu_dirty, base);  in vmw_stdu_bo_clip()
    462  struct vmw_stdu_dma *cmd = dirty->cmd;  in vmw_stdu_bo_clip()
    465  blit += dirty->num_hits;  in vmw_stdu_bo_clip()
    466  blit->srcx = dirty->fb_x;  in vmw_stdu_bo_clip()
    467  blit->srcy = dirty->fb_y;  in vmw_stdu_bo_clip()
    468  blit->x = dirty->unit_x1;  in vmw_stdu_bo_clip()
    469  blit->y = dirty->unit_y1;  in vmw_stdu_bo_clip()
    471  blit->w = dirty->unit_x2 - dirty->unit_x1;  in vmw_stdu_bo_clip()
    472  blit->h = dirty->unit_y2 - dirty->unit_y1;  in vmw_stdu_bo_clip()
  [all …]
vmwgfx_surface.c
    672  WARN_ON_ONCE(res->dirty);  in vmw_user_surface_free()
   1235  if (res->backup->dirty && res->backup_dirty) {  in vmw_gb_surface_bind()
   1713  static void vmw_subres_dirty_add(struct vmw_surface_dirty *dirty,  in vmw_subres_dirty_add() argument
   1717  const struct vmw_surface_cache *cache = &dirty->cache;  in vmw_subres_dirty_add()
   1718  SVGA3dBox *box = &dirty->boxes[loc_start->sub_resource];  in vmw_subres_dirty_add()
   1723  if (WARN_ON(loc_start->sub_resource >= dirty->num_subres))  in vmw_subres_dirty_add()
   1761  static void vmw_subres_dirty_full(struct vmw_surface_dirty *dirty, u32 subres)  in vmw_subres_dirty_full() argument
   1763  const struct vmw_surface_cache *cache = &dirty->cache;  in vmw_subres_dirty_full()
   1766  SVGA3dBox *box = &dirty->boxes[subres];  in vmw_subres_dirty_full()
   1783  struct vmw_surface_dirty *dirty =  in vmw_surface_tex_dirty_range_add() local
  [all …]
vmwgfx_kms.c
    896  .dirty = drm_atomic_helper_dirtyfb,
   1077  .dirty = vmw_framebuffer_bo_dirty_ext,
   2396  struct vmw_kms_dirty *dirty)  in vmw_kms_helper_dirty() argument
   2403  dirty->dev_priv = dev_priv;  in vmw_kms_helper_dirty()
   2406  if (dirty->crtc) {  in vmw_kms_helper_dirty()
   2407  units[num_units++] = vmw_crtc_to_du(dirty->crtc);  in vmw_kms_helper_dirty()
   2427  dirty->unit = unit;  in vmw_kms_helper_dirty()
   2428  if (dirty->fifo_reserve_size > 0) {  in vmw_kms_helper_dirty()
   2429  dirty->cmd = VMW_CMD_RESERVE(dev_priv,  in vmw_kms_helper_dirty()
   2430  dirty->fifo_reserve_size);  in vmw_kms_helper_dirty()
  [all …]
vmwgfx_validation.c
     83  u32 dirty : 1;  member
    317  u32 dirty,  in vmw_validation_add_resource() argument
    368  if (dirty) {  in vmw_validation_add_resource()
    371  node->dirty = (dirty & VMW_RES_DIRTY_SET) ? 1 : 0;  in vmw_validation_add_resource()
    391  void *val_private, u32 dirty)  in vmw_validation_res_set_dirty() argument
    395  if (!dirty)  in vmw_validation_res_set_dirty()
    401  val->dirty = (dirty & VMW_RES_DIRTY_SET) ? 1 : 0;  in vmw_validation_res_set_dirty()
    513  val->dirty,  in vmw_validation_res_unreserve()
    621  if (vbo->dirty)  in vmw_validation_bo_validate()
    649  val->dirty);  in vmw_validation_res_validate()
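The validation code stores the flag as a one-bit bitfield (u32 dirty : 1) and collapses the caller's request mask into it with (dirty & VMW_RES_DIRTY_SET) ? 1 : 0. A sketch of that conversion; the flag values here are assumptions, not the driver's definitions:

    #include <stdint.h>

    #define RES_DIRTY_SET   (1u << 0)   /* assumed stand-ins for the */
    #define RES_DIRTY_CLEAR (1u << 1)   /* VMW_RES_DIRTY_* flags     */

    struct val_node {
        uint32_t dirty : 1;   /* one bit is enough for the recorded state */
    };

    static void res_set_dirty(struct val_node *node, uint32_t flags)
    {
        if (!flags)
            return;           /* no change requested */
        /* Collapse the multi-bit request mask into the single stored bit. */
        node->dirty = (flags & RES_DIRTY_SET) ? 1 : 0;
    }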
vmwgfx_binding.c
    109  unsigned long dirty;  member
    762  unsigned long *dirty,  in vmw_collect_dirty_view_ids() argument
    770  i = find_first_bit(dirty, max_num);  in vmw_collect_dirty_view_ids()
    781  next_bit = find_next_bit(dirty, max_num, i + 1);  in vmw_collect_dirty_view_ids()
    959  if (!test_bit(VMW_BINDING_PS_SR_BIT, &sb->dirty))  in vmw_binding_emit_dirty_ps()
    966  __clear_bit(VMW_BINDING_PS_SR_BIT, &sb->dirty);  in vmw_binding_emit_dirty_ps()
    989  unsigned long *dirty,  in vmw_collect_dirty_vbs() argument
    998  i = find_first_bit(dirty, max_num);  in vmw_collect_dirty_vbs()
   1015  next_bit = find_next_bit(dirty, max_num, i + 1);  in vmw_collect_dirty_vbs()
   1138  while ((hit = find_next_bit(&cbs->dirty, VMW_BINDING_NUM_BITS, hit))  in vmw_binding_emit_dirty()
  [all …]
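vmw_collect_dirty_view_ids() and vmw_binding_emit_dirty() walk a dirty bitmap with find_first_bit()/find_next_bit() so only changed bindings are re-emitted. A self-contained userspace equivalent over a single word (the kernel helpers work on arbitrary-length bitmaps):

    #include <stdio.h>

    static void for_each_dirty_bit(unsigned long dirty, unsigned int max_num,
                                   void (*emit)(unsigned int))
    {
        /* Linear scan for clarity; kernel code uses find_next_bit() to
         * skip runs of zeros efficiently. */
        for (unsigned int i = 0; i < max_num; i++)
            if (dirty & (1UL << i))
                emit(i);
    }

    static void emit_id(unsigned int i) { printf("rebind slot %u\n", i); }

    int main(void)
    {
        unsigned long dirty = 0;
        dirty |= 1UL << 3;    /* __set_bit(3, &dirty) in kernel terms */
        dirty |= 1UL << 17;
        for_each_dirty_bit(dirty, 8 * sizeof(unsigned long), emit_id);
        return 0;
    }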
vmwgfx_resource.c
    138  if (res->dirty)  in vmw_resource_release()
    232  res->dirty = NULL;  in vmw_resource_init()
    424  if (res->backup->dirty && !res->dirty) {  in vmw_resource_do_validate()
    428  } else if (!res->backup->dirty && res->dirty) {  in vmw_resource_do_validate()
    437  if (res->dirty) {  in vmw_resource_do_validate()
    476  bool dirty,  in vmw_resource_unreserve() argument
    501  WARN_ON(res->coherent && !new_backup->dirty);  in vmw_resource_unreserve()
    515  res->res_dirty = dirty;  in vmw_resource_unreserve()
   1088  if (res->dirty)  in vmw_resource_dirty_update()
/drivers/gpu/drm/mga/
mga_state.c
    330  unsigned int dirty = sarea_priv->dirty;  in mga_g200_emit_state() local
    337  if (dirty & MGA_UPLOAD_CONTEXT) {  in mga_g200_emit_state()
    339  sarea_priv->dirty &= ~MGA_UPLOAD_CONTEXT;  in mga_g200_emit_state()
    342  if (dirty & MGA_UPLOAD_TEX0) {  in mga_g200_emit_state()
    344  sarea_priv->dirty &= ~MGA_UPLOAD_TEX0;  in mga_g200_emit_state()
    351  unsigned int dirty = sarea_priv->dirty;  in mga_g400_emit_state() local
    359  if (dirty & MGA_UPLOAD_CONTEXT) {  in mga_g400_emit_state()
    361  sarea_priv->dirty &= ~MGA_UPLOAD_CONTEXT;  in mga_g400_emit_state()
    364  if (dirty & MGA_UPLOAD_TEX0) {  in mga_g400_emit_state()
    366  sarea_priv->dirty &= ~MGA_UPLOAD_TEX0;  in mga_g400_emit_state()
  [all …]
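mga, and likewise the r128 and i810 entries below, follow one shape: snapshot the shared-area dirty mask, then for each set UPLOAD_* bit emit that state group and clear the bit so it is not re-sent. A sketch with illustrative flag values:

    #include <stdio.h>

    #define UPLOAD_CONTEXT (1u << 0)   /* illustrative; each driver has its */
    #define UPLOAD_TEX0    (1u << 1)   /* own *_UPLOAD_* flag definitions   */

    struct sarea { unsigned int dirty; };

    static void emit_state(struct sarea *s)
    {
        unsigned int dirty = s->dirty;   /* snapshot, as in mga_g200_emit_state() */

        if (dirty & UPLOAD_CONTEXT) {
            puts("emit context state");
            s->dirty &= ~UPLOAD_CONTEXT; /* acknowledged: clear so it is not re-sent */
        }
        if (dirty & UPLOAD_TEX0) {
            puts("emit texture unit 0 state");
            s->dirty &= ~UPLOAD_TEX0;
        }
    }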
/drivers/misc/sgi-gru/
gruhandles.c
    158  unsigned long vaddr, int asid, int dirty,  in tfh_write_only() argument
    165  tfh->dirty = dirty;  in tfh_write_only()
    174  unsigned long vaddr, int asid, int dirty,  in tfh_write_restart() argument
    181  tfh->dirty = dirty;  in tfh_write_restart()
/drivers/gpu/drm/r128/
r128_state.c
    231  unsigned int dirty = sarea_priv->dirty;  in r128_emit_state() local
    233  DRM_DEBUG("dirty=0x%08x\n", dirty);  in r128_emit_state()
    235  if (dirty & R128_UPLOAD_CORE) {  in r128_emit_state()
    237  sarea_priv->dirty &= ~R128_UPLOAD_CORE;  in r128_emit_state()
    240  if (dirty & R128_UPLOAD_CONTEXT) {  in r128_emit_state()
    242  sarea_priv->dirty &= ~R128_UPLOAD_CONTEXT;  in r128_emit_state()
    245  if (dirty & R128_UPLOAD_SETUP) {  in r128_emit_state()
    247  sarea_priv->dirty &= ~R128_UPLOAD_SETUP;  in r128_emit_state()
    250  if (dirty & R128_UPLOAD_MASKS) {  in r128_emit_state()
    252  sarea_priv->dirty &= ~R128_UPLOAD_MASKS;  in r128_emit_state()
  [all …]
/drivers/md/
dm-cache-policy-smq.c
     43  bool dirty:1;  member
    722  e->dirty = true; /* FIXME: audit */  in init_entry()
    825  struct queue dirty;  member
    886  struct queue *q = &mq->dirty;  in __update_writeback_sentinels()
    932  q_push(&mq->dirty, sentinel);  in __sentinels_init()
    958  q_del(e->dirty ? &mq->dirty : &mq->clean, e);  in del_queue()
    963  if (e->dirty)  in push_queue()
    964  q_push(&mq->dirty, e);  in push_queue()
    979  if (e->dirty)  in push_queue_front()
    980  q_push_front(&mq->dirty, e);  in push_queue_front()
  [all …]
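In the smq policy each cache entry lives on exactly one of two queues, and e->dirty picks which, as in q_del(e->dirty ? &mq->dirty : &mq->clean, e). A sketch of that selection over a minimal circular-list queue (the policy's struct queue is richer):

    #include <stdbool.h>

    struct entry {
        struct entry *prev, *next;
        bool dirty;
    };

    struct queue { struct entry head; };   /* circular list with a dummy head */

    static void q_init(struct queue *q)
    {
        q->head.prev = q->head.next = &q->head;
    }

    static void q_push(struct queue *q, struct entry *e)   /* append at tail */
    {
        e->prev = q->head.prev;
        e->next = &q->head;
        e->prev->next = e;
        q->head.prev = e;
    }

    static void q_del(struct entry *e)
    {
        e->prev->next = e->next;
        e->next->prev = e->prev;
    }

    /* The shape of the policy's push_queue(): route by the dirty bit. */
    static void push_queue(struct queue *clean, struct queue *dirty, struct entry *e)
    {
        q_push(e->dirty ? dirty : clean, e);
    }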
/drivers/char/xilinx_hwicap/
buffer_icap.c
    272  bool dirty = false;  in buffer_icap_set_configuration() local
    281  dirty = true;  in buffer_icap_set_configuration()
    300  dirty = false;  in buffer_icap_set_configuration()
    304  if (dirty) {  in buffer_icap_set_configuration()
/drivers/md/persistent-data/
dm-bitset.c
    110  if (!info->current_index_set || !info->dirty)  in dm_bitset_flush()
    122  info->dirty = false;  in dm_bitset_flush()
    141  info->dirty = false;  in read_bits()
    175  info->dirty = true;  in dm_bitset_set_bit()
    192  info->dirty = true;  in dm_bitset_clear_bit()
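dm-bitset batches its metadata writes: dm_bitset_set_bit()/dm_bitset_clear_bit() only flip bits in a cached word and raise info->dirty, and dm_bitset_flush() writes back only when the flag is set. A self-contained sketch of that write-back caching, with a plain variable standing in for the on-disk block:

    #include <stdbool.h>
    #include <stdint.h>

    struct bitset_info {
        uint64_t current_bits;   /* cached word */
        bool dirty;              /* cached word differs from the store */
        uint64_t *store;         /* stand-in for the persistent block */
    };

    static void bitset_set_bit(struct bitset_info *info, unsigned bit)
    {
        info->current_bits |= UINT64_C(1) << bit;
        info->dirty = true;      /* cheap: no I/O yet */
    }

    static void bitset_flush(struct bitset_info *info)
    {
        if (!info->dirty)
            return;              /* nothing changed since the last write-back */
        *info->store = info->current_bits;   /* the real code writes a block */
        info->dirty = false;
    }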
/drivers/input/joystick/
grip_mp.c
     48  int dirty; /* has the state been updated? */  member
    415  port->dirty = 1;  in get_and_decode_packet()
    426  port->dirty = 0;  in get_and_decode_packet()
    529  port->dirty = 0;  in report_slot()
    553  if (grip->port[i]->dirty)  in grip_poll()
    624  if (port->dirty) /* report initial state, if any */  in register_slot()
/drivers/gpu/drm/i810/
i810_dma.c
    563  unsigned int dirty = sarea_priv->dirty;  in i810EmitState() local
    565  DRM_DEBUG("%x\n", dirty);  in i810EmitState()
    567  if (dirty & I810_UPLOAD_BUFFERS) {  in i810EmitState()
    569  sarea_priv->dirty &= ~I810_UPLOAD_BUFFERS;  in i810EmitState()
    572  if (dirty & I810_UPLOAD_CTX) {  in i810EmitState()
    574  sarea_priv->dirty &= ~I810_UPLOAD_CTX;  in i810EmitState()
    577  if (dirty & I810_UPLOAD_TEX0) {  in i810EmitState()
    579  sarea_priv->dirty &= ~I810_UPLOAD_TEX0;  in i810EmitState()
    582  if (dirty & I810_UPLOAD_TEX1) {  in i810EmitState()
    584  sarea_priv->dirty &= ~I810_UPLOAD_TEX1;  in i810EmitState()
  [all …]
/drivers/net/ethernet/netronome/nfp/flower/
lag_conf.c
     65  bool dirty;  member
    130  group->dirty = true;  in nfp_fl_lag_group_create()
    342  entry->dirty = true;  in nfp_fl_lag_do_work()
    351  if (slaves != entry->slave_cnt || !entry->dirty) {  in nfp_fl_lag_do_work()
    360  entry->dirty = false;  in nfp_fl_lag_do_work()
    454  group_entry->dirty = true;  in nfp_flower_lag_unprocessed_msg()
    576  group->dirty = true;  in nfp_fl_lag_changeupper_event()
/drivers/infiniband/hw/hfi1/
user_pages.c
     96  size_t npages, bool dirty)  in hfi1_release_user_pages() argument
     98  unpin_user_pages_dirty_lock(p, npages, dirty);  in hfi1_release_user_pages()
/drivers/soc/qcom/
rpmh.c
    154  ctrlr->dirty |= (req->sleep_val != old_sleep_val ||  in cache_rpm_request()
    287  ctrlr->dirty = true;  in cache_batch()
    451  if (!ctrlr->dirty) {  in rpmh_flush()
    480  ctrlr->dirty = false;  in rpmh_flush()
    504  ctrlr->dirty = true;  in rpmh_invalidate()
/drivers/net/ethernet/atheros/
ag71xx.c
    319  unsigned int dirty;  member
    802  while (ring->dirty + n != ring->curr) {  in ag71xx_tx_packets()
    807  i = (ring->dirty + n) & ring_mask;  in ag71xx_tx_packets()
    834  ring->dirty += n;  in ag71xx_tx_packets()
    851  if ((ring->curr - ring->dirty) < (ring_size * 3) / 4)  in ag71xx_tx_packets()
    983  ag->tx_ring.dirty = 0;  in ag71xx_fast_reset()
   1199  while (ring->curr != ring->dirty) {  in ag71xx_ring_tx_clean()
   1201  u32 i = ring->dirty & ring_mask;  in ag71xx_ring_tx_clean()
   1215  ring->dirty++;  in ag71xx_ring_tx_clean()
   1245  ring->dirty = 0;  in ag71xx_ring_tx_init()
  [all …]
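ag71xx (and dwc-xlgmac below) track a descriptor ring with two free-running counters: curr advances as descriptors are queued, dirty as the hardware completes them; slots are addressed as counter & mask and occupancy is curr - dirty. A sketch assuming a power-of-two ring size:

    #include <stdint.h>

    #define RING_SIZE 256u                 /* must be a power of two */
    #define RING_MASK (RING_SIZE - 1)

    struct tx_ring {
        uint32_t curr;    /* next slot to fill (free-running) */
        uint32_t dirty;   /* next slot to reap (free-running) */
    };

    static uint32_t tx_in_flight(const struct tx_ring *r)
    {
        return r->curr - r->dirty;     /* wraps correctly with unsigned math */
    }

    static void tx_reap_completed(struct tx_ring *r, uint32_t done)
    {
        while (done--) {
            uint32_t slot = r->dirty & RING_MASK;  /* masked index into descriptors */
            (void)slot;                            /* free skb / unmap DMA here */
            r->dirty++;
        }
    }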
/drivers/infiniband/hw/qib/
qib_user_pages.c
     41  int dirty)  in __qib_release_user_pages() argument
     43  unpin_user_pages_dirty_lock(p, num_pages, dirty);  in __qib_release_user_pages()
/drivers/net/ethernet/synopsys/
dwc-xlgmac-net.c
     30  return (ring->dma_desc_count - (ring->cur - ring->dirty));  in xlgmac_tx_avail_desc()
     35  return (ring->cur - ring->dirty);  in xlgmac_rx_dirty_desc()
    962  while (ring->dirty != ring->cur) {  in xlgmac_rx_refresh()
    963  desc_data = XLGMAC_GET_DESC_DATA(ring, ring->dirty);  in xlgmac_rx_refresh()
    971  hw_ops->rx_desc_reset(pdata, desc_data, ring->dirty);  in xlgmac_rx_refresh()
    973  ring->dirty++;  in xlgmac_rx_refresh()
    982  desc_data = XLGMAC_GET_DESC_DATA(ring, ring->dirty - 1);  in xlgmac_rx_refresh()
   1063  (ring->dirty != cur)) {  in xlgmac_tx_poll()
   1064  desc_data = XLGMAC_GET_DESC_DATA(ring, ring->dirty);  in xlgmac_tx_poll()
   1076  xlgmac_dump_tx_desc(pdata, ring, ring->dirty, 1, 0);  in xlgmac_tx_poll()
  [all …]
/drivers/infiniband/core/
umem.c
     48  static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int dirty)  in __ib_umem_release() argument
     50  bool make_dirty = umem->writable && dirty;  in __ib_umem_release()
     54  if (dirty)  in __ib_umem_release()
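hfi1, qib and the core umem code all release pinned user pages through unpin_user_pages_dirty_lock(), a real kernel API that marks the pages dirty before unpinning when its last argument is true, so data the device wrote by DMA is not lost. A kernel-style wrapper sketch; the guard mirrors __ib_umem_release()'s make_dirty = umem->writable && dirty, and the wrapper itself is illustrative:

    #include <linux/mm.h>

    static void release_user_pages(struct page **pages, unsigned long npages,
                                   bool writable, bool dirty)
    {
        /* Never dirty pages behind a read-only pin. */
        unpin_user_pages_dirty_lock(pages, npages, writable && dirty);
    }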
/drivers/gpu/drm/tiny/
gm12u320.c
    399  struct drm_rect *dirty)  in gm12u320_fb_mark_dirty() argument
    411  gm12u320->fb_update.rect = *dirty;  in gm12u320_fb_mark_dirty()
    417  rect->x1 = min(rect->x1, dirty->x1);  in gm12u320_fb_mark_dirty()
    418  rect->y1 = min(rect->y1, dirty->y1);  in gm12u320_fb_mark_dirty()
    419  rect->x2 = max(rect->x2, dirty->x2);  in gm12u320_fb_mark_dirty()
    420  rect->y2 = max(rect->y2, dirty->y2);  in gm12u320_fb_mark_dirty()
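gm12u320_fb_mark_dirty() grows the pending update rectangle to cover a newly dirtied one by taking min on the top-left corner and max on the bottom-right. A self-contained version of that union:

    struct rect { int x1, y1, x2, y2; };   /* x2/y2 exclusive */

    static int min_i(int a, int b) { return a < b ? a : b; }
    static int max_i(int a, int b) { return a > b ? a : b; }

    /* Grow acc so it also covers dirty. */
    static void rect_union(struct rect *acc, const struct rect *dirty)
    {
        acc->x1 = min_i(acc->x1, dirty->x1);
        acc->y1 = min_i(acc->y1, dirty->y1);
        acc->x2 = max_i(acc->x2, dirty->x2);
        acc->y2 = max_i(acc->y2, dirty->y2);
    }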