
Searched refs: page_offset (Results 1 – 25 of 126) sorted by relevance


/drivers/scsi/fnic/
fnic_trace.c
71 fnic_trace_entries.page_offset[fnic_trace_entries.wr_idx]; in fnic_trace_get_buf()
124 fnic_trace_entries.page_offset[rd_idx]; in fnic_get_trace_data()
166 fnic_trace_entries.page_offset[rd_idx]; in fnic_get_trace_data()
490 fnic_trace_entries.page_offset = in fnic_trace_buf_init()
493 if (!fnic_trace_entries.page_offset) { in fnic_trace_buf_init()
503 memset((void *)fnic_trace_entries.page_offset, 0, in fnic_trace_buf_init()
514 fnic_trace_entries.page_offset[i] = fnic_buf_head; in fnic_trace_buf_init()
532 if (fnic_trace_entries.page_offset) { in fnic_trace_free()
533 vfree((void *)fnic_trace_entries.page_offset); in fnic_trace_free()
534 fnic_trace_entries.page_offset = NULL; in fnic_trace_free()
[all …]
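
The fnic hits above show page_offset used as a vmalloc'd table of per-trace-entry buffer addresses: allocated and zeroed in fnic_trace_buf_init(), indexed by read/write cursors, and released with vfree in fnic_trace_free(). A minimal userspace sketch of that lifecycle, with malloc standing in for vmalloc and all sizes and struct names invented for illustration:

/*
 * Userspace model of the fnic pattern: a table of per-entry buffer
 * addresses, filled at init, indexed by a write cursor, freed at exit.
 * All names and sizes here are illustrative, not taken from the driver.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define TRACE_MAX_PAGES 16
#define TRACE_ENTRY_SZ  64

struct trace_entries {
    unsigned long *page_offset; /* one buffer address per entry */
    unsigned int wr_idx;
};

static int trace_buf_init(struct trace_entries *t, char *buf)
{
    unsigned int i;

    t->page_offset = malloc(TRACE_MAX_PAGES * sizeof(*t->page_offset));
    if (!t->page_offset)
        return -1;
    memset(t->page_offset, 0, TRACE_MAX_PAGES * sizeof(*t->page_offset));

    /* Walk the flat buffer and record where each entry starts. */
    for (i = 0; i < TRACE_MAX_PAGES; i++)
        t->page_offset[i] = (unsigned long)(buf + i * TRACE_ENTRY_SZ);
    t->wr_idx = 0;
    return 0;
}

static void trace_free(struct trace_entries *t)
{
    if (t->page_offset) {
        free(t->page_offset);
        t->page_offset = NULL;
    }
}

int main(void)
{
    static char buf[TRACE_MAX_PAGES * TRACE_ENTRY_SZ];
    struct trace_entries t;

    if (trace_buf_init(&t, buf))
        return 1;
    printf("entry %u starts at %#lx\n", t.wr_idx, t.page_offset[t.wr_idx]);
    trace_free(&t);
    return 0;
}
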
/drivers/net/ethernet/cavium/liquidio/
octeon_network.h
298 pg_info->page_offset = 0; in recv_buffer_alloc()
300 skb_pg_info->page_offset = 0; in recv_buffer_alloc()
324 skb_pg_info->page_offset = 0; in recv_buffer_fast_alloc()
348 pg_info->page_offset = 0; in recv_buffer_recycle()
353 if (pg_info->page_offset == 0) in recv_buffer_recycle()
354 pg_info->page_offset = LIO_RXBUFFER_SZ; in recv_buffer_recycle()
356 pg_info->page_offset = 0; in recv_buffer_recycle()
384 skb_pg_info->page_offset = pg_info->page_offset; in recv_buffer_reuse()
398 pg_info->page_offset = 0; in recv_buffer_destroy()
415 pg_info->page_offset = 0; in recv_buffer_free()
[all …]
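
The liquidio recycle path toggles page_offset between 0 and LIO_RXBUFFER_SZ, carving each page into two receive buffers and handing out the unused half. A minimal sketch of that toggle, assuming a 4 KiB page split into 2 KiB halves (the driver's refcount checks are omitted):

/* Sketch: one page carved into two RX buffers; recycling flips the
 * offset to the unused half. Constants are illustrative. */
#include <stdio.h>

#define LIO_RXBUFFER_SZ 2048u
#define PAGE_SZ         4096u

struct pg_info {
    unsigned int page_offset;
};

static void recycle(struct pg_info *pg)
{
    /* Flip to the other half of the page for the next buffer. */
    if (pg->page_offset == 0)
        pg->page_offset = LIO_RXBUFFER_SZ;
    else
        pg->page_offset = 0;
}

int main(void)
{
    struct pg_info pg = { .page_offset = 0 };

    recycle(&pg);
    printf("next buffer at offset %u of %u\n", pg.page_offset, PAGE_SZ);
    recycle(&pg);
    printf("then back to offset %u\n", pg.page_offset);
    return 0;
}
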
/drivers/gpu/drm/vmwgfx/
vmwgfx_page_dirty.c
399 unsigned long page_offset; in vmw_bo_vm_mkwrite() local
415 page_offset = vmf->pgoff - drm_vma_node_start(&bo->base.vma_node); in vmw_bo_vm_mkwrite()
416 if (unlikely(page_offset >= bo->resource->num_pages)) { in vmw_bo_vm_mkwrite()
422 !test_bit(page_offset, &vbo->dirty->bitmap[0])) { in vmw_bo_vm_mkwrite()
425 __set_bit(page_offset, &dirty->bitmap[0]); in vmw_bo_vm_mkwrite()
426 dirty->start = min(dirty->start, page_offset); in vmw_bo_vm_mkwrite()
427 dirty->end = max(dirty->end, page_offset + 1); in vmw_bo_vm_mkwrite()
455 unsigned long page_offset; in vmw_bo_vm_fault() local
457 page_offset = vmf->pgoff - in vmw_bo_vm_fault()
459 if (page_offset >= bo->resource->num_pages || in vmw_bo_vm_fault()
[all …]
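
The vmwgfx mkwrite handler derives an object-relative page index from vmf->pgoff minus the VMA node start, bounds-checks it against the object's page count, then records it in a dirty bitmap and a running [start, end) range. A sketch of that bookkeeping, with a plain byte array standing in for the kernel bitmap and all sizes assumed:

/* Sketch of write-fault dirty tracking: pgoff -> object-relative page,
 * bounds check, bitmap mark, and min/max range update. The bitmap and
 * struct layout are simplified stand-ins, not the driver's types. */
#include <stdio.h>

#define NUM_PAGES 64u

struct dirty_tracker {
    unsigned char bitmap[NUM_PAGES];
    unsigned long start, end; /* [start, end) of dirty pages */
};

static int mark_dirty(struct dirty_tracker *d, unsigned long pgoff,
                      unsigned long vma_node_start)
{
    unsigned long page_offset = pgoff - vma_node_start;

    if (page_offset >= NUM_PAGES)
        return -1; /* fault outside the object: SIGBUS in the driver */

    if (!d->bitmap[page_offset]) {
        d->bitmap[page_offset] = 1;
        if (page_offset < d->start)
            d->start = page_offset;
        if (page_offset + 1 > d->end)
            d->end = page_offset + 1;
    }
    return 0;
}

int main(void)
{
    struct dirty_tracker d = { .start = NUM_PAGES, .end = 0 };

    mark_dirty(&d, 1037, 1024); /* page 13 of the object */
    mark_dirty(&d, 1029, 1024); /* page 5 */
    printf("dirty range: [%lu, %lu)\n", d.start, d.end);
    return 0;
}
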
/drivers/gpu/drm/ttm/
ttm_bo_vm.c
98 unsigned long page_offset) in ttm_bo_io_mem_pfn() argument
103 return bdev->funcs->io_mem_pfn(bo, page_offset); in ttm_bo_io_mem_pfn()
105 return (bo->resource->bus.offset >> PAGE_SHIFT) + page_offset; in ttm_bo_io_mem_pfn()
199 unsigned long page_offset; in ttm_bo_vm_fault_reserved() local
221 page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) + in ttm_bo_vm_fault_reserved()
226 if (unlikely(page_offset >= bo->resource->num_pages)) in ttm_bo_vm_fault_reserved()
251 pfn = ttm_bo_io_mem_pfn(bo, page_offset); in ttm_bo_vm_fault_reserved()
253 page = ttm->pages[page_offset]; in ttm_bo_vm_fault_reserved()
260 page_offset; in ttm_bo_vm_fault_reserved()
283 if (unlikely(++page_offset >= page_last)) in ttm_bo_vm_fault_reserved()
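
ttm_bo_vm.c computes an object-relative page_offset from the faulting address and, for linear I/O memory, turns it into a PFN by adding it to the bus offset shifted down by PAGE_SHIFT. A sketch of both computations, with the kernel struct fields flattened into plain parameters:

/* Sketch of the two TTM computations visible above: faulting address
 * -> object-relative page_offset, and bus offset -> PFN for a linear
 * aperture. Types are trimmed to just what the arithmetic needs. */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12

/* Page number of a fault inside a mapped object. */
static unsigned long fault_page_offset(uintptr_t address,
                                       uintptr_t vm_start,
                                       unsigned long vma_pgoff,
                                       unsigned long node_start)
{
    return ((address - vm_start) >> PAGE_SHIFT) + vma_pgoff - node_start;
}

/* Default io_mem_pfn: linear VRAM aperture, so PFN = base + index. */
static unsigned long io_mem_pfn(uint64_t bus_offset,
                                unsigned long page_offset)
{
    return (unsigned long)(bus_offset >> PAGE_SHIFT) + page_offset;
}

int main(void)
{
    unsigned long po = fault_page_offset(0x7f0000003000, 0x7f0000000000,
                                         0x100, 0x100);
    printf("page_offset = %lu, pfn = %#lx\n",
           po, io_mem_pfn(0xd0000000ull, po));
    return 0;
}
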
/drivers/infiniband/hw/mlx5/
mem.c
67 u64 page_offset; in __mlx5_umem_find_best_quantized_pgoff() local
80 page_offset = ib_umem_dma_offset(umem, page_size); in __mlx5_umem_find_best_quantized_pgoff()
81 while (page_offset & ~(u64)(page_offset_mask * (page_size / scale))) { in __mlx5_umem_find_best_quantized_pgoff()
83 page_offset = ib_umem_dma_offset(umem, page_size); in __mlx5_umem_find_best_quantized_pgoff()
94 (unsigned long)page_offset / (page_size / scale); in __mlx5_umem_find_best_quantized_pgoff()
srq_cmd.c
17 u32 page_offset = in->page_offset; in get_pas_size() local
21 u32 rq_sz_po = rq_sz + (page_offset * po_quanta); in get_pas_size()
34 MLX5_SET(wq, wq, page_offset, in->page_offset); in set_wq()
47 MLX5_SET(srqc, srqc, page_offset, in->page_offset); in set_srqc()
62 in->page_offset = MLX5_GET(wq, wq, page_offset); in get_wq()
75 in->page_offset = MLX5_GET(srqc, srqc, page_offset); in get_srqc()
111 MLX5_ADAPTER_PAGE_SHIFT, page_offset, \
112 64, &(in)->page_offset))
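
__mlx5_umem_find_best_quantized_pgoff() searches for the largest page size whose DMA offset is still representable in the hardware's quantized page_offset field: while the offset has bits the field cannot encode, it shrinks the page size and recomputes. A sketch of that loop over a plain address; the 6-bit mask and scale of 64 are invented for illustration, not the device's values:

/* Sketch of the quantized-page_offset search: keep halving the page
 * size until the buffer's offset within a page is representable as
 * (quanta * page_size / scale). All constants are illustrative. */
#include <stdio.h>
#include <stdint.h>

static uint64_t dma_offset(uint64_t dma_addr, uint64_t page_size)
{
    return dma_addr & (page_size - 1); /* offset within one page */
}

int main(void)
{
    const uint64_t page_offset_mask = 0x3f; /* 6-bit field, assumed */
    const uint64_t scale = 64;              /* quantum = page_size/64 */
    uint64_t dma_addr = 0x12345600;
    uint64_t page_size = 1ull << 21;        /* start optimistic: 2 MiB */
    uint64_t page_offset = dma_offset(dma_addr, page_size);

    while (page_offset & ~(page_offset_mask * (page_size / scale))) {
        page_size >>= 1;                    /* try a smaller page size */
        page_offset = dma_offset(dma_addr, page_size);
    }
    printf("page_size=%#llx quantized page_offset=%llu\n",
           (unsigned long long)page_size,
           (unsigned long long)(page_offset / (page_size / scale)));
    return 0;
}
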
/drivers/mtd/tests/
nandbiterrs.c
45 static unsigned page_offset; variable
46 module_param(page_offset, uint, S_IRUGO);
47 MODULE_PARM_DESC(page_offset, "Page number relative to dev start");
358 offset = (loff_t)page_offset * mtd->writesize; in mtd_nandbiterrs_init()
362 page_offset, offset, eraseblock); in mtd_nandbiterrs_init()
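
nandbiterrs takes page_offset as a module parameter naming a page relative to the device start and converts it to a byte offset by multiplying with the NAND write size. A one-step arithmetic sketch; the geometry values and the eraseblock derivation are assumptions:

/* Sketch: page-number parameter -> byte offset -> eraseblock index.
 * The MTD geometry values are illustrative. */
#include <stdio.h>

int main(void)
{
    unsigned int page_offset = 300;   /* module parameter: page number */
    unsigned int writesize = 2048;    /* NAND page size, assumed */
    unsigned int erasesize = 131072;  /* eraseblock size, assumed */

    long long offset = (long long)page_offset * writesize;
    unsigned int eraseblock = (unsigned int)(offset / erasesize);

    printf("page %u -> offset %lld, eraseblock %u\n",
           page_offset, offset, eraseblock);
    return 0;
}
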
/drivers/nvmem/
rave-sp-eeprom.c
161 const unsigned int page_offset = offset % RAVE_SP_EEPROM_PAGE_SIZE; in rave_sp_eeprom_page_access() local
172 if (WARN_ON(data_len > sizeof(page.data) - page_offset)) in rave_sp_eeprom_page_access()
187 memcpy(&page.data[page_offset], data, data_len); in rave_sp_eeprom_page_access()
199 memcpy(data, &page.data[page_offset], data_len); in rave_sp_eeprom_page_access()
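
rave-sp-eeprom reduces a byte offset to page_offset = offset % page size, rejects transfers that would run past the page's data array, and memcpy's into or out of page.data at that intra-page offset. A sketch assuming a 32-byte page:

/* Sketch: clamp an access to one EEPROM page and copy at the
 * intra-page offset. Page size and struct are assumptions. */
#include <stdio.h>
#include <string.h>

#define EEPROM_PAGE_SIZE 32u

struct eeprom_page {
    unsigned char data[EEPROM_PAGE_SIZE];
};

static int page_write(struct eeprom_page *page, unsigned int offset,
                      const void *data, size_t data_len)
{
    unsigned int page_offset = offset % EEPROM_PAGE_SIZE;

    /* A single transaction must not cross the page boundary. */
    if (data_len > sizeof(page->data) - page_offset)
        return -1;

    memcpy(&page->data[page_offset], data, data_len);
    return 0;
}

int main(void)
{
    struct eeprom_page page = { { 0 } };

    printf("write at 40 len 8: %d (offset %u in page)\n",
           page_write(&page, 40, "abcdefgh", 8), 40u % EEPROM_PAGE_SIZE);
    printf("write at 40 len 30: %d (would cross page)\n",
           page_write(&page, 40, "toolongtoolongtoolongtoolongto", 30));
    return 0;
}
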
/drivers/net/ethernet/intel/iavf/
iavf_txrx.c
684 rx_bi->page_offset, in iavf_clean_rx_ring()
697 rx_bi->page_offset = 0; in iavf_clean_rx_ring()
849 bi->page_offset = iavf_rx_offset(rx_ring); in iavf_alloc_mapped_page()
901 bi->page_offset, in iavf_alloc_rx_buffers()
908 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); in iavf_alloc_rx_buffers()
1143 new_buff->page_offset = old_buff->page_offset; in iavf_reuse_rx_page()
1190 if (rx_buffer->page_offset > IAVF_LAST_OFFSET) in iavf_can_reuse_rx_page()
1233 rx_buffer->page_offset, size, truesize); in iavf_add_rx_frag()
1237 rx_buffer->page_offset ^= truesize; in iavf_add_rx_frag()
1239 rx_buffer->page_offset += truesize; in iavf_add_rx_frag()
[all …]
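
The iavf hits show the recycling scheme common to the Intel Ethernet drivers in these results: after attaching a fragment, page_offset is XOR-flipped by truesize when the page is split into two halves (or simply advanced otherwise), and the page is reused only while page_offset stays at or below a last-offset threshold. A sketch of both paths; the threshold value is assumed:

/* Sketch of the Intel-style RX buffer recycle decision: flip between
 * two half-page slots, or advance through the page, and stop reusing
 * once the offset passes a threshold. Constants are illustrative. */
#include <stdio.h>
#include <stdbool.h>

#define PAGE_SZ     4096u
#define LAST_OFFSET (PAGE_SZ - 1536u) /* assumed reuse threshold */

struct rx_buffer {
    unsigned int page_offset;
};

static void advance(struct rx_buffer *b, unsigned int truesize,
                    bool half_page_split)
{
    if (half_page_split)
        b->page_offset ^= truesize;  /* ping-pong between two halves */
    else
        b->page_offset += truesize;  /* walk forward through the page */
}

static bool can_reuse(const struct rx_buffer *b)
{
    return b->page_offset <= LAST_OFFSET;
}

int main(void)
{
    struct rx_buffer b = { .page_offset = 0 };

    advance(&b, 2048, true);
    printf("after flip: offset=%u reuse=%d\n", b.page_offset, can_reuse(&b));
    advance(&b, 2048, true);
    printf("after flip: offset=%u reuse=%d\n", b.page_offset, can_reuse(&b));
    return 0;
}
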
/drivers/gpu/drm/qxl/
qxl_image.c
166 unsigned int page_base, page_offset, out_offset; in qxl_image_init_helper() local
175 page_offset = offset_in_page(out_offset); in qxl_image_init_helper()
176 size = min((int)(PAGE_SIZE - page_offset), remain); in qxl_image_init_helper()
179 k_data = ptr + page_offset; in qxl_image_init_helper()
qxl_object.c
207 struct qxl_bo *bo, int page_offset) in qxl_bo_kmap_atomic_page() argument
223 return io_mapping_map_atomic_wc(map, offset + page_offset); in qxl_bo_kmap_atomic_page()
226 rptr = bo->kptr + (page_offset * PAGE_SIZE); in qxl_bo_kmap_atomic_page()
235 rptr += page_offset * PAGE_SIZE; in qxl_bo_kmap_atomic_page()
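
qxl_image_init_helper() walks an output buffer chunk by chunk, splitting the running offset into a page index and offset_in_page() and bounding each copy by what remains in the current page; qxl_object.c then maps a single page at kptr + page_offset * PAGE_SIZE. A sketch of the chunking loop, with a plain memcpy standing in for the per-page atomic mapping:

/* Sketch: copy a buffer through per-page chunks, splitting the
 * running offset into (page, offset-in-page) and bounding each chunk
 * by what is left in the current page. */
#include <stdio.h>
#include <string.h>

#define PAGE_SZ 4096

static unsigned int offset_in_page(unsigned int off)
{
    return off & (PAGE_SZ - 1);
}

int main(void)
{
    static char dst[3 * PAGE_SZ], src[5000];
    unsigned int out_offset = 3000;   /* starting offset in dst */
    unsigned int in = 0;
    int remain = (int)sizeof(src);

    memset(src, 0xab, sizeof(src));
    while (remain > 0) {
        unsigned int page_offset = offset_in_page(out_offset);
        int size = (int)(PAGE_SZ - page_offset);

        if (size > remain)
            size = remain;
        /* the driver would kmap page (out_offset / PAGE_SZ) here */
        memcpy(dst + out_offset, src + in, (size_t)size);
        printf("copied %d bytes at page offset %u\n", size, page_offset);
        out_offset += (unsigned int)size;
        in += (unsigned int)size;
        remain -= size;
    }
    return 0;
}
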
/drivers/net/ethernet/sfc/falcon/
rx.c
59 return page_address(buf->page) + buf->page_offset; in ef4_rx_buf_va()
157 unsigned int page_offset; in ef4_init_rx_buffers() local
188 page_offset = sizeof(struct ef4_rx_page_state); in ef4_init_rx_buffers()
195 rx_buf->page_offset = page_offset + efx->rx_ip_align; in ef4_init_rx_buffers()
201 page_offset += efx->rx_page_buf_step; in ef4_init_rx_buffers()
202 } while (page_offset + efx->rx_page_buf_step <= PAGE_SIZE); in ef4_init_rx_buffers()
454 rx_buf->page, rx_buf->page_offset, in ef4_rx_packet_gro()
499 rx_buf->page_offset += hdr_len; in ef4_rx_mk_skb()
504 rx_buf->page, rx_buf->page_offset, in ef4_rx_mk_skb()
586 rx_buf->page_offset += efx->rx_prefix_size; in ef4_rx_packet()
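
ef4_init_rx_buffers() reserves room for a struct ef4_rx_page_state header, then packs receive buffers into the page, stepping page_offset by rx_page_buf_step until the next buffer would cross PAGE_SIZE. A sketch of the packing loop; the header size, stride, and IP alignment are invented values:

/* Sketch: pack RX buffers into one page after a small state header,
 * stepping the offset until the next buffer would not fit.
 * Sizes are illustrative, not the driver's. */
#include <stdio.h>

#define PAGE_SZ          4096u
#define PAGE_STATE_SZ    64u   /* per-page bookkeeping header, assumed */
#define RX_PAGE_BUF_STEP 1984u /* buffer stride, assumed */
#define RX_IP_ALIGN      2u    /* aligns the IP header, assumed */

int main(void)
{
    unsigned int page_offset = PAGE_STATE_SZ;
    unsigned int count = 0;

    do {
        printf("rx_buf %u at page_offset %u\n",
               count++, page_offset + RX_IP_ALIGN);
        page_offset += RX_PAGE_BUF_STEP;
    } while (page_offset + RX_PAGE_BUF_STEP <= PAGE_SZ);

    return 0;
}
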
/drivers/gpu/drm/gma500/
gem.c
144 pgoff_t page_offset; in psb_gem_fault() local
172 page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT; in psb_gem_fault()
178 pfn = page_to_pfn(r->pages[page_offset]); in psb_gem_fault()
/drivers/gpu/drm/vgem/
vgem_drv.c
82 pgoff_t page_offset; in vgem_gem_fault() local
83 page_offset = (vaddr - vma->vm_start) >> PAGE_SHIFT; in vgem_gem_fault()
87 if (page_offset >= num_pages) in vgem_gem_fault()
92 get_page(obj->pages[page_offset]); in vgem_gem_fault()
93 vmf->page = obj->pages[page_offset]; in vgem_gem_fault()
102 page_offset); in vgem_gem_fault()
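
vgem_gem_fault() is the simplest form of the fault pattern also visible in gma500 above: the object-relative page index is (vaddr - vm_start) >> PAGE_SHIFT, bounds-checked, then used to pick the backing page. A sketch of the index computation; in the kernel the result selects a struct page:

/* Sketch: fault address -> page index into an object's page array. */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12

static long fault_to_page_index(uintptr_t vaddr, uintptr_t vm_start,
                                unsigned long num_pages)
{
    unsigned long page_offset = (vaddr - vm_start) >> PAGE_SHIFT;

    if (page_offset >= num_pages)
        return -1; /* SIGBUS in the real handler */
    return (long)page_offset;
}

int main(void)
{
    printf("index = %ld\n",
           fault_to_page_index(0x7f0000005123, 0x7f0000000000, 16));
    return 0;
}
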
/drivers/net/ethernet/google/gve/
gve_rx_dqo.c
164 buf_state->page_info.page_offset = 0; in gve_alloc_page_dqo()
366 buf_state->page_info.page_offset); in gve_rx_post_buffers_dqo()
398 buf_state->page_info.page_offset; in gve_try_recycle_buf()
402 buf_state->page_info.page_offset += data_buffer_size; in gve_try_recycle_buf()
403 buf_state->page_info.page_offset &= (PAGE_SIZE - 1); in gve_try_recycle_buf()
408 if (buf_state->page_info.page_offset == in gve_try_recycle_buf()
507 buf_state->page_info.page_offset, in gve_rx_append_frags()
555 buf_state->page_info.page_offset, in gve_rx_dqo()
592 buf_state->page_info.page_offset, buf_len, in gve_rx_dqo()
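
gve_try_recycle_buf() advances page_offset by the fixed data-buffer size and masks with PAGE_SIZE - 1, so the offset cycles through the page's slots and a wrap back to 0 marks a full pass. A sketch of that modular walk, assuming 2 KiB slots in a 4 KiB page:

/* Sketch: cycle a buffer offset through fixed-size slots in a page;
 * wrapping back to 0 means every slot has been handed out once. */
#include <stdio.h>

#define PAGE_SZ          4096u
#define DATA_BUFFER_SIZE 2048u /* assumed slot size */

int main(void)
{
    unsigned int page_offset = 0;
    int laps = 0;

    for (int i = 0; i < 4; i++) {
        page_offset += DATA_BUFFER_SIZE;
        page_offset &= PAGE_SZ - 1;   /* modular walk within the page */
        if (page_offset == 0)
            laps++;                   /* completed a full pass */
        printf("recycle %d -> offset %u\n", i, page_offset);
    }
    printf("full laps: %d\n", laps);
    return 0;
}
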
/drivers/net/ethernet/mellanox/mlx4/
en_rx.c
70 frag->page_offset = priv->rx_headroom; in mlx4_alloc_page()
89 frags->page_offset); in mlx4_en_alloc_frags()
148 frags->page_offset = XDP_PACKET_HEADROOM; in mlx4_en_prepare_rx_desc()
488 dma_sync_single_range_for_cpu(priv->ddev, dma, frags->page_offset, in mlx4_en_complete_rx_desc()
491 __skb_fill_page_desc(skb, nr, page, frags->page_offset, in mlx4_en_complete_rx_desc()
496 frags->page_offset ^= PAGE_SIZE / 2; in mlx4_en_complete_rx_desc()
507 frags->page_offset += sz_align; in mlx4_en_complete_rx_desc()
508 release = frags->page_offset + frag_info->frag_size > PAGE_SIZE; in mlx4_en_complete_rx_desc()
704 va = page_address(frags[0].page) + frags[0].page_offset; in mlx4_en_process_rx_cq()
733 dma = frags[0].dma + frags[0].page_offset; in mlx4_en_process_rx_cq()
[all …]
/drivers/gpu/drm/panfrost/
panfrost_mmu.c
410 pgoff_t page_offset; in panfrost_mmu_map_fault_addr() local
429 page_offset = addr >> PAGE_SHIFT; in panfrost_mmu_map_fault_addr()
430 page_offset -= bomapping->mmnode.start; in panfrost_mmu_map_fault_addr()
456 if (pages[page_offset]) { in panfrost_mmu_map_fault_addr()
466 for (i = page_offset; i < page_offset + NUM_FAULT_PAGES; i++) { in panfrost_mmu_map_fault_addr()
478 sgt = &bo->sgts[page_offset / (SZ_2M / PAGE_SIZE)]; in panfrost_mmu_map_fault_addr()
479 ret = sg_alloc_table_from_pages(sgt, pages + page_offset, in panfrost_mmu_map_fault_addr()
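
panfrost_mmu_map_fault_addr() converts the faulting GPU address to a page index, subtracts the heap node's start to make the index object-relative, then selects the sg_table covering that 2 MiB chunk. A sketch of the index arithmetic with example addresses:

/* Sketch of the panfrost fault arithmetic: GPU address -> object-
 * relative page index -> which 2 MiB sgt chunk it belongs to. */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12
#define PAGE_SZ    (1u << PAGE_SHIFT)
#define SZ_2M      (2u * 1024 * 1024)

int main(void)
{
    uint64_t addr = 0x80243000;      /* faulting GPU address, example */
    uint64_t node_start = 0x80000;   /* heap node start, in pages */

    uint64_t page_offset = addr >> PAGE_SHIFT; /* absolute page index */
    page_offset -= node_start;                 /* object-relative */

    unsigned int sgt_idx = (unsigned int)(page_offset / (SZ_2M / PAGE_SZ));
    printf("page_offset=%llu -> sgt %u\n",
           (unsigned long long)page_offset, sgt_idx);
    return 0;
}
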
/drivers/net/ethernet/pensando/ionic/
ionic_txrx.c
71 buf_info->page_offset = 0; in ionic_rx_page_alloc()
110 buf_info->page_offset += size; in ionic_rx_buf_recycle()
111 if (buf_info->page_offset >= IONIC_PAGE_SIZE) in ionic_rx_buf_recycle()
155 IONIC_PAGE_SIZE - buf_info->page_offset)); in ionic_rx_frags()
159 buf_info->dma_addr + buf_info->page_offset, in ionic_rx_frags()
163 buf_info->page, buf_info->page_offset, frag_len, in ionic_rx_frags()
209 dma_sync_single_for_cpu(dev, buf_info->dma_addr + buf_info->page_offset, in ionic_rx_copybreak()
211 skb_copy_to_linear_data(skb, page_address(buf_info->page) + buf_info->page_offset, len); in ionic_rx_copybreak()
212 dma_sync_single_for_device(dev, buf_info->dma_addr + buf_info->page_offset, in ionic_rx_copybreak()
391 desc->addr = cpu_to_le64(buf_info->dma_addr + buf_info->page_offset); in ionic_rx_fill()
[all …]
/drivers/net/ethernet/sfc/
rx.c
93 rx_buf->page_offset += hdr_len; in efx_rx_mk_skb()
98 rx_buf->page, rx_buf->page_offset, in efx_rx_mk_skb()
177 rx_buf->page_offset += efx->rx_prefix_size; in efx_rx_packet()
303 rx_buf->page_offset += offset; in efx_do_xdp()
rx_common.h
23 return page_address(buf->page) + buf->page_offset; in efx_rx_buf_va()
57 unsigned int page_offset,
/drivers/net/ethernet/intel/ice/
ice_txrx.c
403 rx_buf->page_offset, in ice_clean_rx_ring()
413 rx_buf->page_offset = 0; in ice_clean_rx_ring()
648 bi->page_offset = rx_ring->rx_offset; in ice_alloc_mapped_page()
690 bi->page_offset, in ice_alloc_rx_bufs()
697 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); in ice_alloc_rx_bufs()
735 rx_buf->page_offset ^= size; in ice_rx_buf_adjust_pg_offset()
738 rx_buf->page_offset += size; in ice_rx_buf_adjust_pg_offset()
769 if (rx_buf->page_offset > ICE_LAST_OFFSET) in ice_can_reuse_rx_page()
809 rx_buf->page_offset, size, truesize); in ice_add_rx_frag()
840 new_buf->page_offset = old_buf->page_offset; in ice_reuse_rx_page()
[all …]
/drivers/net/ethernet/qlogic/qede/
qede_fp.c
57 sw_rx_data->page_offset = 0; in qede_alloc_rx_buffer()
535 new_mapping = curr_prod->mapping + curr_prod->page_offset; in qede_reuse_page()
563 curr_cons->page_offset += rxq->rx_buf_seg_size; in qede_realloc_rx_buffer()
565 if (curr_cons->page_offset == PAGE_SIZE) { in qede_realloc_rx_buffer()
570 curr_cons->page_offset -= rxq->rx_buf_seg_size; in qede_realloc_rx_buffer()
680 current_bd->page_offset + rxq->rx_headroom, in qede_fill_frag_skb()
747 buf = page_address(bd->data) + bd->page_offset; in qede_build_skb()
768 bd->page_offset += rxq->rx_buf_seg_size; in qede_tpa_rx_build_skb()
770 if (bd->page_offset == PAGE_SIZE) { in qede_tpa_rx_build_skb()
774 bd->page_offset -= rxq->rx_buf_seg_size; in qede_tpa_rx_build_skb()
[all …]
/drivers/mtd/devices/
mtd_dataflash.c
90 unsigned short page_offset; /* offset in flash address */ member
199 pageaddr = pageaddr << priv->page_offset; in dataflash_erase()
257 addr = (((unsigned)from / priv->page_size) << priv->page_offset) in dataflash_read()
357 addr = pageaddr << priv->page_offset; in dataflash_write()
402 addr = pageaddr << priv->page_offset; in dataflash_write()
645 priv->page_offset = pageoffset; in add_dataflash_otp()
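
In mtd_dataflash, page_offset means something different from the other hits: it is the bit position of the page number inside a DataFlash command address, so a device address is built as (byte / page_size) << page_offset plus the remainder within the page. A sketch with the classic 264-byte DataFlash page geometry:

/* Sketch: DataFlash addressing, where page_offset is a bit shift.
 * With 264-byte pages the page number occupies the address bits
 * above bit 9, and the byte-within-page sits below it. */
#include <stdio.h>

int main(void)
{
    unsigned int page_size = 264;  /* DataFlash "power of 2 + extra" page */
    unsigned int page_offset = 9;  /* bits reserved for the byte offset */
    unsigned int from = 1000;      /* linear byte position on the device */

    unsigned int addr = ((from / page_size) << page_offset)
                      + (from % page_size);

    printf("byte %u -> page %u, offset %u, device addr %#x\n",
           from, from / page_size, from % page_size, addr);
    return 0;
}
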
/drivers/net/ethernet/intel/i40e/
i40e_txrx.c
1382 new_buff->page_offset = old_buff->page_offset; in i40e_reuse_rx_page()
1512 rx_bi->page_offset, in i40e_clean_rx_ring()
1525 rx_bi->page_offset = 0; in i40e_clean_rx_ring()
1692 bi->page_offset = rx_ring->rx_offset; in i40e_alloc_mapped_page()
1725 bi->page_offset, in i40e_alloc_rx_buffers()
1732 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); in i40e_alloc_rx_buffers()
2006 if (rx_buffer->page_offset > I40E_LAST_OFFSET) in i40e_can_reuse_rx_page()
2046 rx_buffer->page_offset, size, truesize); in i40e_add_rx_frag()
2050 rx_buffer->page_offset ^= truesize; in i40e_add_rx_frag()
2052 rx_buffer->page_offset += truesize; in i40e_add_rx_frag()
[all …]
/drivers/gpu/drm/
drm_vma_manager.c
86 unsigned long page_offset, unsigned long size) in drm_vma_offset_manager_init() argument
89 drm_mm_init(&mgr->vm_addr_space_mm, page_offset, size); in drm_vma_offset_manager_init()
