
Lines Matching refs:lbq_desc
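All of the matches below come from the large-buffer-queue (lbq) receive path of the qlge 10GbE driver (qlge_main.c). Each lbq descriptor hands the chip one buffer chunk carved out of a compound "master" page. For orientation, a sketch of the descriptor layout these accesses imply; field names are taken from the references below and from the driver's qlge.h, but the exact layout may differ between kernel versions:

    struct page_chunk {
            struct page *page;      /* master page backing this chunk */
            void *va;               /* virtual address of this chunk */
            u64 map;                /* DMA mapping of the master page */
            unsigned int offset;    /* chunk offset within the page */
            unsigned int last_flag; /* set on the final chunk of a page */
    };

    struct bq_desc {
            union {
                    struct page_chunk pg_chunk;
                    struct sk_buff *skb;
            } p;
            __le64 *addr;           /* shadow of this slot's hw queue entry */
            u32 index;
            DEFINE_DMA_UNMAP_ADDR(mapaddr);
            DEFINE_DMA_UNMAP_LEN(maplen);
    };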

1033 struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx]; in ql_get_curr_lbuf() local
1038 return lbq_desc; in ql_get_curr_lbuf()
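Lines 1033-1038 are essentially all of ql_get_curr_lbuf(): it returns the descriptor at the current consumer index and advances that index. A sketch with the bookkeeping the fragments omit; the free-count increment and the wraparound test are assumptions based on the upstream driver:

    static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
    {
            struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];

            rx_ring->lbq_free_cnt++;                /* one more slot to refill */
            rx_ring->lbq_curr_idx++;
            if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
                    rx_ring->lbq_curr_idx = 0;      /* wrap the ring */
            return lbq_desc;
    }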
1044 struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring); in ql_get_curr_lchunk() local
1047 dma_unmap_addr(lbq_desc, mapaddr), in ql_get_curr_lchunk()
1054 if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size) in ql_get_curr_lchunk()
1057 lbq_desc->p.pg_chunk.map, in ql_get_curr_lchunk()
1060 return lbq_desc; in ql_get_curr_lchunk()
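ql_get_curr_lchunk() (lines 1044-1060) wraps ql_get_curr_lbuf() and makes the chunk CPU-visible; when the chunk is the last one in its master page, the whole page mapping is released. A sketch, assuming the legacy pci_* DMA helpers the driver used at the time:

    static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
                                              struct rx_ring *rx_ring)
    {
            struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);

            pci_dma_sync_single_for_cpu(qdev->pdev,
                                        dma_unmap_addr(lbq_desc, mapaddr),
                                        rx_ring->lbq_buf_size,
                                        PCI_DMA_FROMDEVICE);

            /* Last chunk of the master page: drop its DMA mapping. */
            if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
                            == ql_lbq_block_size(qdev))
                    pci_unmap_page(qdev->pdev, lbq_desc->p.pg_chunk.map,
                                   ql_lbq_block_size(qdev),
                                   PCI_DMA_FROMDEVICE);
            return lbq_desc;
    }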
1091 struct bq_desc *lbq_desc) in ql_get_next_chunk() argument
1122 lbq_desc->p.pg_chunk = rx_ring->pg_chunk; in ql_get_next_chunk()
1130 lbq_desc->p.pg_chunk.last_flag = 1; in ql_get_next_chunk()
1134 lbq_desc->p.pg_chunk.last_flag = 0; in ql_get_next_chunk()
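ql_get_next_chunk() (lines 1091-1134) carves the next lbq_buf_size chunk out of the ring's master page, allocating and DMA-mapping a fresh compound page when the previous one is used up. The fragments show the copy into the descriptor and the last_flag handling; the allocation path around them is a sketch based on the upstream driver:

    static int ql_get_next_chunk(struct ql_adapter *qdev,
                                 struct rx_ring *rx_ring,
                                 struct bq_desc *lbq_desc)
    {
            if (!rx_ring->pg_chunk.page) {
                    u64 map;

                    rx_ring->pg_chunk.page =
                            alloc_pages(GFP_ATOMIC | __GFP_COMP,
                                        qdev->lbq_buf_order);
                    if (unlikely(!rx_ring->pg_chunk.page))
                            return -ENOMEM;
                    rx_ring->pg_chunk.offset = 0;
                    map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page, 0,
                                       ql_lbq_block_size(qdev),
                                       PCI_DMA_FROMDEVICE);
                    if (pci_dma_mapping_error(qdev->pdev, map)) {
                            __free_pages(rx_ring->pg_chunk.page,
                                         qdev->lbq_buf_order);
                            rx_ring->pg_chunk.page = NULL;
                            return -ENOMEM;
                    }
                    rx_ring->pg_chunk.map = map;
                    rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
            }

            /* Hand the current chunk to this descriptor. */
            lbq_desc->p.pg_chunk = rx_ring->pg_chunk;

            /* Advance the master chunk: mark the final chunk of a page,
             * otherwise take an extra page reference for the next chunk.
             */
            rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
            if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
                    rx_ring->pg_chunk.page = NULL;
                    lbq_desc->p.pg_chunk.last_flag = 1;
            } else {
                    rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
                    get_page(rx_ring->pg_chunk.page);
                    lbq_desc->p.pg_chunk.last_flag = 0;
            }
            return 0;
    }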
1143 struct bq_desc *lbq_desc; in ql_update_lbq() local
1152 lbq_desc = &rx_ring->lbq[clean_idx]; in ql_update_lbq()
1153 if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) { in ql_update_lbq()
1161 map = lbq_desc->p.pg_chunk.map + in ql_update_lbq()
1162 lbq_desc->p.pg_chunk.offset; in ql_update_lbq()
1163 dma_unmap_addr_set(lbq_desc, mapaddr, map); in ql_update_lbq()
1164 dma_unmap_len_set(lbq_desc, maplen, in ql_update_lbq()
1166 *lbq_desc->addr = cpu_to_le64(map); in ql_update_lbq()
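ql_update_lbq() (lines 1143-1166) is the refill side: for each clean slot it grabs a chunk, records the per-chunk DMA address (master mapping plus chunk offset) for later unmapping, and publishes the little-endian address through the shadowed hardware queue entry. The core of the loop body; the device sync and the index wraparound are assumptions based on the upstream driver:

    lbq_desc = &rx_ring->lbq[clean_idx];
    if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
            rx_ring->lbq_clean_idx = clean_idx;
            return;                         /* out of pages; retry later */
    }

    map = lbq_desc->p.pg_chunk.map + lbq_desc->p.pg_chunk.offset;
    dma_unmap_addr_set(lbq_desc, mapaddr, map);
    dma_unmap_len_set(lbq_desc, maplen, rx_ring->lbq_buf_size);
    *lbq_desc->addr = cpu_to_le64(map);     /* publish to the hw queue */

    pci_dma_sync_single_for_device(qdev->pdev, map,
                                   rx_ring->lbq_buf_size,
                                   PCI_DMA_FROMDEVICE);
    if (++clean_idx == rx_ring->lbq_len)
            clean_idx = 0;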
1499 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); in ql_process_mac_rx_gro_page() local
1505 put_page(lbq_desc->p.pg_chunk.page); in ql_process_mac_rx_gro_page()
1515 put_page(lbq_desc->p.pg_chunk.page); in ql_process_mac_rx_gro_page()
1518 prefetch(lbq_desc->p.pg_chunk.va); in ql_process_mac_rx_gro_page()
1520 lbq_desc->p.pg_chunk.page, in ql_process_mac_rx_gro_page()
1521 lbq_desc->p.pg_chunk.offset, in ql_process_mac_rx_gro_page()
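ql_process_mac_rx_gro_page() (lines 1499-1521) receives a frame that lives entirely in one page chunk and appends it as a fragment to a GRO skb; on every error path the chunk's page reference is dropped with put_page(). A condensed sketch, where length is the frame length from the completion and the napi_get_frags()/napi_gro_frags() pairing is an assumption based on the upstream driver:

    struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
    struct napi_struct *napi = &rx_ring->napi;
    struct sk_buff *skb;

    if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
            put_page(lbq_desc->p.pg_chunk.page);    /* frame error: drop */
            return;
    }
    skb = napi_get_frags(napi);
    if (!skb) {
            put_page(lbq_desc->p.pg_chunk.page);    /* no skb: drop */
            return;
    }
    prefetch(lbq_desc->p.pg_chunk.va);
    skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
                       lbq_desc->p.pg_chunk.page,
                       lbq_desc->p.pg_chunk.offset, length);
    skb->len += length;
    skb->data_len += length;
    skb->truesize += length;
    napi_gro_frags(napi);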
1548 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); in ql_process_mac_rx_page() local
1555 put_page(lbq_desc->p.pg_chunk.page); in ql_process_mac_rx_page()
1559 addr = lbq_desc->p.pg_chunk.va; in ql_process_mac_rx_page()
1584 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page, in ql_process_mac_rx_page()
1585 lbq_desc->p.pg_chunk.offset + hlen, in ql_process_mac_rx_page()
1628 put_page(lbq_desc->p.pg_chunk.page); in ql_process_mac_rx_page()
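ql_process_mac_rx_page() (lines 1548-1628) handles the non-GRO single-chunk case: it allocates a small skb, copies the Ethernet header out of the chunk's virtual address, and attaches the payload that follows the header as a page fragment; the error exit at line 1628 releases the page. The header/fragment split in sketch form; the hlen value and the skb_put_data() copy helper are assumptions:

    void *addr = lbq_desc->p.pg_chunk.va;
    size_t hlen = ETH_HLEN;

    skb_put_data(skb, addr, hlen);          /* header by copy */
    skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
                       lbq_desc->p.pg_chunk.offset + hlen,
                       length - hlen);      /* payload by reference */
    skb->len += length - hlen;
    skb->data_len += length - hlen;
    skb->truesize += length - hlen;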
1764 struct bq_desc *lbq_desc; in ql_build_rx_skb() local
1856 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); in ql_build_rx_skb()
1859 lbq_desc->p.pg_chunk.offset, length); in ql_build_rx_skb()
1860 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page, in ql_build_rx_skb()
1861 lbq_desc->p.pg_chunk.offset, in ql_build_rx_skb()
1872 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); in ql_build_rx_skb()
1880 dma_unmap_addr(lbq_desc, in ql_build_rx_skb()
1882 dma_unmap_len(lbq_desc, maplen), in ql_build_rx_skb()
1889 lbq_desc->p.pg_chunk.page, in ql_build_rx_skb()
1890 lbq_desc->p.pg_chunk.offset, in ql_build_rx_skb()
1896 lbq_desc->p.pg_chunk.va, in ql_build_rx_skb()
1936 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); in ql_build_rx_skb()
1944 lbq_desc->p.pg_chunk.page, in ql_build_rx_skb()
1945 lbq_desc->p.pg_chunk.offset, in ql_build_rx_skb()
1953 ql_update_mac_hdr_len(qdev, ib_mac_rsp, lbq_desc->p.pg_chunk.va, in ql_build_rx_skb()
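ql_build_rx_skb() (lines 1764-1953) stitches a frame together from small and large buffers; the three lbq touches above appear to correspond to three layouts: chunk data chained to a header skb (1856-1861), a whole frame, headers and data, in one chunk that must be unmapped via the saved dma_unmap_addr/len (1872-1896), and a frame spanning chained buffers (1936-1953), where ql_update_mac_hdr_len() reads the header length straight out of the chunk's virtual address. The single-chunk attach common to these cases, in sketch form:

    lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
    skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
                       lbq_desc->p.pg_chunk.offset, length);
    skb->len += length;
    skb->data_len += length;
    skb->truesize += length;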
2831 struct bq_desc *lbq_desc; in ql_free_lbq_buffers() local
2838 lbq_desc = &rx_ring->lbq[curr_idx]; in ql_free_lbq_buffers()
2840 if (lbq_desc->p.pg_chunk.last_flag) { in ql_free_lbq_buffers()
2842 lbq_desc->p.pg_chunk.map, in ql_free_lbq_buffers()
2845 lbq_desc->p.pg_chunk.last_flag = 0; in ql_free_lbq_buffers()
2848 put_page(lbq_desc->p.pg_chunk.page); in ql_free_lbq_buffers()
2849 lbq_desc->p.pg_chunk.page = NULL; in ql_free_lbq_buffers()
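Teardown (lines 2831-2849): ql_free_lbq_buffers() walks the descriptors that still hold chunks, unmaps each master page exactly once (on the descriptor flagged last_flag), and drops every chunk's page reference. A sketch of the loop, with the curr-to-clean index walk assumed from the upstream driver:

    while (curr_idx != clean_idx) {
            lbq_desc = &rx_ring->lbq[curr_idx];

            if (lbq_desc->p.pg_chunk.last_flag) {
                    /* Final chunk of its page: release the mapping. */
                    pci_unmap_page(qdev->pdev, lbq_desc->p.pg_chunk.map,
                                   ql_lbq_block_size(qdev),
                                   PCI_DMA_FROMDEVICE);
                    lbq_desc->p.pg_chunk.last_flag = 0;
            }

            put_page(lbq_desc->p.pg_chunk.page);
            lbq_desc->p.pg_chunk.page = NULL;

            if (++curr_idx == rx_ring->lbq_len)
                    curr_idx = 0;
    }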
2919 struct bq_desc *lbq_desc; in ql_init_lbq_ring() local
2924 lbq_desc = &rx_ring->lbq[i]; in ql_init_lbq_ring()
2925 memset(lbq_desc, 0, sizeof(*lbq_desc)); in ql_init_lbq_ring()
2926 lbq_desc->index = i; in ql_init_lbq_ring()
2927 lbq_desc->addr = bq; in ql_init_lbq_ring()
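Initialization (lines 2919-2927): ql_init_lbq_ring() zeroes every descriptor and points each one's addr at its slot in the queue the hardware reads, so ql_update_lbq() can later publish a chunk address with a single store. A sketch, assuming lbq_base is that DMA-coherent block:

    static void ql_init_lbq_ring(struct ql_adapter *qdev,
                                 struct rx_ring *rx_ring)
    {
            struct bq_desc *lbq_desc;
            __le64 *bq = rx_ring->lbq_base;
            int i;

            for (i = 0; i < rx_ring->lbq_len; i++) {
                    lbq_desc = &rx_ring->lbq[i];
                    memset(lbq_desc, 0, sizeof(*lbq_desc));
                    lbq_desc->index = i;
                    lbq_desc->addr = bq;    /* shadow of hw queue entry i */
                    bq++;
            }
    }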