Searched refs:pg_chunk (Results 1 – 4 of 4) sorted by relevance
 120  struct fl_pg_chunk pg_chunk;                          member
 355      if (q->use_pages && d->pg_chunk.page) {           in clear_rx_desc()
 356          (*d->pg_chunk.p_cnt)--;                        in clear_rx_desc()
 357          if (!*d->pg_chunk.p_cnt)                       in clear_rx_desc()
 359                  d->pg_chunk.mapping,                   in clear_rx_desc()
 362          put_page(d->pg_chunk.page);                    in clear_rx_desc()
 363          d->pg_chunk.page = NULL;                       in clear_rx_desc()
 393      if (q->pg_chunk.page) {                            in free_rx_bufs()
 394          __free_pages(q->pg_chunk.page, q->order);      in free_rx_bufs()
 395          q->pg_chunk.page = NULL;                       in free_rx_bufs()
[all …]
 111  struct fl_pg_chunk pg_chunk;    /* page chunk cache */    member
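
The hits above show the shared page-chunk pattern: several RX buffers are carved out of one page, a per-page count (p_cnt) tracks outstanding chunks, and the DMA mapping is torn down only when the last chunk is released, after which put_page() drops this buffer's page reference. A minimal sketch of that free path follows; the fl_pg_chunk field names come from the matches, while the surrounding types, the alloc_size parameter and the helper name are assumptions for illustration.

    /* Sketch only: drop one chunk reference from a shared RX page. */
    #include <linux/mm.h>
    #include <linux/pci.h>

    struct fl_pg_chunk {
            struct page   *page;     /* backing page shared by several bufs */
            unsigned int  offset;    /* offset of this chunk in the page */
            unsigned long *p_cnt;    /* outstanding chunks on the page */
            dma_addr_t    mapping;   /* DMA mapping of the whole page */
    };

    static void put_pg_chunk(struct pci_dev *pdev, struct fl_pg_chunk *d,
                             unsigned int alloc_size)
    {
            if (!d->page)
                    return;

            (*d->p_cnt)--;
            if (!*d->p_cnt)
                    /* last chunk gone: the page may now be unmapped */
                    pci_unmap_page(pdev, d->mapping, alloc_size,
                                   PCI_DMA_FROMDEVICE);

            put_page(d->page);       /* release this chunk's page reference */
            d->page = NULL;
    }
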
1054      if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)       in ql_get_curr_lchunk()
1057              lbq_desc->p.pg_chunk.map,                               in ql_get_curr_lchunk()
1093      if (!rx_ring->pg_chunk.page) {                                  in ql_get_next_chunk()
1095          rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |   in ql_get_next_chunk()
1098          if (unlikely(!rx_ring->pg_chunk.page)) {                    in ql_get_next_chunk()
1103          rx_ring->pg_chunk.offset = 0;                               in ql_get_next_chunk()
1104          map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,      in ql_get_next_chunk()
1108              __free_pages(rx_ring->pg_chunk.page,                    in ql_get_next_chunk()
1110              rx_ring->pg_chunk.page = NULL;                          in ql_get_next_chunk()
1115          rx_ring->pg_chunk.map = map;                                in ql_get_next_chunk()
[all …]
1371  struct page_chunk pg_chunk;                                 member
1442  struct page_chunk pg_chunk;    /* current page for chunks */    member
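
These matches show the ring-level chunk cache being refilled lazily: when the cached pg_chunk.page is exhausted, a fresh compound page is allocated, DMA-mapped with pci_map_page(), and freed again if the mapping fails. A hedged sketch of that refill step is below; the pg_chunk fields and the __GFP_COLD | __GFP_COMP flags come from the matches, while the remaining gfp flag, the order parameter, the struct and function names, and the error handling are assumptions.

    /* Sketch only: ensure the ring has a mapped page to carve chunks from. */
    #include <linux/errno.h>
    #include <linux/gfp.h>
    #include <linux/mm.h>
    #include <linux/pci.h>

    struct page_chunk {
            struct page  *page;      /* current page handed out in chunks */
            unsigned int offset;     /* next free offset within the page */
            dma_addr_t   map;        /* DMA mapping of the page */
    };

    static int get_next_chunk(struct pci_dev *pdev, struct page_chunk *pc,
                              unsigned int order)
    {
            dma_addr_t map;

            if (pc->page)
                    return 0;        /* current page still has room */

            pc->page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC,
                                   order);
            if (unlikely(!pc->page))
                    return -ENOMEM;

            pc->offset = 0;
            map = pci_map_page(pdev, pc->page, 0, PAGE_SIZE << order,
                               PCI_DMA_FROMDEVICE);
            if (pci_dma_mapping_error(pdev, map)) {
                    __free_pages(pc->page, order);
                    pc->page = NULL;
                    return -ENOMEM;
            }
            pc->map = map;
            return 0;
    }
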