// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
 */

#include "ena_eth_com.h"

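/* Return the RX completion descriptor at the CQ head if its phase bit
 * matches the expected phase, or NULL when no new completion is available.
 */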
static struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
	struct ena_com_io_cq *io_cq)
{
	struct ena_eth_io_rx_cdesc_base *cdesc;
	u16 expected_phase, head_masked;
	u16 desc_phase;

	head_masked = io_cq->head & (io_cq->q_depth - 1);
	expected_phase = io_cq->phase;

	cdesc = (struct ena_eth_io_rx_cdesc_base *)(io_cq->cdesc_addr.virt_addr
			+ (head_masked * io_cq->cdesc_entry_size_in_bytes));

	desc_phase = (READ_ONCE(cdesc->status) &
		      ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >>
		     ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT;

	if (desc_phase != expected_phase)
		return NULL;

	/* Make sure we read the rest of the descriptor after the phase bit
	 * has been read
	 */
	dma_rmb();

	return cdesc;
}

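/* Return a pointer to the descriptor slot at the SQ tail for a host-memory
 * (regular placement) submission queue.
 */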
static void *get_sq_desc_regular_queue(struct ena_com_io_sq *io_sq)
{
	u16 tail_masked;
	u32 offset;

	tail_masked = io_sq->tail & (io_sq->q_depth - 1);

	offset = tail_masked * io_sq->desc_entry_size;

	return (void *)((uintptr_t)io_sq->desc_addr.virt_addr + offset);
}

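/* Copy a completed bounce buffer (one LLQ descriptor list entry) to device
 * memory at the SQ tail, accounting for the TX burst budget and advancing
 * the tail and phase.
 */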
static int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq,
					      u8 *bounce_buffer)
{
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;

	u16 dst_tail_mask;
	u32 dst_offset;

	dst_tail_mask = io_sq->tail & (io_sq->q_depth - 1);
	dst_offset = dst_tail_mask * llq_info->desc_list_entry_size;

	if (is_llq_max_tx_burst_exists(io_sq)) {
		if (unlikely(!io_sq->entries_in_tx_burst_left)) {
			netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
				   "Error: trying to send more packets than tx burst allows\n");
			return -ENOSPC;
		}

		io_sq->entries_in_tx_burst_left--;
		netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
			   "Decreasing entries_in_tx_burst_left of queue %d to %d\n",
			   io_sq->qid, io_sq->entries_in_tx_burst_left);
	}

	/* Make sure everything was written into the bounce buffer before
	 * writing the bounce buffer to the device
	 */
	wmb();

	/* The line is completed. Copy it to dev */
	__iowrite64_copy(io_sq->desc_addr.pbuf_dev_addr + dst_offset,
			 bounce_buffer, (llq_info->desc_list_entry_size) / 8);

	io_sq->tail++;

	/* Switch phase bit in case of wrap around */
	if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
		io_sq->phase ^= 1;

	return 0;
}

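/* For LLQ mode, copy the packet header into the current bounce buffer right
 * after the descriptors that precede it. No-op for host-memory queues.
 */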
static int ena_com_write_header_to_bounce(struct ena_com_io_sq *io_sq,
					  u8 *header_src,
					  u16 header_len)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
	u8 *bounce_buffer = pkt_ctrl->curr_bounce_buf;
	u16 header_offset;

	if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST))
		return 0;

	header_offset =
		llq_info->descs_num_before_header * io_sq->desc_entry_size;

	if (unlikely((header_offset + header_len) >
		     llq_info->desc_list_entry_size)) {
		netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
			   "Trying to write header larger than llq entry can accommodate\n");
		return -EFAULT;
	}

	if (unlikely(!bounce_buffer)) {
		netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
			   "Bounce buffer is NULL\n");
		return -EFAULT;
	}

	memcpy(bounce_buffer + header_offset, header_src, header_len);

	return 0;
}

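/* Return the next descriptor slot inside the current LLQ bounce buffer and
 * advance the per-line bookkeeping.
 */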
static void *get_sq_desc_llq(struct ena_com_io_sq *io_sq)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	u8 *bounce_buffer;
	void *sq_desc;

	bounce_buffer = pkt_ctrl->curr_bounce_buf;

	if (unlikely(!bounce_buffer)) {
		netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
			   "Bounce buffer is NULL\n");
		return NULL;
	}

	sq_desc = bounce_buffer + pkt_ctrl->idx * io_sq->desc_entry_size;
	pkt_ctrl->idx++;
	pkt_ctrl->descs_left_in_line--;

	return sq_desc;
}

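/* Flush a partially filled bounce buffer to the device (LLQ only) and reset
 * the packet control state for the next packet.
 */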
static int ena_com_close_bounce_buffer(struct ena_com_io_sq *io_sq)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
	int rc;

	if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST))
		return 0;

	/* bounce buffer was used, so write it and get a new one */
	if (likely(pkt_ctrl->idx)) {
		rc = ena_com_write_bounce_buffer_to_dev(io_sq,
							pkt_ctrl->curr_bounce_buf);
		if (unlikely(rc)) {
			netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
				   "Failed to write bounce buffer to device\n");
			return rc;
		}

		pkt_ctrl->curr_bounce_buf =
			ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
		memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
		       0x0, llq_info->desc_list_entry_size);
	}

	pkt_ctrl->idx = 0;
	pkt_ctrl->descs_left_in_line = llq_info->descs_num_before_header;
	return 0;
}

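/* Dispatch to the LLQ or host-memory variant according to the queue's
 * placement policy.
 */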
static void *get_sq_desc(struct ena_com_io_sq *io_sq)
{
	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
		return get_sq_desc_llq(io_sq);

	return get_sq_desc_regular_queue(io_sq);
}

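/* LLQ flavour of the tail update: when the current descriptor line is full,
 * write it to the device and start a fresh bounce buffer line.
 */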
static int ena_com_sq_update_llq_tail(struct ena_com_io_sq *io_sq)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
	int rc;

	if (!pkt_ctrl->descs_left_in_line) {
		rc = ena_com_write_bounce_buffer_to_dev(io_sq,
							pkt_ctrl->curr_bounce_buf);
		if (unlikely(rc)) {
			netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
				   "Failed to write bounce buffer to device\n");
			return rc;
		}

		pkt_ctrl->curr_bounce_buf =
			ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
		memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
		       0x0, llq_info->desc_list_entry_size);

		pkt_ctrl->idx = 0;
		if (unlikely(llq_info->desc_stride_ctrl == ENA_ADMIN_SINGLE_DESC_PER_ENTRY))
			pkt_ctrl->descs_left_in_line = 1;
		else
			pkt_ctrl->descs_left_in_line =
			llq_info->desc_list_entry_size / io_sq->desc_entry_size;
	}

	return 0;
}

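/* Advance the SQ tail, toggling the phase bit on wrap-around for host-memory
 * queues and delegating to the LLQ helper otherwise.
 */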
static int ena_com_sq_update_tail(struct ena_com_io_sq *io_sq)
{
	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
		return ena_com_sq_update_llq_tail(io_sq);

	io_sq->tail++;

	/* Switch phase bit in case of wrap around */
	if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
		io_sq->phase ^= 1;

	return 0;
}

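/* Translate an RX completion descriptor index into a pointer within the CQ's
 * descriptor ring.
 */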
static struct ena_eth_io_rx_cdesc_base *
	ena_com_rx_cdesc_idx_to_ptr(struct ena_com_io_cq *io_cq, u16 idx)
{
	idx &= (io_cq->q_depth - 1);
	return (struct ena_eth_io_rx_cdesc_base *)
		((uintptr_t)io_cq->cdesc_addr.virt_addr +
		idx * io_cq->cdesc_entry_size_in_bytes);
}

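/* Walk the CQ from the current head and count the completion descriptors
 * that belong to the next received packet. Returns the descriptor count and
 * the index of the packet's first cdesc, or 0 if the packet is not yet fully
 * completed (the partial count is accumulated for the next call).
 */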
static u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
				    u16 *first_cdesc_idx)
{
	struct ena_eth_io_rx_cdesc_base *cdesc;
	u16 count = 0, head_masked;
	u32 last = 0;

	do {
		cdesc = ena_com_get_next_rx_cdesc(io_cq);
		if (!cdesc)
			break;

		ena_com_cq_inc_head(io_cq);
		count++;
		last = (READ_ONCE(cdesc->status) &
			ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >>
		       ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT;
	} while (!last);

	if (last) {
		*first_cdesc_idx = io_cq->cur_rx_pkt_cdesc_start_idx;
		count += io_cq->cur_rx_pkt_cdesc_count;

		head_masked = io_cq->head & (io_cq->q_depth - 1);

		io_cq->cur_rx_pkt_cdesc_count = 0;
		io_cq->cur_rx_pkt_cdesc_start_idx = head_masked;

		netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
			   "ENA q_id: %d packets were completed. first desc idx %u descs# %d\n",
			   io_cq->qid, *first_cdesc_idx, count);
	} else {
		io_cq->cur_rx_pkt_cdesc_count += count;
		count = 0;
	}

	return count;
}

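/* Build a TX meta descriptor (MSS, header lengths/offsets, phase) at the SQ
 * tail and advance the tail.
 */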
static int ena_com_create_meta(struct ena_com_io_sq *io_sq,
			       struct ena_com_tx_meta *ena_meta)
{
	struct ena_eth_io_tx_meta_desc *meta_desc = NULL;

	meta_desc = get_sq_desc(io_sq);
	if (unlikely(!meta_desc))
		return -EFAULT;

	memset(meta_desc, 0x0, sizeof(struct ena_eth_io_tx_meta_desc));

	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_DESC_MASK;

	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK;

	/* bits 0-9 of the mss */
	meta_desc->word2 |= ((u32)ena_meta->mss <<
		ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK;
	/* bits 10-13 of the mss */
	meta_desc->len_ctrl |= ((ena_meta->mss >> 10) <<
		ENA_ETH_IO_TX_META_DESC_MSS_HI_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_MSS_HI_MASK;

	/* Extended meta desc */
	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK;
	meta_desc->len_ctrl |= ((u32)io_sq->phase <<
		ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_PHASE_MASK;

	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_FIRST_MASK;
	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_STORE_MASK;

	meta_desc->word2 |= ena_meta->l3_hdr_len &
		ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK;
	meta_desc->word2 |= (ena_meta->l3_hdr_offset <<
		ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK;

	meta_desc->word2 |= ((u32)ena_meta->l4_hdr_len <<
		ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK;

	return ena_com_sq_update_tail(io_sq);
}

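/* Create a TX meta descriptor when needed: always when meta caching is
 * disabled, otherwise only when the meta differs from the cached copy.
 * *have_meta reports whether a meta descriptor was written.
 */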
static int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
						 struct ena_com_tx_ctx *ena_tx_ctx,
						 bool *have_meta)
{
	struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;

	/* When disable meta caching is set, don't bother to save the meta and
	 * compare it to the stored version, just create the meta
	 */
	if (io_sq->disable_meta_caching) {
		*have_meta = true;
		return ena_com_create_meta(io_sq, ena_meta);
	}

	if (ena_com_meta_desc_changed(io_sq, ena_tx_ctx)) {
		*have_meta = true;
		/* Cache the meta desc */
		memcpy(&io_sq->cached_tx_meta, ena_meta,
		       sizeof(struct ena_com_tx_meta));
		return ena_com_create_meta(io_sq, ena_meta);
	}

	*have_meta = false;
	return 0;
}

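/* Extract L3/L4 protocol, checksum, hash and fragmentation flags from an RX
 * completion descriptor into the RX context.
 */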
static void ena_com_rx_set_flags(struct ena_com_io_cq *io_cq,
				 struct ena_com_rx_ctx *ena_rx_ctx,
				 struct ena_eth_io_rx_cdesc_base *cdesc)
{
	ena_rx_ctx->l3_proto = cdesc->status &
		ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK;
	ena_rx_ctx->l4_proto =
		(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT;
	ena_rx_ctx->l3_csum_err =
		!!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT);
	ena_rx_ctx->l4_csum_err =
		!!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT);
	ena_rx_ctx->l4_csum_checked =
		!!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_SHIFT);
	ena_rx_ctx->hash = cdesc->hash;
	ena_rx_ctx->frag =
		(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT;

	netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
		   "l3_proto %d l4_proto %d l3_csum_err %d l4_csum_err %d hash %d frag %d cdesc_status %x\n",
		   ena_rx_ctx->l3_proto, ena_rx_ctx->l4_proto,
		   ena_rx_ctx->l3_csum_err, ena_rx_ctx->l4_csum_err,
		   ena_rx_ctx->hash, ena_rx_ctx->frag, cdesc->status);
}

/*****************************************************************************/
/*****************************     API      **********************************/
/*****************************************************************************/

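/* Prepare a TX packet for transmission: write the pushed header (LLQ),
 * create a meta descriptor if needed, then fill one TX descriptor per buffer
 * and report the number of hardware descriptors consumed in *nb_hw_desc.
 */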
int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
		       struct ena_com_tx_ctx *ena_tx_ctx,
		       int *nb_hw_desc)
{
	struct ena_eth_io_tx_desc *desc = NULL;
	struct ena_com_buf *ena_bufs = ena_tx_ctx->ena_bufs;
	void *buffer_to_push = ena_tx_ctx->push_header;
	u16 header_len = ena_tx_ctx->header_len;
	u16 num_bufs = ena_tx_ctx->num_bufs;
	u16 start_tail = io_sq->tail;
	int i, rc;
	bool have_meta;
	u64 addr_hi;

	WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_TX, "wrong Q type");

	/* num_bufs +1 for potential meta desc */
	if (unlikely(!ena_com_sq_have_enough_space(io_sq, num_bufs + 1))) {
		netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
			   "Not enough space in the tx queue\n");
		return -ENOMEM;
	}

	if (unlikely(header_len > io_sq->tx_max_header_size)) {
		netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
			   "Header size is too large %d max header: %d\n",
			   header_len, io_sq->tx_max_header_size);
		return -EINVAL;
	}

	if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV &&
		     !buffer_to_push)) {
		netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
			   "Push header wasn't provided in LLQ mode\n");
		return -EINVAL;
	}

	rc = ena_com_write_header_to_bounce(io_sq, buffer_to_push, header_len);
	if (unlikely(rc))
		return rc;

	rc = ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx, &have_meta);
	if (unlikely(rc)) {
		netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
			   "Failed to create and store tx meta desc\n");
		return rc;
	}

	/* If the caller doesn't want to send packets */
	if (unlikely(!num_bufs && !header_len)) {
		rc = ena_com_close_bounce_buffer(io_sq);
		if (rc)
			netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
				   "Failed to write buffers to LLQ\n");
		*nb_hw_desc = io_sq->tail - start_tail;
		return rc;
	}

	desc = get_sq_desc(io_sq);
	if (unlikely(!desc))
		return -EFAULT;
	memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));

	/* Set first desc when we don't have meta descriptor */
	if (!have_meta)
		desc->len_ctrl |= ENA_ETH_IO_TX_DESC_FIRST_MASK;

	desc->buff_addr_hi_hdr_sz |= ((u32)header_len <<
		ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT) &
		ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK;
	desc->len_ctrl |= ((u32)io_sq->phase << ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
		ENA_ETH_IO_TX_DESC_PHASE_MASK;

	desc->len_ctrl |= ENA_ETH_IO_TX_DESC_COMP_REQ_MASK;

	/* Bits 0-9 */
	desc->meta_ctrl |= ((u32)ena_tx_ctx->req_id <<
		ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT) &
		ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK;

	desc->meta_ctrl |= (ena_tx_ctx->df <<
		ENA_ETH_IO_TX_DESC_DF_SHIFT) &
		ENA_ETH_IO_TX_DESC_DF_MASK;

	/* Bits 10-15 */
	desc->len_ctrl |= ((ena_tx_ctx->req_id >> 10) <<
		ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT) &
		ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK;

	if (ena_tx_ctx->meta_valid) {
		desc->meta_ctrl |= (ena_tx_ctx->tso_enable <<
			ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT) &
			ENA_ETH_IO_TX_DESC_TSO_EN_MASK;
		desc->meta_ctrl |= ena_tx_ctx->l3_proto &
			ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l4_proto <<
			ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT) &
			ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l3_csum_enable <<
			ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT) &
			ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l4_csum_enable <<
			ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT) &
			ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l4_csum_partial <<
			ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT) &
			ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK;
	}

	for (i = 0; i < num_bufs; i++) {
		/* The first desc shares the same desc as the header */
		if (likely(i != 0)) {
			rc = ena_com_sq_update_tail(io_sq);
			if (unlikely(rc)) {
				netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
					   "Failed to update sq tail\n");
				return rc;
			}

			desc = get_sq_desc(io_sq);
			if (unlikely(!desc))
				return -EFAULT;

			memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));

			desc->len_ctrl |= ((u32)io_sq->phase <<
				ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
				ENA_ETH_IO_TX_DESC_PHASE_MASK;
		}

		desc->len_ctrl |= ena_bufs->len &
			ENA_ETH_IO_TX_DESC_LENGTH_MASK;

		addr_hi = ((ena_bufs->paddr &
			GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);

		desc->buff_addr_lo = (u32)ena_bufs->paddr;
		desc->buff_addr_hi_hdr_sz |= addr_hi &
			ENA_ETH_IO_TX_DESC_ADDR_HI_MASK;
		ena_bufs++;
	}

	/* set the last desc indicator */
	desc->len_ctrl |= ENA_ETH_IO_TX_DESC_LAST_MASK;

	rc = ena_com_sq_update_tail(io_sq);
	if (unlikely(rc)) {
		netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
			   "Failed to update sq tail of the last descriptor\n");
		return rc;
	}

	rc = ena_com_close_bounce_buffer(io_sq);

	*nb_hw_desc = io_sq->tail - start_tail;
	return rc;
}

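/* Retrieve one received packet from the CQ: collect its completion
 * descriptors into ena_bufs, update the matching SQ's next_to_comp and fill
 * the RX flags from the last descriptor. descs is 0 when no packet is ready.
 */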
int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
		   struct ena_com_io_sq *io_sq,
		   struct ena_com_rx_ctx *ena_rx_ctx)
{
	struct ena_com_rx_buf_info *ena_buf = &ena_rx_ctx->ena_bufs[0];
	struct ena_eth_io_rx_cdesc_base *cdesc = NULL;
	u16 q_depth = io_cq->q_depth;
	u16 cdesc_idx = 0;
	u16 nb_hw_desc;
	u16 i = 0;

	WARN(io_cq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX, "wrong Q type");

	nb_hw_desc = ena_com_cdesc_rx_pkt_get(io_cq, &cdesc_idx);
	if (nb_hw_desc == 0) {
		ena_rx_ctx->descs = nb_hw_desc;
		return 0;
	}

	netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
		   "Fetch rx packet: queue %d completed desc: %d\n", io_cq->qid,
		   nb_hw_desc);

	if (unlikely(nb_hw_desc > ena_rx_ctx->max_bufs)) {
		netdev_err(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
			   "Too many RX cdescs (%d) > MAX(%d)\n", nb_hw_desc,
			   ena_rx_ctx->max_bufs);
		return -ENOSPC;
	}

	cdesc = ena_com_rx_cdesc_idx_to_ptr(io_cq, cdesc_idx);
	ena_rx_ctx->pkt_offset = cdesc->offset;

	do {
		ena_buf[i].len = cdesc->length;
		ena_buf[i].req_id = cdesc->req_id;
		if (unlikely(ena_buf[i].req_id >= q_depth))
			return -EIO;

		if (++i >= nb_hw_desc)
			break;

		cdesc = ena_com_rx_cdesc_idx_to_ptr(io_cq, cdesc_idx + i);

	} while (1);

	/* Update SQ head ptr */
	io_sq->next_to_comp += nb_hw_desc;

	netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
		   "[%s][QID#%d] Updating SQ head to: %d\n", __func__,
		   io_sq->qid, io_sq->next_to_comp);

	/* Get rx flags from the last pkt */
	ena_com_rx_set_flags(io_cq, ena_rx_ctx, cdesc);

	ena_rx_ctx->descs = nb_hw_desc;

	return 0;
}

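/* Post a single RX buffer to the RX SQ: fill one RX descriptor with the
 * buffer address, length and req_id, then advance the tail.
 */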
int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
			       struct ena_com_buf *ena_buf,
			       u16 req_id)
{
	struct ena_eth_io_rx_desc *desc;

	WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX, "wrong Q type");

	if (unlikely(!ena_com_sq_have_enough_space(io_sq, 1)))
		return -ENOSPC;

	desc = get_sq_desc(io_sq);
	if (unlikely(!desc))
		return -EFAULT;

	memset(desc, 0x0, sizeof(struct ena_eth_io_rx_desc));

	desc->length = ena_buf->len;

	desc->ctrl = ENA_ETH_IO_RX_DESC_FIRST_MASK |
		     ENA_ETH_IO_RX_DESC_LAST_MASK |
		     ENA_ETH_IO_RX_DESC_COMP_REQ_MASK |
		     (io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK);

	desc->req_id = req_id;

	netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
		   "[%s] Adding single RX desc, Queue: %u, req_id: %u\n",
		   __func__, io_sq->qid, req_id);

	desc->buff_addr_lo = (u32)ena_buf->paddr;
	desc->buff_addr_hi =
		((ena_buf->paddr & GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);

	return ena_com_sq_update_tail(io_sq);
}

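/* Return true when the CQ has no new completion descriptor to process. */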
bool ena_com_cq_empty(struct ena_com_io_cq *io_cq)
{
	struct ena_eth_io_rx_cdesc_base *cdesc;

	cdesc = ena_com_get_next_rx_cdesc(io_cq);
	if (cdesc)
		return false;
	else
		return true;
}