// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
 */

#include "ena_eth_com.h"

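/* Return a pointer to the next unprocessed RX completion descriptor, or NULL
 * if the descriptor at the current head does not yet carry the expected
 * phase bit (i.e. the device has not written it yet).
 */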
static struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
	struct ena_com_io_cq *io_cq)
{
	struct ena_eth_io_rx_cdesc_base *cdesc;
	u16 expected_phase, head_masked;
	u16 desc_phase;

	head_masked = io_cq->head & (io_cq->q_depth - 1);
	expected_phase = io_cq->phase;

	cdesc = (struct ena_eth_io_rx_cdesc_base *)(io_cq->cdesc_addr.virt_addr
			+ (head_masked * io_cq->cdesc_entry_size_in_bytes));

	desc_phase = (READ_ONCE(cdesc->status) &
		      ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >>
		     ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT;

	if (desc_phase != expected_phase)
		return NULL;

	/* Make sure we read the rest of the descriptor after the phase bit
	 * has been read
	 */
	dma_rmb();

	return cdesc;
}

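/* Return a pointer to the SQ descriptor slot at the current (masked) tail of
 * a host-memory (regular placement) submission queue.
 */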
static void *get_sq_desc_regular_queue(struct ena_com_io_sq *io_sq)
{
	u16 tail_masked;
	u32 offset;

	tail_masked = io_sq->tail & (io_sq->q_depth - 1);

	offset = tail_masked * io_sq->desc_entry_size;

	return (void *)((uintptr_t)io_sq->desc_addr.virt_addr + offset);
}

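/* Copy a fully built bounce buffer (one LLQ descriptor list entry) to device
 * memory at the SQ tail, honoring the TX burst budget, then advance the tail
 * and flip the phase bit on wrap around.
 */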
static int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq,
					      u8 *bounce_buffer)
{
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;

	u16 dst_tail_mask;
	u32 dst_offset;

	dst_tail_mask = io_sq->tail & (io_sq->q_depth - 1);
	dst_offset = dst_tail_mask * llq_info->desc_list_entry_size;

	if (is_llq_max_tx_burst_exists(io_sq)) {
		if (unlikely(!io_sq->entries_in_tx_burst_left)) {
			netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
				   "Error: trying to send more packets than tx burst allows\n");
			return -ENOSPC;
		}

		io_sq->entries_in_tx_burst_left--;
		netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
			   "Decreasing entries_in_tx_burst_left of queue %d to %d\n",
			   io_sq->qid, io_sq->entries_in_tx_burst_left);
	}

	/* Make sure everything was written into the bounce buffer before
	 * writing the bounce buffer to the device
	 */
	wmb();

	/* The line is completed. Copy it to dev */
	__iowrite64_copy(io_sq->desc_addr.pbuf_dev_addr + dst_offset,
			 bounce_buffer, (llq_info->desc_list_entry_size) / 8);

	io_sq->tail++;

	/* Switch phase bit in case of wrap around */
	if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
		io_sq->phase ^= 1;

	return 0;
}

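/* Copy the packet header into the current bounce buffer at the offset that
 * follows the descriptors. Only relevant for LLQ (device placement) queues;
 * a no-op for host placement.
 */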
static int ena_com_write_header_to_bounce(struct ena_com_io_sq *io_sq,
					  u8 *header_src,
					  u16 header_len)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
	u8 *bounce_buffer = pkt_ctrl->curr_bounce_buf;
	u16 header_offset;

	if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST))
		return 0;

	header_offset =
		llq_info->descs_num_before_header * io_sq->desc_entry_size;

	if (unlikely((header_offset + header_len) >
		     llq_info->desc_list_entry_size)) {
		netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
			   "Trying to write header larger than llq entry can accommodate\n");
		return -EFAULT;
	}

	if (unlikely(!bounce_buffer)) {
		netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
			   "Bounce buffer is NULL\n");
		return -EFAULT;
	}

	memcpy(bounce_buffer + header_offset, header_src, header_len);

	return 0;
}

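/* Return the next free descriptor slot inside the current LLQ bounce buffer
 * and account for it in the packet control state.
 */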
static void *get_sq_desc_llq(struct ena_com_io_sq *io_sq)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	u8 *bounce_buffer;
	void *sq_desc;

	bounce_buffer = pkt_ctrl->curr_bounce_buf;

	if (unlikely(!bounce_buffer)) {
		netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
			   "Bounce buffer is NULL\n");
		return NULL;
	}

	sq_desc = bounce_buffer + pkt_ctrl->idx * io_sq->desc_entry_size;
	pkt_ctrl->idx++;
	pkt_ctrl->descs_left_in_line--;

	return sq_desc;
}

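/* Flush a partially filled bounce buffer to the device (LLQ only) and set up
 * a fresh, zeroed bounce buffer for the next packet.
 */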
static int ena_com_close_bounce_buffer(struct ena_com_io_sq *io_sq)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
	int rc;

	if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST))
		return 0;

	/* bounce buffer was used, so write it and get a new one */
	if (likely(pkt_ctrl->idx)) {
		rc = ena_com_write_bounce_buffer_to_dev(io_sq,
							pkt_ctrl->curr_bounce_buf);
		if (unlikely(rc)) {
			netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
				   "Failed to write bounce buffer to device\n");
			return rc;
		}

		pkt_ctrl->curr_bounce_buf =
			ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
		memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
		       0x0, llq_info->desc_list_entry_size);
	}

	pkt_ctrl->idx = 0;
	pkt_ctrl->descs_left_in_line = llq_info->descs_num_before_header;
	return 0;
}

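/* Dispatch to the LLQ or regular-queue variant depending on the SQ's
 * placement policy.
 */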
static void *get_sq_desc(struct ena_com_io_sq *io_sq)
{
	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
		return get_sq_desc_llq(io_sq);

	return get_sq_desc_regular_queue(io_sq);
}

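/* Advance the LLQ tail: once the current descriptor line is exhausted, write
 * it to the device and start a new bounce buffer with the per-entry
 * descriptor budget reset.
 */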
static int ena_com_sq_update_llq_tail(struct ena_com_io_sq *io_sq)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
	int rc;

	if (!pkt_ctrl->descs_left_in_line) {
		rc = ena_com_write_bounce_buffer_to_dev(io_sq,
							pkt_ctrl->curr_bounce_buf);
		if (unlikely(rc)) {
			netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
				   "Failed to write bounce buffer to device\n");
			return rc;
		}

		pkt_ctrl->curr_bounce_buf =
			ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
		memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
		       0x0, llq_info->desc_list_entry_size);

		pkt_ctrl->idx = 0;
		if (unlikely(llq_info->desc_stride_ctrl == ENA_ADMIN_SINGLE_DESC_PER_ENTRY))
			pkt_ctrl->descs_left_in_line = 1;
		else
			pkt_ctrl->descs_left_in_line =
				llq_info->desc_list_entry_size / io_sq->desc_entry_size;
	}

	return 0;
}

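/* Advance the SQ tail after a descriptor has been filled in, flipping the
 * phase bit on wrap around (LLQ queues delegate to the LLQ variant).
 */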
static int ena_com_sq_update_tail(struct ena_com_io_sq *io_sq)
{
	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
		return ena_com_sq_update_llq_tail(io_sq);

	io_sq->tail++;

	/* Switch phase bit in case of wrap around */
	if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
		io_sq->phase ^= 1;

	return 0;
}

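/* Translate an RX completion descriptor index (masked to the queue depth)
 * into a pointer within the CQ descriptor ring.
 */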
static struct ena_eth_io_rx_cdesc_base *
	ena_com_rx_cdesc_idx_to_ptr(struct ena_com_io_cq *io_cq, u16 idx)
{
	idx &= (io_cq->q_depth - 1);
	return (struct ena_eth_io_rx_cdesc_base *)
		((uintptr_t)io_cq->cdesc_addr.virt_addr +
			idx * io_cq->cdesc_entry_size_in_bytes);
}

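/* Walk the CQ from the current head and count the completion descriptors
 * that belong to the next received packet. Returns the descriptor count once
 * a descriptor with the LAST bit is seen, or 0 if the packet is not yet
 * fully completed; *first_cdesc_idx is set to the packet's first descriptor.
 */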
static u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
				    u16 *first_cdesc_idx)
{
	struct ena_eth_io_rx_cdesc_base *cdesc;
	u16 count = 0, head_masked;
	u32 last = 0;

	do {
		cdesc = ena_com_get_next_rx_cdesc(io_cq);
		if (!cdesc)
			break;

		ena_com_cq_inc_head(io_cq);
		count++;
		last = (READ_ONCE(cdesc->status) &
			ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >>
		       ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT;
	} while (!last);

	if (last) {
		*first_cdesc_idx = io_cq->cur_rx_pkt_cdesc_start_idx;
		count += io_cq->cur_rx_pkt_cdesc_count;

		head_masked = io_cq->head & (io_cq->q_depth - 1);

		io_cq->cur_rx_pkt_cdesc_count = 0;
		io_cq->cur_rx_pkt_cdesc_start_idx = head_masked;

		netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
			   "ENA q_id: %d packets were completed. first desc idx %u descs# %d\n",
			   io_cq->qid, *first_cdesc_idx, count);
	} else {
		io_cq->cur_rx_pkt_cdesc_count += count;
		count = 0;
	}

	return count;
}

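/* Build and submit a TX meta descriptor (MSS, L3/L4 header lengths and
 * offsets) at the SQ tail, then advance the tail.
 */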
static int ena_com_create_meta(struct ena_com_io_sq *io_sq,
			       struct ena_com_tx_meta *ena_meta)
{
	struct ena_eth_io_tx_meta_desc *meta_desc = NULL;

	meta_desc = get_sq_desc(io_sq);
	if (unlikely(!meta_desc))
		return -EFAULT;

	memset(meta_desc, 0x0, sizeof(struct ena_eth_io_tx_meta_desc));

	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_DESC_MASK;

	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK;

	/* bits 0-9 of the mss */
	meta_desc->word2 |= ((u32)ena_meta->mss <<
		ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK;
	/* bits 10-13 of the mss */
	meta_desc->len_ctrl |= ((ena_meta->mss >> 10) <<
		ENA_ETH_IO_TX_META_DESC_MSS_HI_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_MSS_HI_MASK;

	/* Extended meta desc */
	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK;
	meta_desc->len_ctrl |= ((u32)io_sq->phase <<
		ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_PHASE_MASK;

	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_FIRST_MASK;
	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_STORE_MASK;

	meta_desc->word2 |= ena_meta->l3_hdr_len &
		ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK;
	meta_desc->word2 |= (ena_meta->l3_hdr_offset <<
		ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK;

	meta_desc->word2 |= ((u32)ena_meta->l4_hdr_len <<
		ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK;

	return ena_com_sq_update_tail(io_sq);
}

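/* Decide whether a TX meta descriptor is needed for this packet. If meta
 * caching is disabled, the meta is always created; otherwise it is created
 * (and cached) only when it differs from the previously stored one.
 * *have_meta reports whether a meta descriptor was submitted.
 */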
static int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
						 struct ena_com_tx_ctx *ena_tx_ctx,
						 bool *have_meta)
{
	struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;

	/* When disable meta caching is set, don't bother to save the meta and
	 * compare it to the stored version, just create the meta
	 */
	if (io_sq->disable_meta_caching) {
		if (unlikely(!ena_tx_ctx->meta_valid))
			return -EINVAL;

		*have_meta = true;
		return ena_com_create_meta(io_sq, ena_meta);
	}

	if (ena_com_meta_desc_changed(io_sq, ena_tx_ctx)) {
		*have_meta = true;
		/* Cache the meta desc */
		memcpy(&io_sq->cached_tx_meta, ena_meta,
		       sizeof(struct ena_com_tx_meta));
		return ena_com_create_meta(io_sq, ena_meta);
	}

	*have_meta = false;
	return 0;
}

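/* Extract the per-packet metadata (L3/L4 protocol, checksum results, hash,
 * fragmentation flag) from the RX completion descriptor status into the RX
 * context handed back to the caller.
 */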
static void ena_com_rx_set_flags(struct ena_com_io_cq *io_cq,
				 struct ena_com_rx_ctx *ena_rx_ctx,
				 struct ena_eth_io_rx_cdesc_base *cdesc)
{
	ena_rx_ctx->l3_proto = cdesc->status &
		ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK;
	ena_rx_ctx->l4_proto =
		(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT;
	ena_rx_ctx->l3_csum_err =
		!!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK) >>
		   ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT);
	ena_rx_ctx->l4_csum_err =
		!!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK) >>
		   ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT);
	ena_rx_ctx->l4_csum_checked =
		!!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_MASK) >>
		   ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_SHIFT);
	ena_rx_ctx->hash = cdesc->hash;
	ena_rx_ctx->frag =
		(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT;

	netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
		   "l3_proto %d l4_proto %d l3_csum_err %d l4_csum_err %d hash %d frag %d cdesc_status %x\n",
		   ena_rx_ctx->l3_proto, ena_rx_ctx->l4_proto,
		   ena_rx_ctx->l3_csum_err, ena_rx_ctx->l4_csum_err,
		   ena_rx_ctx->hash, ena_rx_ctx->frag, cdesc->status);
}

/*****************************************************************************/
/*****************************    API    **********************************/
/*****************************************************************************/

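/* Prepare a TX packet for transmission: validate queue space and header
 * size, write the pushed header (LLQ), create the meta descriptor when
 * needed, then fill one TX descriptor per buffer and advance the SQ tail.
 * *nb_hw_desc returns how many hardware descriptors were consumed.
 */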
int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
		       struct ena_com_tx_ctx *ena_tx_ctx,
		       int *nb_hw_desc)
{
	struct ena_eth_io_tx_desc *desc = NULL;
	struct ena_com_buf *ena_bufs = ena_tx_ctx->ena_bufs;
	void *buffer_to_push = ena_tx_ctx->push_header;
	u16 header_len = ena_tx_ctx->header_len;
	u16 num_bufs = ena_tx_ctx->num_bufs;
	u16 start_tail = io_sq->tail;
	int i, rc;
	bool have_meta;
	u64 addr_hi;

	WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_TX, "wrong Q type");

	/* num_bufs + 1 for potential meta desc */
	if (unlikely(!ena_com_sq_have_enough_space(io_sq, num_bufs + 1))) {
		netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
			   "Not enough space in the tx queue\n");
		return -ENOMEM;
	}

	if (unlikely(header_len > io_sq->tx_max_header_size)) {
		netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
			   "Header size is too large %d max header: %d\n",
			   header_len, io_sq->tx_max_header_size);
		return -EINVAL;
	}

	if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV &&
		     !buffer_to_push)) {
		netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
			   "Push header wasn't provided in LLQ mode\n");
		return -EINVAL;
	}

	rc = ena_com_write_header_to_bounce(io_sq, buffer_to_push, header_len);
	if (unlikely(rc))
		return rc;

	rc = ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx, &have_meta);
	if (unlikely(rc)) {
		netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
			   "Failed to create and store tx meta desc\n");
		return rc;
	}

	/* If the caller doesn't want to send packets */
	if (unlikely(!num_bufs && !header_len)) {
		rc = ena_com_close_bounce_buffer(io_sq);
		if (rc)
			netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
				   "Failed to write buffers to LLQ\n");
		*nb_hw_desc = io_sq->tail - start_tail;
		return rc;
	}

	desc = get_sq_desc(io_sq);
	if (unlikely(!desc))
		return -EFAULT;
	memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));

	/* Set first desc when we don't have meta descriptor */
	if (!have_meta)
		desc->len_ctrl |= ENA_ETH_IO_TX_DESC_FIRST_MASK;

	desc->buff_addr_hi_hdr_sz |= ((u32)header_len <<
		ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT) &
		ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK;
	desc->len_ctrl |= ((u32)io_sq->phase << ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
		ENA_ETH_IO_TX_DESC_PHASE_MASK;

	desc->len_ctrl |= ENA_ETH_IO_TX_DESC_COMP_REQ_MASK;

	/* Bits 0-9 */
	desc->meta_ctrl |= ((u32)ena_tx_ctx->req_id <<
		ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT) &
		ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK;

	desc->meta_ctrl |= (ena_tx_ctx->df <<
		ENA_ETH_IO_TX_DESC_DF_SHIFT) &
		ENA_ETH_IO_TX_DESC_DF_MASK;

	/* Bits 10-15 */
	desc->len_ctrl |= ((ena_tx_ctx->req_id >> 10) <<
		ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT) &
		ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK;

	if (ena_tx_ctx->meta_valid) {
		desc->meta_ctrl |= (ena_tx_ctx->tso_enable <<
			ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT) &
			ENA_ETH_IO_TX_DESC_TSO_EN_MASK;
		desc->meta_ctrl |= ena_tx_ctx->l3_proto &
			ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l4_proto <<
			ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT) &
			ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l3_csum_enable <<
			ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT) &
			ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l4_csum_enable <<
			ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT) &
			ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l4_csum_partial <<
			ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT) &
			ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK;
	}

	for (i = 0; i < num_bufs; i++) {
		/* The first desc shares the same desc as the header */
		if (likely(i != 0)) {
			rc = ena_com_sq_update_tail(io_sq);
			if (unlikely(rc)) {
				netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
					   "Failed to update sq tail\n");
				return rc;
			}

			desc = get_sq_desc(io_sq);
			if (unlikely(!desc))
				return -EFAULT;

			memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));

			desc->len_ctrl |= ((u32)io_sq->phase <<
				ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
				ENA_ETH_IO_TX_DESC_PHASE_MASK;
		}

		desc->len_ctrl |= ena_bufs->len &
			ENA_ETH_IO_TX_DESC_LENGTH_MASK;

		addr_hi = ((ena_bufs->paddr &
			GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);

		desc->buff_addr_lo = (u32)ena_bufs->paddr;
		desc->buff_addr_hi_hdr_sz |= addr_hi &
			ENA_ETH_IO_TX_DESC_ADDR_HI_MASK;
		ena_bufs++;
	}

	/* set the last desc indicator */
	desc->len_ctrl |= ENA_ETH_IO_TX_DESC_LAST_MASK;

	rc = ena_com_sq_update_tail(io_sq);
	if (unlikely(rc)) {
		netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
			   "Failed to update sq tail of the last descriptor\n");
		return rc;
	}

	rc = ena_com_close_bounce_buffer(io_sq);

	*nb_hw_desc = io_sq->tail - start_tail;
	return rc;
}

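/* Fetch the next completed RX packet from the CQ: collect its completion
 * descriptors, copy length/req_id per buffer into the RX context, advance
 * the matching SQ's next_to_comp and fill in the RX flags from the last
 * descriptor. ena_rx_ctx->descs is 0 when no complete packet is available.
 */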
int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
		   struct ena_com_io_sq *io_sq,
		   struct ena_com_rx_ctx *ena_rx_ctx)
{
	struct ena_com_rx_buf_info *ena_buf = &ena_rx_ctx->ena_bufs[0];
	struct ena_eth_io_rx_cdesc_base *cdesc = NULL;
	u16 q_depth = io_cq->q_depth;
	u16 cdesc_idx = 0;
	u16 nb_hw_desc;
	u16 i = 0;

	WARN(io_cq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX, "wrong Q type");

	nb_hw_desc = ena_com_cdesc_rx_pkt_get(io_cq, &cdesc_idx);
	if (nb_hw_desc == 0) {
		ena_rx_ctx->descs = nb_hw_desc;
		return 0;
	}

	netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
		   "Fetch rx packet: queue %d completed desc: %d\n", io_cq->qid,
		   nb_hw_desc);

	if (unlikely(nb_hw_desc > ena_rx_ctx->max_bufs)) {
		netdev_err(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
			   "Too many RX cdescs (%d) > MAX(%d)\n", nb_hw_desc,
			   ena_rx_ctx->max_bufs);
		return -ENOSPC;
	}

	cdesc = ena_com_rx_cdesc_idx_to_ptr(io_cq, cdesc_idx);
	ena_rx_ctx->pkt_offset = cdesc->offset;

	do {
		ena_buf[i].len = cdesc->length;
		ena_buf[i].req_id = cdesc->req_id;
		if (unlikely(ena_buf[i].req_id >= q_depth))
			return -EIO;

		if (++i >= nb_hw_desc)
			break;

		cdesc = ena_com_rx_cdesc_idx_to_ptr(io_cq, cdesc_idx + i);

	} while (1);

	/* Update SQ head ptr */
	io_sq->next_to_comp += nb_hw_desc;

	netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
		   "[%s][QID#%d] Updating SQ head to: %d\n", __func__,
		   io_sq->qid, io_sq->next_to_comp);

	/* Get rx flags from the last pkt */
	ena_com_rx_set_flags(io_cq, ena_rx_ctx, cdesc);

	ena_rx_ctx->descs = nb_hw_desc;

	return 0;
}

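/* Post a single RX buffer to the RX submission queue: fill one RX descriptor
 * (FIRST | LAST | COMP_REQ and the current phase), record the req_id and the
 * buffer's DMA address, then advance the SQ tail.
 */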
int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
			       struct ena_com_buf *ena_buf,
			       u16 req_id)
{
	struct ena_eth_io_rx_desc *desc;

	WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX, "wrong Q type");

	if (unlikely(!ena_com_sq_have_enough_space(io_sq, 1)))
		return -ENOSPC;

	desc = get_sq_desc(io_sq);
	if (unlikely(!desc))
		return -EFAULT;

	memset(desc, 0x0, sizeof(struct ena_eth_io_rx_desc));

	desc->length = ena_buf->len;

	desc->ctrl = ENA_ETH_IO_RX_DESC_FIRST_MASK |
		     ENA_ETH_IO_RX_DESC_LAST_MASK |
		     ENA_ETH_IO_RX_DESC_COMP_REQ_MASK |
		     (io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK);

	desc->req_id = req_id;

	netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
		   "[%s] Adding single RX desc, Queue: %u, req_id: %u\n",
		   __func__, io_sq->qid, req_id);

	desc->buff_addr_lo = (u32)ena_buf->paddr;
	desc->buff_addr_hi =
		((ena_buf->paddr & GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);

	return ena_com_sq_update_tail(io_sq);
}

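/* Return true when the CQ has no new completion descriptor at its current
 * head, i.e. nothing is pending for processing.
 */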
bool ena_com_cq_empty(struct ena_com_io_cq *io_cq)
{
	struct ena_eth_io_rx_cdesc_base *cdesc;

	cdesc = ena_com_get_next_rx_cdesc(io_cq);
	if (cdesc)
		return false;
	else
		return true;
}