Lines matching refs:cqe: uses of the cqe (completion queue entry) pointer in the Rx fast path of the QLogic qede NIC driver, drivers/net/ethernet/qlogic/qede/qede_fp.c. The leading number on each line is the line number within that source file; "argument" and "local" indicate how cqe is bound in the enclosing function.
650 struct eth_fast_path_rx_tpa_start_cqe *cqe) in qede_set_gro_params() argument
652 u16 parsing_flags = le16_to_cpu(cqe->pars_flags.flags); in qede_set_gro_params()
660 skb_shinfo(skb)->gso_size = __le16_to_cpu(cqe->len_on_first_bd) - in qede_set_gro_params()
661 cqe->header_len; in qede_set_gro_params()
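The qede_set_gro_params() lines above show the two conversions involved in sizing GRO segments: the 16-bit len_on_first_bd field is little-endian on the wire and goes through a le16 conversion, while header_len is a plain byte. A minimal user-space sketch of that arithmetic follows; the struct is a simplified stand-in, not the real eth_fast_path_rx_tpa_start_cqe layout, and le16_to_cpu() is stubbed assuming a little-endian host.

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for the kernel's le16_to_cpu(); a little-endian host is assumed. */
    static uint16_t le16_to_cpu(uint16_t le) { return le; }

    /* Simplified stand-in for eth_fast_path_rx_tpa_start_cqe. */
    struct tpa_start_cqe {
            uint16_t len_on_first_bd; /* little-endian on the wire */
            uint8_t  header_len;      /* single byte, no conversion needed */
    };

    int main(void)
    {
            /* A 1514-byte frame with 54 bytes of Ethernet + IPv4 + TCP headers. */
            struct tpa_start_cqe cqe = { .len_on_first_bd = 1514, .header_len = 54 };

            /* Same arithmetic as qede_set_gro_params(): the payload bytes in the
             * first buffer descriptor become the GSO segment size. */
            uint16_t gso_size = le16_to_cpu(cqe.len_on_first_bd) - cqe.header_len;

            printf("gso_size = %u\n", gso_size); /* 1460, a full IPv4 MSS */
            return 0;
    }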
833 struct eth_fast_path_rx_tpa_start_cqe *cqe) in qede_tpa_start() argument
835 struct qede_agg_info *tpa_info = &rxq->tpa_info[cqe->tpa_agg_index]; in qede_tpa_start()
840 pad = cqe->placement_offset + rxq->rx_headroom; in qede_tpa_start()
843 le16_to_cpu(cqe->len_on_first_bd), in qede_tpa_start()
864 if ((le16_to_cpu(cqe->pars_flags.flags) >> in qede_tpa_start()
867 tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag); in qede_tpa_start()
871 qede_get_rxhash(tpa_info->skb, cqe->bitfields, cqe->rss_hash); in qede_tpa_start()
874 qede_set_gro_params(edev, tpa_info->skb, cqe); in qede_tpa_start()
877 if (likely(cqe->bw_ext_bd_len_list[0])) in qede_tpa_start()
878 qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index, in qede_tpa_start()
879 le16_to_cpu(cqe->bw_ext_bd_len_list[0])); in qede_tpa_start()
881 if (unlikely(cqe->bw_ext_bd_len_list[1])) { in qede_tpa_start()
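qede_tpa_start() opens an aggregation slot keyed by cqe->tpa_agg_index and, as the lines above show, only trusts cqe->vlan_tag after testing a bit in the little-endian parsing flags. A self-contained sketch of that shift-and-mask test follows; the macro names and bit position are hypothetical stand-ins for the driver's real PARSING_AND_ERR_FLAGS_* constants.

    #include <stdint.h>
    #include <stdio.h>

    static uint16_t le16_to_cpu(uint16_t le) { return le; } /* LE host assumed */

    /* Hypothetical stand-ins for the driver's parsing-flag constants. */
    #define TAG8021Q_EXIST_SHIFT 9
    #define TAG8021Q_EXIST_MASK  0x1

    /* Simplified stand-in for the TPA-start CQE fields used here. */
    struct tpa_start_cqe {
            uint16_t pars_flags; /* little-endian parsing flags */
            uint16_t vlan_tag;   /* little-endian 802.1Q tag */
    };

    int main(void)
    {
            struct tpa_start_cqe cqe = {
                    .pars_flags = 1u << TAG8021Q_EXIST_SHIFT,
                    .vlan_tag   = 100,
            };
            uint16_t vlan_tag = 0;

            /* Same pattern as qede_tpa_start(): shift the converted flags,
             * mask out one bit, and read vlan_tag only if the bit is set. */
            if ((le16_to_cpu(cqe.pars_flags) >> TAG8021Q_EXIST_SHIFT) &
                TAG8021Q_EXIST_MASK)
                    vlan_tag = le16_to_cpu(cqe.vlan_tag);

            printf("vlan_tag = %u\n", vlan_tag);
            return 0;
    }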
959 struct eth_fast_path_rx_tpa_cont_cqe *cqe) in qede_tpa_cont() argument
963 for (i = 0; cqe->len_list[i]; i++) in qede_tpa_cont()
964 qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index, in qede_tpa_cont()
965 le16_to_cpu(cqe->len_list[i])); in qede_tpa_cont()
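The qede_tpa_cont() loop relies on a convention visible in the fragment above: len_list is terminated by a zero entry, so iteration stops at the first zero length rather than at a fixed count. A sketch of that pattern, with a hypothetical array size:

    #include <stdint.h>
    #include <stdio.h>

    static uint16_t le16_to_cpu(uint16_t le) { return le; } /* LE host assumed */

    #define LEN_LIST_MAX 6 /* hypothetical; the real size comes from the HW headers */

    int main(void)
    {
            /* A zero entry terminates the list, which is what the loops in
             * qede_tpa_cont() and qede_tpa_end() depend on. */
            uint16_t len_list[LEN_LIST_MAX] = { 1024, 512, 0 };
            unsigned int i, total = 0;

            for (i = 0; len_list[i]; i++)
                    total += le16_to_cpu(len_list[i]);

            printf("aggregated %u bytes from %u continuation buffers\n", total, i);
            return 0;
    }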
974 struct eth_fast_path_rx_tpa_end_cqe *cqe) in qede_tpa_end() argument
981 tpa_info = &rxq->tpa_info[cqe->tpa_agg_index]; in qede_tpa_end()
988 for (i = 0; cqe->len_list[i]; i++) in qede_tpa_end()
989 qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index, in qede_tpa_end()
990 le16_to_cpu(cqe->len_list[i])); in qede_tpa_end()
999 if (unlikely(cqe->num_of_bds != tpa_info->frag_id + 1)) in qede_tpa_end()
1002 cqe->num_of_bds, tpa_info->frag_id); in qede_tpa_end()
1003 if (unlikely(skb->len != le16_to_cpu(cqe->total_packet_len))) in qede_tpa_end()
1006 le16_to_cpu(cqe->total_packet_len), skb->len); in qede_tpa_end()
1015 NAPI_GRO_CB(skb)->count = le16_to_cpu(cqe->num_of_coalesced_segs); in qede_tpa_end()
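At aggregation end, the lines above show qede_tpa_end() cross-checking the hardware's view against the software state: num_of_bds must equal the number of fragments filled plus one (the first BD), and the skb's accumulated length must equal the CQE's total_packet_len; the coalesced segment count then seeds NAPI_GRO_CB(skb)->count. A sketch of those checks with a simplified CQE stand-in:

    #include <stdint.h>
    #include <stdio.h>

    static uint16_t le16_to_cpu(uint16_t le) { return le; } /* LE host assumed */

    /* Simplified stand-in for eth_fast_path_rx_tpa_end_cqe. */
    struct tpa_end_cqe {
            uint8_t  num_of_bds;
            uint16_t total_packet_len;      /* little-endian */
            uint16_t num_of_coalesced_segs; /* little-endian */
    };

    int main(void)
    {
            struct tpa_end_cqe cqe = { .num_of_bds = 3,
                                       .total_packet_len = 4380,
                                       .num_of_coalesced_segs = 3 };
            unsigned int frag_id = 2;    /* fragments filled so far */
            unsigned int skb_len = 4380; /* bytes accumulated in the skb */

            /* Mirrors of the two unlikely() sanity checks in qede_tpa_end(). */
            if (cqe.num_of_bds != frag_id + 1)
                    fprintf(stderr, "BD count mismatch: cqe %u vs filled %u\n",
                            cqe.num_of_bds, frag_id + 1);
            if (skb_len != le16_to_cpu(cqe.total_packet_len))
                    fprintf(stderr, "length mismatch: cqe %u vs skb %u\n",
                            le16_to_cpu(cqe.total_packet_len), skb_len);

            /* GRO is told how many segments were coalesced into this skb. */
            printf("coalesced segs = %u\n",
                   le16_to_cpu(cqe.num_of_coalesced_segs));
            return 0;
    }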
1064 static bool qede_pkt_is_ip_fragmented(struct eth_fast_path_rx_reg_cqe *cqe, in qede_pkt_is_ip_fragmented() argument
1067 u8 tun_pars_flg = cqe->tunnel_pars_flags.flags; in qede_pkt_is_ip_fragmented()
1084 struct eth_fast_path_rx_reg_cqe *cqe, in qede_rx_xdp() argument
1163 qede_recycle_rx_bd_ring(rxq, cqe->bd_num); in qede_rx_xdp()
1172 struct eth_fast_path_rx_reg_cqe *cqe, in qede_rx_build_jumbo() argument
1175 u16 pkt_len = le16_to_cpu(cqe->pkt_len); in qede_rx_build_jumbo()
1183 for (num_frags = cqe->bd_num - 1; num_frags > 0; num_frags--) { in qede_rx_build_jumbo()
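For non-aggregated jumbo frames, qede_rx_build_jumbo() counts down from cqe->bd_num - 1 because the first buffer descriptor was already consumed for the skb head; each remaining BD becomes one fragment. A sketch of that accounting follows; the 2048-byte buffer size is a hypothetical stand-in for the queue's actual Rx buffer size.

    #include <stdint.h>
    #include <stdio.h>

    #define RX_BUF_SIZE 2048 /* hypothetical per-BD buffer size */

    int main(void)
    {
            uint16_t pkt_len = 9000; /* le16_to_cpu(cqe->pkt_len) in the driver */
            uint8_t  bd_num = 5;     /* BDs the packet spans, from the CQE */
            unsigned int num_frags;
            unsigned int remaining = pkt_len - RX_BUF_SIZE; /* head used BD 0 */

            /* Same countdown as qede_rx_build_jumbo(): every BD after the
             * first becomes one page fragment attached to the skb. */
            for (num_frags = bd_num - 1; num_frags > 0; num_frags--) {
                    unsigned int frag_len = remaining > RX_BUF_SIZE ?
                                            RX_BUF_SIZE : remaining;
                    printf("fragment of %u bytes\n", frag_len);
                    remaining -= frag_len;
            }
            return 0;
    }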
1226 union eth_rx_cqe *cqe, in qede_rx_process_tpa_cqe() argument
1231 qede_tpa_start(edev, rxq, &cqe->fast_path_tpa_start); in qede_rx_process_tpa_cqe()
1234 qede_tpa_cont(edev, rxq, &cqe->fast_path_tpa_cont); in qede_rx_process_tpa_cqe()
1237 return qede_tpa_end(edev, fp, &cqe->fast_path_tpa_end); in qede_rx_process_tpa_cqe()
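qede_rx_process_tpa_cqe() selects which member of the eth_rx_cqe union is live based on the completion type, and only the TPA-end handler's return value is propagated, since only aggregation end can deliver a packet. A reduced sketch of that dispatch shape; the enum, structs, and union here are hypothetical stand-ins for the real qed/qede header definitions.

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical reduced versions of the CQE type enum and union members. */
    enum cqe_type { TPA_START, TPA_CONT, TPA_END };

    struct tpa_start_cqe { uint8_t tpa_agg_index; };
    struct tpa_cont_cqe  { uint8_t tpa_agg_index; };
    struct tpa_end_cqe   { uint8_t tpa_agg_index; };

    union rx_cqe {
            struct tpa_start_cqe fast_path_tpa_start;
            struct tpa_cont_cqe  fast_path_tpa_cont;
            struct tpa_end_cqe   fast_path_tpa_end;
    };

    /* Dispatch shaped like qede_rx_process_tpa_cqe(): which union member
     * is valid depends entirely on the completion type. */
    static int process_tpa_cqe(union rx_cqe *cqe, enum cqe_type type)
    {
            switch (type) {
            case TPA_START:
                    printf("start agg %u\n", cqe->fast_path_tpa_start.tpa_agg_index);
                    return 0;
            case TPA_CONT:
                    printf("cont agg %u\n", cqe->fast_path_tpa_cont.tpa_agg_index);
                    return 0;
            case TPA_END:
                    printf("end agg %u\n", cqe->fast_path_tpa_end.tpa_agg_index);
                    return 1; /* only the end handler's result is propagated */
            }
            return 0;
    }

    int main(void)
    {
            union rx_cqe cqe = { .fast_path_tpa_start = { .tpa_agg_index = 0 } };
            process_tpa_cqe(&cqe, TPA_START);
            return 0;
    }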
1251 union eth_rx_cqe *cqe; in qede_rx_process_cqe() local
1258 cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring); in qede_rx_process_cqe()
1259 cqe_type = cqe->fast_path_regular.type; in qede_rx_process_cqe()
1265 sp_cqe = (struct eth_slow_path_rx_cqe *)cqe; in qede_rx_process_cqe()
1272 return qede_rx_process_tpa_cqe(edev, fp, rxq, cqe, cqe_type); in qede_rx_process_cqe()
1280 fp_cqe = &cqe->fast_path_regular; in qede_rx_process_cqe()
1291 flags = cqe->fast_path_regular.pars_flags.flags; in qede_rx_process_cqe()
1331 qede_ptp_record_rx_ts(edev, cqe, skb); in qede_rx_process_cqe()
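Finally, qede_rx_process_cqe() reads the type through cqe->fast_path_regular.type before it knows which union member is valid; that works because every CQE variant carries the type byte at the same offset. Slow-path completions are then diverted off the data path, and everything else is either a TPA completion or a regular packet. A minimal sketch of that flow, with hypothetical reduced layouts in place of the real hardware structures:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical reduced CQE layouts: every member starts with the same
     * type byte, which is why the driver can read it through
     * cqe->fast_path_regular.type before knowing which member is live. */
    enum cqe_type { CQE_REGULAR, CQE_TPA, CQE_SLOW_PATH };

    struct fast_path_reg_cqe {
            uint8_t  type;
            uint16_t pkt_len; /* the real field is little-endian (le16_to_cpu) */
    };
    struct slow_path_cqe {
            uint8_t type;
            uint8_t ramrod_id;
    };

    union rx_cqe {
            struct fast_path_reg_cqe fast_path_regular;
            struct slow_path_cqe     slow_path;
    };

    static void process_cqe(union rx_cqe *cqe)
    {
            uint8_t type = cqe->fast_path_regular.type;

            if (type == CQE_SLOW_PATH) {
                    /* Not a packet: handed to the control path, as the
                     * driver does via the qed layer. */
                    printf("slow path, ramrod %u\n", cqe->slow_path.ramrod_id);
                    return;
            }
            if (type == CQE_TPA) {
                    printf("TPA completion\n");
                    return;
            }
            printf("regular packet, %u bytes\n", cqe->fast_path_regular.pkt_len);
    }

    int main(void)
    {
            union rx_cqe cqe = {
                    .fast_path_regular = { .type = CQE_REGULAR, .pkt_len = 64 },
            };
            process_cqe(&cqe);
            return 0;
    }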