| /linux/drivers/net/can/dev/ |
| rx-offload.c | 27 if (offload->inc) in can_rx_offload_le() 36 if (offload->inc) in can_rx_offload_inc() 153 skb = offload->mailbox_read(offload, n, &timestamp, drop); in can_rx_offload_offload_one() 182 can_rx_offload_le(offload, i, offload->mb_last); in can_rx_offload_irq_offload_timestamp() 317 skb_queue_splice_tail_init(&offload->skb_irq_queue, &offload->skb_queue); in can_rx_offload_irq_finish() 338 skb_queue_splice_tail_init(&offload->skb_irq_queue, &offload->skb_queue); in can_rx_offload_threaded_irq_finish() 356 offload->dev = dev; in can_rx_offload_init_queue() 379 offload->mb_last > BITS_PER_LONG_LONG || !offload->mailbox_read) in can_rx_offload_add_timestamp() 382 if (offload->mb_first < offload->mb_last) { in can_rx_offload_add_timestamp() 384 weight = offload->mb_last - offload->mb_first; in can_rx_offload_add_timestamp() [all …]
|
| /linux/kernel/bpf/ |
| offload.c | 110 struct bpf_prog_offload *offload = prog->aux->offload; in __bpf_prog_offload_destroy() local 194 offload = kzalloc(sizeof(*offload), GFP_USER); in __bpf_prog_dev_bound_init() 219 prog->aux->offload = offload; in __bpf_prog_dev_bound_init() 302 offload = prog->aux->offload; in bpf_prog_offload_verifier_prep() 319 offload = env->prog->aux->offload; in bpf_prog_offload_verify_insn() 334 offload = env->prog->aux->offload; in bpf_prog_offload_finalize() 355 offload = env->prog->aux->offload; in bpf_prog_offload_replace_insn() 372 offload = env->prog->aux->offload; in bpf_prog_offload_remove_insns() 408 offload = prog->aux->offload; in bpf_prog_offload_translate() 703 offload = prog->aux->offload; in __bpf_offload_dev_match() [all …]
|
| /linux/include/linux/can/ |
| rx-offload.h | 18 struct sk_buff *(*mailbox_read)(struct can_rx_offload *offload, 35 struct can_rx_offload *offload); 37 struct can_rx_offload *offload, 40 struct can_rx_offload *offload, 44 int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload); 45 int can_rx_offload_queue_timestamp(struct can_rx_offload *offload, 50 int can_rx_offload_queue_tail(struct can_rx_offload *offload, 55 void can_rx_offload_irq_finish(struct can_rx_offload *offload); 57 void can_rx_offload_del(struct can_rx_offload *offload); 58 void can_rx_offload_enable(struct can_rx_offload *offload); [all …]
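Taken together with the rx-offload.c hits above and the CAN drivers further down (ti_hecc.c, at91_can.c, bxcan.c, flexcan-core.c), these declarations describe a small driver-facing API. A minimal sketch of the wiring, assuming a hypothetical driver: the `my_*` names, the mailbox range, and the register access are invented; the `can_rx_offload_*` calls and the `mailbox_read`/`mb_first`/`mb_last` fields are the ones listed in these hits.

```c
#include <linux/can/dev.h>
#include <linux/can/rx-offload.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>

struct my_priv {
	struct can_rx_offload offload;	/* embedded, as in ti_hecc.c line 179 */
};

/* Called by the rx-offload core once per pending mailbox. */
static struct sk_buff *my_mailbox_read(struct can_rx_offload *offload,
				       unsigned int mb, u32 *timestamp,
				       bool drop)
{
	struct can_frame *cf;
	struct sk_buff *skb;

	skb = alloc_can_skb(offload->dev, &cf);
	if (!skb)
		return NULL;

	/* ... fill *cf and *timestamp from the mailbox registers ... */
	return skb;
}

static int my_setup(struct net_device *dev, struct my_priv *priv)
{
	priv->offload.mailbox_read = my_mailbox_read;
	priv->offload.mb_first = 0;	/* hypothetical mailbox range */
	priv->offload.mb_last = 15;
	return can_rx_offload_add_timestamp(dev, &priv->offload);
}

static irqreturn_t my_irq(int irq, void *dev_id)
{
	struct my_priv *priv = netdev_priv(dev_id);
	u64 pending = 0;	/* read the pending-mailbox mask from hardware */

	can_rx_offload_irq_offload_timestamp(&priv->offload, pending);
	can_rx_offload_irq_finish(&priv->offload);	/* flush into NAPI */
	return IRQ_HANDLED;
}
```

ndo_open/ndo_stop then bracket the receive path with can_rx_offload_enable()/can_rx_offload_disable(), and teardown uses can_rx_offload_del(), as the ti_hecc.c and flexcan-core.c hits below show.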
|
| /linux/drivers/gpu/drm/amd/display/dc/ |
| dc_helper.c | 48 offload->should_burst_write = in submit_dmub_read_modify_write() 57 offload->reg_seq_count = 0; in submit_dmub_read_modify_write() 58 offload->same_addr_count = 0; in submit_dmub_read_modify_write() 74 offload->reg_seq_count = 0; in submit_dmub_burst_write() 86 offload->reg_seq_count = 0; in submit_dmub_reg_wait() 163 offload->reg_seq_count++; in dmub_reg_value_burst_set_pack() 192 if (offload->reg_seq_count) { in dmub_reg_value_pack() 194 offload->same_addr_count++; in dmub_reg_value_pack() 202 offload->reg_seq_count++; in dmub_reg_value_pack() 653 if (offload && offload->gather_in_progress) { in reg_sequence_start_execute() [all …]
|
| /linux/net/netfilter/ |
| nf_flow_table_offload.c | 875 return nf_flow_offload_tuple(offload->flowtable, offload->flow, in flow_offload_tuple_add() 885 nf_flow_offload_tuple(offload->flowtable, offload->flow, NULL, dir, in flow_offload_tuple_del() 939 nf_flow_offload_tuple(offload->flowtable, offload->flow, NULL, dir, in flow_offload_tuple_stats() 956 offload->flow->timeout = max_t(u64, offload->flow->timeout, in flow_offload_work_stats() 996 kfree(offload); in flow_offload_work_handler() 1025 if (!offload) { in nf_flow_offload_work_alloc() 1030 offload->cmd = cmd; in nf_flow_offload_work_alloc() 1035 return offload; in nf_flow_offload_work_alloc() 1045 if (!offload) in nf_flow_offload_add() 1057 if (!offload) in nf_flow_offload_del() [all …]
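These hits sketch the control-plane pattern used here: each add/del/stats request is packaged into a work item (nf_flow_offload_work_alloc() sets offload->cmd), queued, and freed by flow_offload_work_handler(). A minimal sketch of that pattern with hypothetical `my_*` names; the real code also carries flowtable/flow pointers and uses its own workqueue.

```c
#include <linux/slab.h>
#include <linux/workqueue.h>

enum my_offload_cmd { MY_OFFLOAD_ADD, MY_OFFLOAD_DEL, MY_OFFLOAD_STATS };

struct my_offload_work {
	struct work_struct work;
	enum my_offload_cmd cmd;
};

static void my_offload_work_handler(struct work_struct *work)
{
	struct my_offload_work *offload =
		container_of(work, struct my_offload_work, work);

	switch (offload->cmd) {
	case MY_OFFLOAD_ADD:
		/* program the flow into hardware */
		break;
	case MY_OFFLOAD_DEL:
		/* remove it from hardware */
		break;
	case MY_OFFLOAD_STATS:
		/* pull counters, refresh the flow timeout */
		break;
	}
	kfree(offload);		/* handler owns and frees the request */
}

static int my_offload_queue(struct workqueue_struct *wq,
			    enum my_offload_cmd cmd)
{
	struct my_offload_work *offload;

	/* GFP_ATOMIC, assuming callers may run in packet-path context */
	offload = kzalloc(sizeof(*offload), GFP_ATOMIC);
	if (!offload)
		return -ENOMEM;	/* as in nf_flow_offload_add(): bail if !offload */

	offload->cmd = cmd;
	INIT_WORK(&offload->work, my_offload_work_handler);
	queue_work(wq, &offload->work);
	return 0;
}
```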
|
| /linux/drivers/net/dsa/sja1105/ |
| sja1105_tas.c | 33 offload = tas_data->offload[port]; in sja1105_tas_set_runtime_params() 34 if (!offload) in sja1105_tas_set_runtime_params() 212 if (tas_data->offload[port]) { in sja1105_init_scheduling() 278 offload = tas_data->offload[port]; in sja1105_init_scheduling() 279 if (!offload) in sja1105_init_scheduling() 289 offload->cycle_time, in sja1105_init_scheduling() 396 offload = tas_data->offload[port]; in sja1105_tas_check_conflicts() 397 if (!offload) in sja1105_tas_check_conflicts() 413 div_s64_rem(offload->base_time, offload->cycle_time, &rem); in sja1105_tas_check_conflicts() 894 offload = priv->tas_data.offload[port]; in sja1105_tas_teardown() [all …]
|
| /linux/Documentation/networking/ |
| xfrm_device.rst | 21 hardware offload. 24 * IPsec crypto offload: 27 * IPsec packet offload: 37 like this for crypto offload: 43 offload dev eth4 dir in 45 and for packet offload 51 offload packet dev eth4 dir in 83 relevant to supported offload to make the offload available to the network 126 the skb and the intended offload state to ask the driver if the offload 131 Crypto offload mode: [all …]
|
| tls-offload.rst | 4 Kernel TLS offload 28 (``ethtool`` flags ``tls-hw-tx-offload`` and ``tls-hw-rx-offload``). 63 .. kernel-figure:: tls-offload-layers.svg 64 :alt: TLS offload layers 82 network device is offload-capable and attempts the offload. In case offload 84 as if the offload was never tried. 268 .. kernel-figure:: tls-offload-reorder-good.svg 293 .. kernel-figure:: tls-offload-reorder-bad.svg 511 of the simplifying TLS offload. 514 necessary for TLS offload. [all …]
|
| segmentation-offloads.rst | 12 to take advantage of segmentation offload capabilities of various NICs. 34 offload. For this reason TSO is normally disabled if the Tx checksum 35 offload for a given device is disabled. 37 In order to support TCP segmentation offload it is necessary to populate 55 UDP fragmentation offload allows a device to fragment an oversized UDP 57 fragmentation offload are the same as TSO. However the IPv4 ID for 70 for such instances an additional set of segmentation offload types were 102 header has requested a remote checksum offload. In this case the inner 110 Generic segmentation offload is a pure software offload that is meant to 124 Generic receive offload is the complement to GSO. Ideally any frame [all …]
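This document explains that TSO works by the stack populating GSO metadata on the skb, which the driver then translates into descriptors. A minimal sketch of the driver side, assuming hypothetical `my_desc_*` hardware helpers; the `skb_shinfo()` fields and `CHECKSUM_PARTIAL` handling are the real interfaces the document describes.

```c
#include <linux/skbuff.h>

/* hypothetical hardware-descriptor helpers, for illustration only */
void my_desc_set_mss(u16 mss);
void my_desc_set_segs(u16 segs);
void my_desc_enable_csum(int csum_start, int csum_offset);

static void my_setup_tso(struct sk_buff *skb)
{
	const struct skb_shared_info *shinfo = skb_shinfo(skb);

	if (!skb_is_gso(skb))
		return;		/* stack did not request segmentation */

	/* MSS and segment count the stack populated for this skb */
	my_desc_set_mss(shinfo->gso_size);
	my_desc_set_segs(shinfo->gso_segs);

	/* TSO depends on Tx checksum offload: CHECKSUM_PARTIAL tells the
	 * driver where the checksum starts and where to insert the result.
	 */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		my_desc_enable_csum(skb_checksum_start_offset(skb),
				    skb->csum_offset);
}
```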
|
| nf_flowtable.rst | 9 also provides hardware offload support. The flowtable supports for the layer 3 16 path, from the second packet on, you might decide to offload the flow to the 60 |-----| | 'flow offload' rule | 105 The 'flow offload' action from the forward chain 'y' adds an entry to the 183 Hardware offload 187 means of the 'offload' flag in your flowtable definition, e.g. 194 flags offload; 200 a chance to offload the flow to the network device. 206 to the hardware offload datapath being used by the flow. 208 The flowtable hardware offload infrastructure also supports for the DSA [all …]
|
| /linux/drivers/net/wireless/ath/ath12k/ |
| wow.c | 618 offload->self_ipv6_addr[i][13] = in ath12k_wow_generate_ns_mc_addr() 619 offload->ipv6_addr[i][13]; in ath12k_wow_generate_ns_mc_addr() 620 offload->self_ipv6_addr[i][14] = in ath12k_wow_generate_ns_mc_addr() 621 offload->ipv6_addr[i][14]; in ath12k_wow_generate_ns_mc_addr() 623 offload->ipv6_addr[i][15]; in ath12k_wow_generate_ns_mc_addr() 709 offload->ipv6_count = count; in ath12k_wow_prepare_ns_offload() 725 offload->ipv4_count = ipv4_cnt; in ath12k_wow_prepare_arp_offload() 740 offload = kmalloc(sizeof(*offload), GFP_KERNEL); in ath12k_wow_arp_ns_offload() 741 if (!offload) in ath12k_wow_arp_ns_offload() 748 memset(offload, 0, sizeof(*offload)); in ath12k_wow_arp_ns_offload() [all …]
|
| /linux/drivers/net/ethernet/netronome/ |
| Kconfig | 33 bool "NFP4000/NFP6000 TC Flower offload support" 39 Enable driver support for TC Flower offload on NFP4000 and NFP6000. 40 Say Y, if you are planning to make use of TC Flower offload 42 TC Flower offload requires specific FW to work. 58 bool "NFP IPsec crypto offload support" 63 Enable driver support IPsec crypto offload on NFP NIC. 65 offload. NOTE that IPsec crypto offload on NFP NIC
|
| /linux/drivers/net/can/ |
| ti_hecc.c | 179 struct can_rx_offload offload; member 525 return container_of(offload, struct ti_hecc_priv, offload); in rx_offload_to_priv() 544 skb = alloc_can_skb(offload->dev, &cf); in ti_hecc_mailbox_read() 779 can_rx_offload_irq_finish(&priv->offload); in ti_hecc_interrupt() 808 can_rx_offload_enable(&priv->offload); in ti_hecc_open() 819 can_rx_offload_disable(&priv->offload); in ti_hecc_close() 933 priv->offload.mailbox_read = ti_hecc_mailbox_read; in ti_hecc_probe() 934 priv->offload.mb_first = HECC_RX_FIRST_MBOX; in ti_hecc_probe() 935 priv->offload.mb_last = HECC_RX_LAST_MBOX; in ti_hecc_probe() 954 can_rx_offload_del(&priv->offload); in ti_hecc_probe() [all …]
|
| at91_can.c | 153 struct can_rx_offload offload; member 170 return container_of(offload, struct at91_priv, offload); in rx_offload_to_priv() 603 skb = alloc_can_skb(offload->dev, &cf); in at91_mailbox_read() 632 at91_rx_overflow_err(offload->dev); in at91_mailbox_read() 867 can_rx_offload_irq_finish(&priv->offload); in at91_irq() 898 can_rx_offload_enable(&priv->offload); in at91_open() 920 can_rx_offload_disable(&priv->offload); in at91_close() 1123 priv->offload.mailbox_read = at91_mailbox_read; in at91_can_probe() 1124 priv->offload.mb_first = devtype_data->rx_first; in at91_can_probe() 1125 priv->offload.mb_last = devtype_data->rx_last; in at91_can_probe() [all …]
|
| bxcan.c | 167 struct can_rx_offload offload; member 358 return container_of(offload, struct bxcan_priv, offload); in rx_offload_to_priv() 381 skb = alloc_can_skb(offload->dev, &cf); in bxcan_mailbox_read() 423 can_rx_offload_irq_offload_fifo(&priv->offload); in bxcan_rx_isr() 424 can_rx_offload_irq_finish(&priv->offload); in bxcan_rx_isr() 621 can_rx_offload_irq_finish(&priv->offload); in bxcan_state_change_isr() 757 can_rx_offload_enable(&priv->offload); in bxcan_open() 796 can_rx_offload_disable(&priv->offload); in bxcan_open() 828 can_rx_offload_disable(&priv->offload); in bxcan_stop() 1029 can_rx_offload_del(&priv->offload); in bxcan_probe() [all …]
|
| /linux/net/sched/ |
| sch_taprio.c | 1372 *offload) in taprio_offload_get() 1377 offload); in taprio_offload_get() 1381 return offload; in taprio_offload_get() 1390 offload); in taprio_offload_free() 1466 offload->num_entries = i; in taprio_sched_to_offload() 1527 if (!offload) { in taprio_enable_offload() 1533 offload->extack = extack; in taprio_enable_offload() 1556 offload->extack = NULL; in taprio_enable_offload() 1575 if (!offload) { in taprio_disable_offload() 2366 return taprio_dump_xstats(sch, d, &offload, &offload.stats); in taprio_dump_stats() [all …]
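taprio_offload_get()/taprio_offload_free() are the refcounting helpers sch_taprio.c exports for offloading drivers, and the sja1105_tas.c hits above show a driver keeping such a reference per port. A minimal sketch of that contract, with a hypothetical `my_priv`; the helpers and the `base_time`/`cycle_time`/`num_entries`/`entries[]` fields of `struct tc_taprio_qopt_offload` are real.

```c
#include <net/pkt_sched.h>

struct my_priv {
	struct tc_taprio_qopt_offload *admin_sched;
};

static int my_setup_taprio(struct my_priv *priv,
			   struct tc_taprio_qopt_offload *offload)
{
	/* hold a reference for as long as the schedule is programmed */
	priv->admin_sched = taprio_offload_get(offload);

	/* ... program offload->base_time, offload->cycle_time and
	 * offload->entries[0 .. offload->num_entries - 1] into hardware ...
	 */
	return 0;
}

static void my_teardown_taprio(struct my_priv *priv)
{
	if (priv->admin_sched) {
		taprio_offload_free(priv->admin_sched);	/* drop our reference */
		priv->admin_sched = NULL;
	}
}
```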
|
| /linux/drivers/net/ethernet/intel/idpf/ |
| idpf_singleq_txrx.c | 330 struct idpf_tx_offload_params *offload) in idpf_tx_singleq_build_ctx_desc() argument 335 if (offload->tso_segs) { in idpf_tx_singleq_build_ctx_desc() 338 offload->tso_len); in idpf_tx_singleq_build_ctx_desc() 363 struct idpf_tx_offload_params offload = { }; in idpf_tx_singleq_frame() local 387 offload.tx_flags |= IDPF_TX_FLAGS_IPV4; in idpf_tx_singleq_frame() 389 offload.tx_flags |= IDPF_TX_FLAGS_IPV6; in idpf_tx_singleq_frame() 391 tso = idpf_tso(skb, &offload); in idpf_tx_singleq_frame() 395 csum = idpf_tx_singleq_csum(skb, &offload); in idpf_tx_singleq_frame() 399 if (tso || offload.cd_tunneling) in idpf_tx_singleq_frame() 407 first->packets = offload.tso_segs; in idpf_tx_singleq_frame() [all …]
|
| /linux/Documentation/devicetree/bindings/net/ |
| xlnx,axi-ethernet.yaml | 74 TX checksum offload. 0 or empty for disabling TX checksum offload, 75 1 to enable partial TX checksum offload and 2 to enable full TX 76 checksum offload. 82 RX checksum offload. 0 or empty for disabling RX checksum offload, 83 1 to enable partial RX checksum offload and 2 to enable full RX 84 checksum offload.
|
| /linux/net/tls/ |
| Kconfig | 21 bool "Transport Layer Security HW offload" 28 Enable kernel support for HW offload of the TLS protocol. 37 Enable kernel support for legacy HW offload of the TLS protocol,
|
| /linux/Documentation/scsi/ |
| bnx2fc.rst | 6 Broadcom FCoE offload through bnx2fc is full stateful hardware offload that 12 Despite the fact that the Broadcom's FCoE offload is fully offloaded, it does 14 interface (e.g. eth0) associated with the FCoE offload initiator must be 'up'. 18 Furthermore, the Broadcom FCoE offload solution creates VLAN interfaces to
|
| /linux/Documentation/crypto/ |
| async-tx-api.rst | 34 the details of different hardware offload engine implementations. Code 36 the API will fit the chain of operations to the available offload 42 The API was initially designed to offload the memory copy and 43 xor-parity-calculations of the md-raid5 driver using the offload engines 50 the platform they are running on has offload capabilities. The 96 resources, under control of the offload engine driver, to be reused as 131 context if the offload engine driver supports interrupts, or it is 260 offload engine channel management routines 262 location for offload engine drivers 268 copy offload [all …]
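This document describes an API that transparently uses an offload engine when one exists and falls back to the CPU otherwise. A minimal sketch of a client, assuming the caller already has the pages and a completion; `async_xor()`, `init_async_submit()` and `async_tx_issue_pending_all()` are the real calls from include/linux/async_tx.h.

```c
#include <linux/async_tx.h>
#include <linux/completion.h>

static void my_xor_done(void *param)
{
	complete(param);	/* runs in callback context once the op finishes */
}

static void my_run_xor(struct page *dest, struct page **srcs, int src_cnt,
		       struct completion *done)
{
	struct async_submit_ctl submit;
	struct dma_async_tx_descriptor *tx;

	/* XOR src_cnt source pages into dest; executes on an offload
	 * engine if one is available, synchronously on the CPU if not.
	 */
	init_async_submit(&submit, ASYNC_TX_XOR_DROP_DST, NULL,
			  my_xor_done, done, NULL);
	tx = async_xor(dest, srcs, 0, src_cnt, PAGE_SIZE, &submit);

	async_tx_issue_pending_all();	/* flush pending ops to the engines */
	(void)tx;
}
```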
|
| /linux/drivers/scsi/bnx2fc/ |
| Kconfig | 3 tristate "QLogic FCoE offload support" 14 This driver supports FCoE offload for the QLogic devices.
|
| /linux/tools/testing/selftests/net/ |
| netdevice.sh | 137 ethtool --offload "$netdev" "$feature" off 145 ethtool --offload "$netdev" "$feature" on 154 ethtool --offload "$netdev" "$feature" "$VALUE"
|
| /linux/drivers/net/can/flexcan/ |
| flexcan-core.c | 957 return container_of(offload, struct flexcan_priv, offload); in rx_offload_to_priv() 989 offload->dev->stats.rx_over_errors++; in flexcan_mailbox_read() 990 offload->dev->stats.rx_errors++; in flexcan_mailbox_read() 1006 skb = alloc_canfd_skb(offload->dev, &cfd); in flexcan_mailbox_read() 1402 priv->offload.mb_first); in flexcan_rx_offload_setup() 1588 for (i = priv->offload.mb_first; i <= priv->offload.mb_last; i++) { in flexcan_chip_start() 1747 can_rx_offload_enable(&priv->offload); in flexcan_open() 1776 can_rx_offload_disable(&priv->offload); in flexcan_open() 1779 can_rx_offload_del(&priv->offload); in flexcan_open() 1803 can_rx_offload_disable(&priv->offload); in flexcan_close() [all …]
|
| /linux/drivers/net/netdevsim/ |
| bpf.c | 68 state = env->prog->aux->offload->dev_priv; in nsim_bpf_verify_insn() 99 state = prog->aux->offload->dev_priv; in nsim_prog_set_loaded() 147 if (prog && !prog->aux->offload && !ns->bpf_tc_non_bound_accept) { in nsim_bpf_setup_tc_block_cb() 249 prog->aux->offload->dev_priv = state; in nsim_bpf_create_prog() 257 bpf_offload_dev_priv(prog->aux->offload->offdev); in nsim_bpf_verifier_prep() 267 struct nsim_bpf_bound_prog *state = prog->aux->offload->dev_priv; in nsim_bpf_translate() 277 state = prog->aux->offload->dev_priv; in nsim_bpf_destroy_prog() 295 if (bpf->prog && bpf->prog->aux->offload) { in nsim_setup_prog_checks() 319 state = bpf->prog->aux->offload->dev_priv; in nsim_setup_prog_hw_checks()
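netdevsim is the reference consumer of the kernel/bpf/offload.c infrastructure above: the driver supplies a `struct bpf_prog_offload_ops` table and stashes its per-program state in `prog->aux->offload->dev_priv`. A minimal sketch modeled on these hits; the `my_*` names are hypothetical, the ops struct and the `dev_priv` pointer are the real interface.

```c
#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/slab.h>

struct my_bound_prog {
	int insn_count;		/* toy per-program state */
};

/* Called by the verifier for every instruction of a device-bound program. */
static int my_verify_insn(struct bpf_verifier_env *env, int insn_idx,
			  int prev_insn_idx)
{
	struct my_bound_prog *state = env->prog->aux->offload->dev_priv;

	state->insn_count++;
	return 0;
}

static int my_prepare(struct bpf_prog *prog)
{
	struct my_bound_prog *state;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	prog->aux->offload->dev_priv = state;	/* as in nsim_bpf_create_prog() */
	return 0;
}

static void my_destroy(struct bpf_prog *prog)
{
	kfree(prog->aux->offload->dev_priv);
}

static const struct bpf_prog_offload_ops my_bpf_dev_ops = {
	.prepare	= my_prepare,
	.insn_hook	= my_verify_insn,
	.destroy	= my_destroy,
};
```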
|