Searched refs:tc (Results 1 – 25 of 28) sorted by relevance

/net/rds/
tcp.c
120 tc->t_sock = NULL; in rds_tcp_restore_callbacks()
174 if (tc->t_tinc) { in rds_tcp_reset_callbacks()
176 tc->t_tinc = NULL; in rds_tcp_reset_callbacks()
179 tc->t_tinc_data_rem = 0; in rds_tcp_reset_callbacks()
215 tc->t_sock = sock; in rds_tcp_set_callbacks()
216 tc->t_cpath = cp; in rds_tcp_set_callbacks()
361 list_del(&tc->t_tcp_node); in rds_tcp_conn_free()
375 if (!tc) { in rds_tcp_conn_alloc()
380 tc->t_sock = NULL; in rds_tcp_conn_alloc()
381 tc->t_tinc = NULL; in rds_tcp_conn_alloc()
[all …]
tcp_recv.c
180 tc->t_tinc = tinc; in rds_tcp_data_recv()
201 tc->t_tinc_hdr_rem, in rds_tcp_data_recv()
203 tc->t_tinc_hdr_rem -= to_copy; in rds_tcp_data_recv()
209 tc->t_tinc_data_rem = in rds_tcp_data_recv()
237 if (tc->t_tinc_hdr_rem == 0 && tc->t_tinc_data_rem == 0) { in rds_tcp_data_recv()
249 tc->t_tinc_data_rem = 0; in rds_tcp_data_recv()
250 tc->t_tinc = NULL; in rds_tcp_data_recv()
298 cp->cp_index, tc, sock); in rds_tcp_recv_path()
311 struct rds_tcp_connection *tc; in rds_tcp_data_ready() local
323 tc = cp->cp_transport_data; in rds_tcp_data_ready()
[all …]
tcp_send.c
45 tcp_sock_set_cork(tc->t_sock->sk, true); in rds_tcp_xmit_path_prepare()
52 tcp_sock_set_cork(tc->t_sock->sk, false); in rds_tcp_xmit_path_complete()
85 tc->t_last_sent_nxt = rds_tcp_write_seq(tc); in rds_tcp_xmit()
86 rm->m_ack_seq = tc->t_last_sent_nxt + in rds_tcp_xmit()
97 rm, rds_tcp_write_seq(tc), in rds_tcp_xmit()
105 ret = rds_tcp_sendmsg(tc->t_sock, in rds_tcp_xmit()
125 ret = sock_sendmsg(tc->t_sock, &msg); in rds_tcp_xmit()
183 struct rds_tcp_connection *tc; in rds_tcp_write_space() local
192 tc = cp->cp_transport_data; in rds_tcp_write_space()
194 write_space = tc->t_orig_write_space; in rds_tcp_write_space()
[all …]
tcp_connect.c
44 struct rds_tcp_connection *tc; in rds_tcp_state_change() local
52 tc = cp->cp_transport_data; in rds_tcp_state_change()
108 mutex_lock(&tc->t_conn_path_lock); in rds_tcp_conn_path_connect()
111 mutex_unlock(&tc->t_conn_path_lock); in rds_tcp_conn_path_connect()
189 mutex_unlock(&tc->t_conn_path_lock); in rds_tcp_conn_path_connect()
207 struct socket *sock = tc->t_sock; in rds_tcp_conn_path_shutdown()
210 cp->cp_conn, tc, sock); in rds_tcp_conn_path_shutdown()
223 if (tc->t_tinc) { in rds_tcp_conn_path_shutdown()
224 rds_inc_put(&tc->t_tinc->ti_inc); in rds_tcp_conn_path_shutdown()
225 tc->t_tinc = NULL; in rds_tcp_conn_path_shutdown()
[all …]
tcp.h
56 struct rds_tcp_connection *tc);
57 u32 rds_tcp_write_seq(struct rds_tcp_connection *tc);
58 u32 rds_tcp_snd_una(struct rds_tcp_connection *tc);
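
Every rds/tcp match above touches the per-path struct rds_tcp_connection (tc): attaching and detaching the kernel socket, and dropping any partially reassembled inbound message when callbacks are reset or the path is shut down. The following is a minimal user-space sketch of that teardown pattern, not the kernel code itself; the struct and field names (t_sock, t_tinc, t_tinc_hdr_rem, t_tinc_data_rem) are simplified stand-ins modeled on the snippets.

    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical stand-ins for the kernel structures seen in the matches. */
    struct incoming { unsigned int refcount; };

    struct tcp_conn_state {
        void *t_sock;                 /* underlying socket, NULL when detached */
        struct incoming *t_tinc;      /* partially reassembled inbound message */
        unsigned int t_tinc_hdr_rem;  /* header bytes still expected */
        unsigned int t_tinc_data_rem; /* payload bytes still expected */
    };

    static void inc_put(struct incoming *inc)
    {
        if (inc && --inc->refcount == 0)
            free(inc);
    }

    /* Same shape as the reset/shutdown pattern in rds_tcp_reset_callbacks()
     * and rds_tcp_conn_path_shutdown(): drop the half-received message and
     * forget the socket so a reconnect starts from a clean slate.  (The
     * kernel re-arms t_tinc_hdr_rem to a full header length; zeroing is
     * enough for this sketch.) */
    static void conn_reset(struct tcp_conn_state *tc)
    {
        if (tc->t_tinc) {
            inc_put(tc->t_tinc);
            tc->t_tinc = NULL;
        }
        tc->t_tinc_hdr_rem = 0;
        tc->t_tinc_data_rem = 0;
        tc->t_sock = NULL;
    }

    int main(void)
    {
        static int dummy_sock;
        struct incoming *inc = malloc(sizeof(*inc));
        struct tcp_conn_state tc = { &dummy_sock, inc, 16, 4096 };

        if (!inc)
            return 1;
        inc->refcount = 1;
        conn_reset(&tc);
        printf("sock=%p tinc=%p\n", tc.t_sock, (void *)tc.t_tinc);
        return 0;
    }

The point of the pattern is simply that a later reconnect starts with no stale socket pointer and no half-parsed message.
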
/net/sched/
sch_taprio.c
140 for (tc = 0; tc < num_tc; tc++) { in taprio_calculate_gate_durations()
157 for (tc = 0; tc < num_tc; tc++) in taprio_calculate_gate_durations()
282 for (tc = 0; tc < num_tc; tc++) { in taprio_update_queue_max_sdu()
669 for (tc = 0; tc < num_tc; tc++) { in taprio_set_budgets()
687 for (tc = 0; tc < num_tc; tc++) { in taprio_update_budgets()
785 for (tc = num_tc - 1; tc >= 0; tc--) { in taprio_dequeue_tc_priority()
965 for (tc = 0; tc < num_tc; tc++) { in advance_sched()
1259 for (tc = 0; tc < num_tc; tc++) { in setup_first_end_time()
1534 for (tc = 0; tc < TC_MAX_QUEUE; tc++) { in taprio_enable_offload()
1556 for (tc = 0; tc < TC_MAX_QUEUE; tc++) in taprio_enable_offload()
[all …]
sch_mqprio_lib.c
105 int tc, num_tc = netdev_get_num_tc(dev); in mqprio_qopt_reconstruct() local
110 for (tc = 0; tc < num_tc; tc++) { in mqprio_qopt_reconstruct()
111 qopt->count[tc] = dev->tc_to_txq[tc].count; in mqprio_qopt_reconstruct()
112 qopt->offset[tc] = dev->tc_to_txq[tc].offset; in mqprio_qopt_reconstruct()
121 int tc; in mqprio_fp_to_offload() local
123 for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++) in mqprio_fp_to_offload()
124 if (fp[tc] == TC_FP_PREEMPTIBLE) in mqprio_fp_to_offload()
125 preemptible_tcs |= BIT(tc); in mqprio_fp_to_offload()
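
The two sch_mqprio_lib.c helpers shown here have a simple shape: mqprio_qopt_reconstruct() copies each traffic class's queue count and offset out of the device, and mqprio_fp_to_offload() folds the per-TC express/preemptible selection into a bitmask. A standalone sketch of the bitmask fold follows; the MAX_TC and FP_* constants are illustrative stand-ins for the TC_QOPT_MAX_QUEUE and TC_FP_* values in the pkt_sched UAPI header, not their real definitions.

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_TC          16   /* illustrative stand-in for TC_QOPT_MAX_QUEUE */
    #define FP_EXPRESS       1   /* illustrative stand-ins for the TC_FP_* values */
    #define FP_PREEMPTIBLE   2
    #define BIT(nr)         (1UL << (nr))

    /* Same shape as mqprio_fp_to_offload(): one bit per traffic class that
     * was configured as frame-preemptible. */
    static unsigned long fp_to_bitmask(const uint32_t fp[MAX_TC], int num_tc)
    {
        unsigned long preemptible_tcs = 0;
        int tc;

        for (tc = 0; tc < num_tc; tc++)
            if (fp[tc] == FP_PREEMPTIBLE)
                preemptible_tcs |= BIT(tc);

        return preemptible_tcs;
    }

    int main(void)
    {
        uint32_t fp[MAX_TC] = { FP_EXPRESS, FP_PREEMPTIBLE, FP_PREEMPTIBLE, FP_EXPRESS };

        printf("preemptible_tcs = 0x%lx\n", fp_to_bitmask(fp, 4)); /* prints 0x6 */
        return 0;
    }
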
sch_mqprio.c
175 int err, tc; in mqprio_parse_tc_entry() local
212 int tc, rem; in mqprio_parse_tc_entries() local
215 for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++) in mqprio_parse_tc_entries()
216 fp[tc] = priv->fp[tc]; in mqprio_parse_tc_entries()
225 for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++) { in mqprio_parse_tc_entries()
226 priv->fp[tc] = fp[tc]; in mqprio_parse_tc_entries()
355 int len, tc; in mqprio_init() local
373 for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++) in mqprio_init()
530 int tc; in mqprio_dump_tc_entries() local
532 for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++) { in mqprio_dump_tc_entries()
[all …]
Kconfig
26 from the package iproute2+tc at
757 Say Y here to allow packet sampling tc action. The packet sample
918 tristate "connection tracking tc action"
931 tristate "Frame gate entry list control tc action"
960 Say Y here to allow tc chain misses to continue in OvS datapath in
962 the correct chain in tc software datapath.
964 Say N here if you won't be using tc<->ovs offload or tc chains offload.
act_mpls.c
394 entry->mpls_push.tc = tcf_mpls_tc(act); in tcf_mpls_offload_act_setup()
405 entry->mpls_mangle.tc = tcf_mpls_tc(act); in tcf_mpls_offload_act_setup()
cls_u32.c
354 struct tc_u_common *tc; in tc_u_common_find() local
355 hlist_for_each_entry(tc, tc_u_hash(key), hnode) { in tc_u_common_find()
356 if (tc->ptr == key) in tc_u_common_find()
357 return tc; in tc_u_common_find()
cls_flower.c
949 u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]); in fl_set_key_mpls_lse() local
951 if (tc & ~MPLS_TC_MASK) { in fl_set_key_mpls_lse()
957 lse_val->mpls_tc = tc; in fl_set_key_mpls_lse()
1058 u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TC]); in fl_set_key_mpls() local
1060 if (tc & ~MPLS_TC_MASK) { in fl_set_key_mpls()
1066 lse_val->mpls_tc = tc; in fl_set_key_mpls()
/net/9p/
trans_virtio.c
272 VIRTQUEUE_NUM, req->tc.sdata, req->tc.size); in p9_virtio_request()
448 memcpy(&req->tc.sdata[req->tc.size - 4], &v, 4); in p9_virtio_zc_request()
455 sz = cpu_to_le32(req->tc.size + outlen); in p9_virtio_zc_request()
456 memcpy(&req->tc.sdata[0], &sz, sizeof(sz)); in p9_virtio_zc_request()
467 memcpy(&req->tc.sdata[req->tc.size - 4], &v, 4); in p9_virtio_zc_request()
479 VIRTQUEUE_NUM, req->tc.sdata, req->tc.size); in p9_virtio_zc_request()
client.c
303 p9pdu_reset(&req->tc); in p9_tag_alloc()
322 req->tc.tag = tag; in p9_tag_alloc()
343 p9_fcall_fini(&req->tc); in p9_tag_alloc()
373 if (req->tc.tag != tag) { in p9_tag_lookup()
394 u16 tag = r->tc.tag; in p9_tag_remove()
407 p9_fcall_fini(&r->tc); in p9_req_put()
433 req->tc.tag); in p9_tag_cleanup()
650 p9pdu_prepare(&req->tc, req->tc.tag, type); in p9_client_prepare_req()
654 p9pdu_finalize(c, &req->tc); in p9_client_prepare_req()
696 req->tc.zc = false; in p9_client_rpc()
[all …]
trans_rdma.c
349 c->busa, c->req->tc.size, in send_done()
486 c->req->tc.sdata, c->req->tc.size, in rdma_request()
496 sge.length = c->req->tc.size; in rdma_request()
525 c->req->tc.size, DMA_TO_DEVICE); in rdma_request()
trans_xen.c
120 u32 size = p9_req->tc.size; in p9_xen_request()
133 num = p9_req->tc.tag % XEN_9PFS_NUM_RINGS; in p9_xen_request()
155 xen_9pfs_write_packet(ring->data.out, p9_req->tc.sdata, size, in p9_xen_request()
trans_fd.c
474 m->wbuf = req->tc.sdata; in p9_write_work()
475 m->wsize = req->tc.size; in p9_write_work()
675 m, current, &req->tc, req->tc.id); in p9_fd_request()
trans_usbg.c
98 if (!(p9_tx_req->tc.size % usb9pfs->in_ep->maxpacket)) in usb9pfs_queue_tx()
101 req->buf = p9_tx_req->tc.sdata; in usb9pfs_queue_tx()
102 req->length = p9_tx_req->tc.size; in usb9pfs_queue_tx()
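
All of the 9p transports above consume the same prepared transmit fcall, req->tc: a little-endian buffer whose first four bytes hold the total message size, followed by a one-byte type and the two-byte tag that p9_tag_lookup() later matches replies against; the zero-copy paths even patch the final size back into sdata[0] (trans_virtio.c line 456). A purely illustrative user-space sketch of laying out that size[4] type[1] tag[2] prefix:

    #include <stdint.h>
    #include <stdio.h>

    /* Write v into buf as little-endian, the 9P2000 wire byte order. */
    static void put_le32(uint8_t *buf, uint32_t v)
    {
        buf[0] = v; buf[1] = v >> 8; buf[2] = v >> 16; buf[3] = v >> 24;
    }

    static void put_le16(uint8_t *buf, uint16_t v)
    {
        buf[0] = v; buf[1] = v >> 8;
    }

    int main(void)
    {
        uint8_t sdata[64];
        uint8_t type = 100;      /* Tversion, as an example request type */
        uint16_t tag = 0xffff;   /* NOTAG, used for version negotiation */
        uint32_t size = 7;       /* header only; grows as fields are appended */

        put_le32(&sdata[0], 0);      /* placeholder, patched once size is known */
        sdata[4] = type;
        put_le16(&sdata[5], tag);

        /* ... append message body, bumping size ... */

        put_le32(&sdata[0], size);   /* final size patched back at offset 0 */
        printf("9P message: size=%u type=%u tag=%u\n", size,
               (unsigned)sdata[4], (unsigned)(sdata[5] | (sdata[6] << 8)));
        return 0;
    }
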
/net/6lowpan/
iphc.c
1020 u8 tc = lowpan_iphc_get_tc(hdr), tf[4], val; in lowpan_iphc_tf_compress() local
1023 pr_debug("tc 0x%02x\n", tc); in lowpan_iphc_tf_compress()
1026 if (!tc) { in lowpan_iphc_tf_compress()
1037 lowpan_push_hc_data(hc_ptr, &tc, sizeof(tc)); in lowpan_iphc_tf_compress()
1042 if (!(tc & 0x3f)) { in lowpan_iphc_tf_compress()
1055 tf[0] |= (tc & 0xc0); in lowpan_iphc_tf_compress()
1068 memcpy(&tf[0], &tc, sizeof(tc)); in lowpan_iphc_tf_compress()
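
The iphc.c match is the traffic-class/flow-label (TF) compressor from RFC 6282. It operates on a rearranged traffic-class byte with ECN in the top two bits and DSCP in the low six, which is why the snippet tests tc & 0x3f (is DSCP present?) and masks tc & 0xc0 (ECN) separately. Below is a hedged sketch of the rearrangement and of the TF case selection; the helper names are invented for illustration.

    #include <stdint.h>
    #include <stdio.h>

    /* IPv6 keeps the Traffic Class octet as DSCP(6) | ECN(2); RFC 6282 IPHC
     * carries it as ECN(2) | DSCP(6), so swap the two fields. */
    static uint8_t iphc_rearrange_tc(uint8_t ipv6_tclass)
    {
        uint8_t dscp = ipv6_tclass >> 2;
        uint8_t ecn  = ipv6_tclass & 0x03;

        return (uint8_t)((ecn << 6) | dscp);
    }

    /* Pick the RFC 6282 TF case, mirroring the checks in the snippet:
     * DSCP lives in tc & 0x3f, ECN in tc & 0xc0. */
    static int iphc_pick_tf(uint8_t tc, uint32_t flow_label)
    {
        if (flow_label == 0) {
            if (!tc)
                return 3;   /* TF=11: traffic class and flow label elided */
            return 2;       /* TF=10: ECN+DSCP byte carried, flow label elided */
        }
        if (!(tc & 0x3f))
            return 1;       /* TF=01: ECN + flow label, DSCP elided */
        return 0;           /* TF=00: ECN + DSCP + flow label, nothing elided */
    }

    int main(void)
    {
        uint8_t tc = iphc_rearrange_tc(0xb8);   /* DSCP EF (46), ECN 00 */

        printf("rearranged tc=0x%02x\n", tc);
        printf("TF=%d with flow label 0\n", iphc_pick_tf(tc, 0));
        printf("TF=%d with flow label 0x12345\n", iphc_pick_tf(tc, 0x12345));
        return 0;
    }
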
/net/core/
net-sysfs.c
1440 int num_tc, tc, index, ret; in traffic_class_show() local
1455 tc = netdev_txq_to_tc(dev, index); in traffic_class_show()
1459 if (tc < 0) in traffic_class_show()
1470 sysfs_emit(buf, "%d\n", tc); in traffic_class_show()
1734 if (!dev_maps || tc >= dev_maps->num_tc) in xps_queue_show()
1766 int len, tc, ret; in xps_cpus_show() local
1780 tc = netdev_txq_to_tc(dev, index); in xps_cpus_show()
1781 if (tc < 0) { in xps_cpus_show()
1846 int tc, ret; in xps_rxqs_show() local
1854 tc = netdev_txq_to_tc(dev, index); in xps_rxqs_show()
[all …]
dev.c
2613 if (tc->offset + tc->count > txq) { in netif_setup_tc()
2623 tc = &dev->tc_to_txq[q]; in netif_setup_tc()
2624 if (tc->offset + tc->count > txq) { in netif_setup_tc()
2640 if ((txq - tc->offset) < tc->count) in netdev_txq_to_tc()
2819 if (i == tc && skip_tc) in xps_copy_dev_maps()
2852 if (tc < 0) in __netif_set_xps_queue()
2895 tci = j * num_tc + tc; in __netif_set_xps_queue()
2918 tci = j * num_tc + tc; in __netif_set_xps_queue()
2991 if (i == tc && in __netif_set_xps_queue()
3074 if (tc >= dev->num_tc) in netdev_set_tc_queue()
[all …]
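
The dev.c match at 2640 is the core queue-to-class lookup: netdev_txq_to_tc() walks the per-TC {offset, count} table until the TX queue index falls inside a range, and the XPS code then indexes its maps with j * num_tc + tc. A standalone sketch of that range walk, using a hypothetical three-class layout:

    #include <stdio.h>

    struct tc_range { int offset; int count; };

    /* Return the traffic class owning txq, or -1 if it is outside every
     * range, mirroring the (txq - tc->offset) < tc->count test in
     * netdev_txq_to_tc(). */
    static int txq_to_tc(const struct tc_range *tab, int num_tc, int txq)
    {
        int tc;

        for (tc = 0; tc < num_tc; tc++)
            if ((unsigned int)(txq - tab[tc].offset) < (unsigned int)tab[tc].count)
                return tc;
        return -1;
    }

    int main(void)
    {
        /* Hypothetical layout: TC0 owns queues 0-3, TC1 owns 4-5, TC2 owns 6-7. */
        struct tc_range tab[] = { { 0, 4 }, { 4, 2 }, { 6, 2 } };

        printf("txq 5 -> tc %d\n", txq_to_tc(tab, 3, 5));  /* tc 1 */
        printf("txq 9 -> tc %d\n", txq_to_tc(tab, 3, 9));  /* -1   */
        return 0;
    }
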
/net/mpls/
internal.h
14 u8 tc; member
181 result.tc = (entry & MPLS_LS_TC_MASK) >> MPLS_LS_TC_SHIFT; in mpls_entry_decode()
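
mpls_entry_decode() extracts the 3-bit Traffic Class field from a 32-bit label stack entry with a mask and shift. The sketch below uses the RFC 3032/5462 layout (label in bits 31:12, TC in 11:9, bottom-of-stack in 8, TTL in 7:0); the local LS_* constants mirror the MPLS_LS_* values in the UAPI header but are redefined here so the example stands alone.

    #include <stdint.h>
    #include <stdio.h>

    #define LS_LABEL_MASK  0xFFFFF000u
    #define LS_LABEL_SHIFT 12
    #define LS_TC_MASK     0x00000E00u
    #define LS_TC_SHIFT    9
    #define LS_S_MASK      0x00000100u
    #define LS_TTL_MASK    0x000000FFu

    struct mpls_fields {
        uint32_t label;
        uint8_t  tc;      /* 3-bit Traffic Class (formerly EXP), RFC 5462 */
        uint8_t  bos;     /* bottom-of-stack flag */
        uint8_t  ttl;
    };

    static struct mpls_fields mpls_decode(uint32_t entry)
    {
        struct mpls_fields f = {
            .label = (entry & LS_LABEL_MASK) >> LS_LABEL_SHIFT,
            .tc    = (entry & LS_TC_MASK) >> LS_TC_SHIFT,
            .bos   = !!(entry & LS_S_MASK),
            .ttl   = entry & LS_TTL_MASK,
        };
        return f;
    }

    int main(void)
    {
        /* label 16, TC 5, bottom of stack, TTL 64 */
        uint32_t entry = (16u << LS_LABEL_SHIFT) | (5u << LS_TC_SHIFT) | LS_S_MASK | 64u;
        struct mpls_fields f = mpls_decode(entry);

        printf("label=%u tc=%u bos=%u ttl=%u\n", f.label, f.tc, f.bos, f.ttl);
        return 0;
    }
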
/net/ipv6/
datagram.c
1002 int tc; in ip6_datagram_send_ctl() local
1008 tc = *(int *)CMSG_DATA(cmsg); in ip6_datagram_send_ctl()
1009 if (tc < -1 || tc > 0xff) in ip6_datagram_send_ctl()
1013 ipc6->tclass = tc; in ip6_datagram_send_ctl()
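
ip6_datagram_send_ctl() accepts an IPV6_TCLASS ancillary value in the range -1..255, with -1 meaning "use the socket default". From user space the same field can also be set per socket rather than per packet; a minimal sketch of that form follows (error handling trimmed, traffic-class value chosen only as an example).

    #include <stdio.h>
    #include <netinet/in.h>
    #include <sys/socket.h>
    #include <unistd.h>

    int main(void)
    {
        int fd = socket(AF_INET6, SOCK_DGRAM, 0);
        int tclass = 0xb8;          /* DSCP EF (46) << 2, ECN bits clear */

        if (fd < 0) {
            perror("socket");
            return 1;
        }

        /* Per-socket traffic class; -1 would restore the kernel default,
         * matching the -1..255 range enforced in ip6_datagram_send_ctl(). */
        if (setsockopt(fd, IPPROTO_IPV6, IPV6_TCLASS, &tclass, sizeof(tclass)) < 0)
            perror("setsockopt(IPV6_TCLASS)");

        close(fd);
        return 0;
    }
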
/net/dsa/
Kconfig
132 use with tc-flower.
/net/netfilter/
nf_flow_table_offload.c
113 key->meta.ingress_ifindex = tuple->tc.iifidx; in nf_flow_rule_match()
