| /linux/drivers/net/ethernet/intel/ice/ |
| ice_fwlog.c |
      13  head = rings->head;  in ice_fwlog_ring_full()
      14  tail = rings->tail;  in ice_fwlog_ring_full()
      26  return rings->head == rings->tail;  in ice_fwlog_ring_empty()
      45  struct ice_fwlog_data *ring = &rings->rings[i];  in ice_fwlog_alloc_ring_buffs()
      60  struct ice_fwlog_data *ring = &rings->rings[i];  in ice_fwlog_free_ring_buffs()
     102  ring.rings = kcalloc(ring_size, sizeof(*ring.rings), GFP_KERNEL);  in ice_fwlog_realloc_rings()
     103  if (!ring.rings)  in ice_fwlog_realloc_rings()
     112  kfree(ring.rings);  in ice_fwlog_realloc_rings()
     117  kfree(hw->fwlog_ring.rings);  in ice_fwlog_realloc_rings()
     119  hw->fwlog_ring.rings = ring.rings;  in ice_fwlog_realloc_rings()
     [all …]
|
| ice_fwlog.h |
      56  struct ice_fwlog_data *rings;  member
      67  bool ice_fwlog_ring_full(struct ice_fwlog_ring *rings);
      68  bool ice_fwlog_ring_empty(struct ice_fwlog_ring *rings);
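The ice_fwlog hits are a textbook head/tail ring: empty when the two indices meet, full when the write index would overrun the read index. A minimal sketch of that bookkeeping in plain C; the one-slot-kept-empty convention and all names here are assumptions, not the driver's actual layout:

    #include <stdbool.h>

    struct demo_ring {
            unsigned int head;      /* next slot the producer will write */
            unsigned int tail;      /* next slot the consumer will read */
            unsigned int size;      /* total number of slots */
    };

    static bool demo_ring_empty(const struct demo_ring *r)
    {
            return r->head == r->tail;      /* nothing queued */
    }

    static bool demo_ring_full(const struct demo_ring *r)
    {
            /* one slot stays unused so that full != empty */
            return (r->head + 1) % r->size == r->tail;
    }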
|
| /linux/drivers/net/wireless/broadcom/brcm80211/brcmfmac/ |
| flowring.c |
     160  flow->rings[i] = ring;  in brcmf_flowring_create()
     172  ring = flow->rings[flowid];  in brcmf_flowring_tid()
     192  ring = flow->rings[flowid];  in brcmf_flowring_block()
     202  ring = flow->rings[i];  in brcmf_flowring_block()
     236  ring = flow->rings[flowid];  in brcmf_flowring_delete()
     247  flow->rings[flowid] = NULL;  in brcmf_flowring_delete()
     264  ring = flow->rings[flowid];  in brcmf_flowring_enqueue()
     372  flow->rings = kcalloc(nrofrings, sizeof(*flow->rings),  in brcmf_flowring_attach()
     374  if (!flow->rings) {  in brcmf_flowring_attach()
     393  if (flow->rings[flowid])  in brcmf_flowring_detach()
     [all …]
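brcmf_flowring keeps a kcalloc'd table of ring pointers indexed by flow id and NULLs a slot when its flow is deleted, so a NULL check doubles as "does this flow exist". A hedged userspace sketch of that pattern (all names invented):

    #include <stdlib.h>

    struct demo_ring;                       /* opaque per-flow ring */

    struct demo_flow {
            struct demo_ring **rings;       /* one slot per flow id; NULL = unused */
            unsigned int nrofrings;
    };

    static struct demo_flow *demo_flow_attach(unsigned int nrofrings)
    {
            struct demo_flow *flow = calloc(1, sizeof(*flow));

            if (!flow)
                    return NULL;
            flow->rings = calloc(nrofrings, sizeof(*flow->rings));
            if (!flow->rings) {
                    free(flow);
                    return NULL;
            }
            flow->nrofrings = nrofrings;
            return flow;
    }

    static void demo_flow_delete(struct demo_flow *flow, unsigned int flowid)
    {
            struct demo_ring *ring = flow->rings[flowid];

            if (!ring)
                    return;                 /* flow was never created */
            flow->rings[flowid] = NULL;     /* slot can be reused */
            free(ring);
    }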
|
| /linux/net/9p/ |
| trans_xen.c |
      57  struct xen_9pfs_dataring *rings;  member
     134  ring = &priv->rings[num];  in p9_xen_request()
     286  if (!priv->rings[i].intf)  in xen_9pfs_front_free()
     288  if (priv->rings[i].irq > 0)  in xen_9pfs_front_free()
     290  if (priv->rings[i].data.in) {  in xen_9pfs_front_free()
     306  kfree(priv->rings);  in xen_9pfs_front_free()
     410  priv->rings = kcalloc(XEN_9PFS_NUM_RINGS, sizeof(*priv->rings),  in xen_9pfs_front_init()
     412  if (!priv->rings) {  in xen_9pfs_front_init()
     418  priv->rings[i].priv = priv;  in xen_9pfs_front_init()
     445  priv->rings[i].ref);  in xen_9pfs_front_init()
     [all …]
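xen_9pfs_front_free() shows teardown that tolerates a partially initialized ring array: each slot's resources are checked before release, so the same path serves both normal shutdown and an init failure midway through the array. A userspace sketch of the shape (all fields invented):

    #include <stdlib.h>

    #define DEMO_NUM_RINGS 2

    struct demo_ring_slot {
            void *intf;     /* shared ring page; NULL until set up */
            void *data_in;  /* rx buffer; NULL until mapped */
    };

    /* Safe to call no matter how far init got: release only what
     * was actually acquired, slot by slot. */
    static void demo_front_free(struct demo_ring_slot *rings)
    {
            for (int i = 0; i < DEMO_NUM_RINGS; i++) {
                    if (!rings[i].intf)
                            continue;       /* this ring was never brought up */
                    free(rings[i].data_in); /* free(NULL) is a no-op anyway */
                    free(rings[i].intf);
            }
            free(rings);
    }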
|
| /linux/tools/net/ynl/samples/ |
| ethtool.c |
      16  struct ethtool_rings_get_list *rings;  in main() local
      42  rings = ethtool_rings_get_dump(ys, &rreq);  in main()
      43  if (!rings)  in main()
      47  ynl_dump_foreach(rings, dev) {  in main()
      55  ethtool_rings_get_list_free(rings);  in main()
|
| /linux/drivers/i3c/master/mipi-i3c-hci/ |
| dma.c |
     156  if (!rings)  in hci_dma_cleanup()
     160  rh = &rings->headers[i];  in hci_dma_cleanup()
     189  kfree(rings);  in hci_dma_cleanup()
     195  struct hci_rings_data *rings;  in hci_dma_init() local
     211  rings = kzalloc(struct_size(rings, headers, nr_rings), GFP_KERNEL);  in hci_dma_init()
     212  if (!rings)  in hci_dma_init()
     214  hci->io_data = rings;  in hci_dma_init()
     215  rings->total = nr_rings;  in hci_dma_init()
     227  rh = &rings->headers[i];  in hci_dma_init()
     369  rh = &rings->headers[ring];  in hci_dma_queue_xfer()
     [all …]
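hci_dma_init() allocates the header struct and its per-ring flexible array in one block via the kernel's struct_size() helper, which also guards the size multiplication against overflow. A plain-C equivalent of the same layout (names invented; the overflow check omitted):

    #include <stdlib.h>

    struct demo_ring_header {
            void *xfer;                 /* per-ring descriptor area */
            unsigned int enqueue_ptr;   /* producer index for this ring */
    };

    struct demo_rings_data {
            unsigned int total;
            struct demo_ring_header headers[];  /* flexible array member */
    };

    static struct demo_rings_data *demo_rings_alloc(unsigned int nr_rings)
    {
            /* equivalent of struct_size(rings, headers, nr_rings) */
            size_t sz = sizeof(struct demo_rings_data) +
                        nr_rings * sizeof(struct demo_ring_header);
            struct demo_rings_data *rings = calloc(1, sz);

            if (rings)
                    rings->total = nr_rings;
            return rings;
    }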
|
| /linux/tools/testing/selftests/net/ |
| psock_fanout.c |
     235  static int sock_fanout_read(int fds[], char *rings[], const int expect[])  in sock_fanout_read() argument
     239  ret[0] = sock_fanout_read_ring(fds[0], rings[0]);  in sock_fanout_read()
     240  ret[1] = sock_fanout_read_ring(fds[1], rings[1]);  in sock_fanout_read()
     412  char *rings[2];  in test_datapath() local
     431  rings[0] = sock_fanout_open_ring(fds[0]);  in test_datapath()
     432  rings[1] = sock_fanout_open_ring(fds[1]);  in test_datapath()
     435  sock_fanout_read(fds, rings, expect0);  in test_datapath()
     440  ret = sock_fanout_read(fds, rings, expect1);  in test_datapath()
     445  ret |= sock_fanout_read(fds, rings, expect2);  in test_datapath()
     447  if (munmap(rings[1], RING_NUM_FRAMES * getpagesize()) ||  in test_datapath()
     [all …]
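sock_fanout_open_ring() in this selftest mmaps a TPACKET_V2 receive ring for each fanout socket. A condensed sketch of that setup; fd is assumed to be an AF_PACKET socket, and the ring geometry values are examples only:

    #include <sys/socket.h>
    #include <sys/mman.h>
    #include <linux/if_packet.h>

    static char *demo_open_ring(int fd)
    {
            struct tpacket_req req = {
                    .tp_frame_size = 2048,
                    .tp_frame_nr   = 16,
                    .tp_block_size = 4096,  /* multiple of the page size */
                    .tp_block_nr   = 8,     /* block_sz * block_nr == frame_sz * frame_nr */
            };
            int v = TPACKET_V2;
            char *ring;

            /* version must be chosen before the ring is created */
            if (setsockopt(fd, SOL_PACKET, PACKET_VERSION, &v, sizeof(v)))
                    return NULL;
            if (setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req)))
                    return NULL;
            /* map the whole ring; frames are then read in place */
            ring = mmap(NULL, req.tp_block_size * req.tp_block_nr,
                        PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
            return ring == MAP_FAILED ? NULL : ring;
    }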
|
| toeplitz.c |
     104  static struct ring_state rings[RSS_MAX_CPUS];  variable
     250  do {} while (recv_block(&rings[i]));  in process_rings()
     404  rings[i].cpu = i;  in setup_rings()
     405  rings[i].fd = create_ring(&rings[i].mmap);  in setup_rings()
     410  set_filter(rings[i].fd);  in setup_rings()
     418  if (munmap(rings[i].mmap, ring_block_nr * ring_block_sz))  in cleanup_rings()
     420  if (close(rings[i].fd))  in cleanup_rings()
|
| /linux/Documentation/devicetree/bindings/soc/ti/ |
| k3-ringacc.yaml |
      56  ti,num-rings:
      58  description: Number of rings supported by RA
      60  ti,sci-rm-range-gp-rings:
      69  - ti,num-rings
      70  - ti,sci-rm-range-gp-rings
      90  ti,num-rings = <818>;
      91  ti,sci-rm-range-gp-rings = <0x2>; /* GP ring range */
|
| /linux/Documentation/mhi/ |
| mhi.rst |
      58  Transfer rings: Used by the host to schedule work items for a channel. The
      71  rings are organized as a circular queue of Command Descriptors (CD).
      81  Two unidirectional channels with their associated transfer rings form a
      87  Transfer rings
      91  Transfer Descriptors (TD). TDs are managed through transfer rings, which are
     101  Below is the basic usage of transfer rings:
     110  buffer information, increments the WP to the next element and rings the
     113  Event rings
     119  to the host. Event rings are the array of EDs that resides in the host
     128  Below is the basic usage of event rings:
     [all …]
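The "basic usage of transfer rings" in mhi.rst boils down to: fill the element at the write pointer with buffer information, advance WP with wraparound, then ring the channel doorbell. A self-contained sketch of that producer step; the structures are illustrative, not the MHI spec's exact layout:

    struct demo_td {
            unsigned long long buf_addr;    /* DMA address of the buffer */
            unsigned int len;
    };

    struct demo_xfer_ring {
            struct demo_td *base;
            unsigned int size;  /* number of elements */
            unsigned int wp;    /* host-owned write index */
            unsigned int rp;    /* device-owned read index (read-only here) */
    };

    static int demo_queue_buf(struct demo_xfer_ring *ring,
                              unsigned long long dma_addr, unsigned int len)
    {
            unsigned int next = (ring->wp + 1) % ring->size;

            if (next == ring->rp)
                    return -1;              /* ring full */
            ring->base[ring->wp].buf_addr = dma_addr;
            ring->base[ring->wp].len = len;
            ring->wp = next;                /* publish the element */
            /* real code would now write WP to the channel doorbell register */
            return 0;
    }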
|
| /linux/tools/lib/bpf/ |
| ringbuf.c |
      37  struct ring **rings;  member
     102  tmp = libbpf_reallocarray(rb->rings, rb->ring_cnt + 1, sizeof(*rb->rings));  in ring_buffer__add()
     105  rb->rings = tmp;  in ring_buffer__add()
     115  rb->rings[rb->ring_cnt] = r;  in ring_buffer__add()
     180  ringbuf_free_ring(rb, rb->rings[i]);  in ring_buffer__free()
     185  free(rb->rings);  in ring_buffer__free()
     293  struct ring *ring = rb->rings[i];  in ring_buffer__consume_n()
     318  struct ring *ring = rb->rings[i];  in ring_buffer__consume()
     347  struct ring *ring = rb->rings[ring_id];  in ring_buffer__poll()
     370  return rb->rings[idx];  in ring_buffer__ring()
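ringbuf.c is libbpf's ring buffer manager: it grows the rings array with libbpf_reallocarray() as maps are added and multiplexes them behind one epoll fd. Consumer-side usage of the public API looks roughly like this; map_fd is assumed to refer to a BPF_MAP_TYPE_RINGBUF map:

    #include <bpf/libbpf.h>
    #include <stdio.h>

    /* invoked once per record; a nonzero return stops consumption */
    static int handle_sample(void *ctx, void *data, size_t size)
    {
            printf("got %zu bytes\n", size);
            return 0;
    }

    static int demo_consume(int map_fd)
    {
            struct ring_buffer *rb;
            int err;

            rb = ring_buffer__new(map_fd, handle_sample, NULL, NULL);
            if (!rb)
                    return -1;
            /* drain ready records; each one invokes handle_sample() */
            err = ring_buffer__poll(rb, 100 /* timeout, ms */);
            ring_buffer__free(rb);
            return err < 0 ? err : 0;
    }

Additional ringbuf maps can be attached to the same manager with ring_buffer__add(), which is the call growing the rings array in the hits above.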
|
| /linux/drivers/crypto/intel/qat/qat_common/ |
| adf_transport.c |
     267  ring = &bank->rings[ring_num];  in adf_create_ring()
     338  adf_handle_response(&bank->rings[i]);  in adf_ring_response_handler()
     406  bank->rings = kzalloc_node(size, GFP_KERNEL,  in adf_init_bank()
     408  if (!bank->rings)  in adf_init_bank()
     425  ring = &bank->rings[i];  in adf_init_bank()
     439  tx_ring = &bank->rings[i - hw_data->tx_rx_gap];  in adf_init_bank()
     456  ring = &bank->rings[i];  in adf_init_bank()
     460  kfree(bank->rings);  in adf_init_bank()
     530  struct adf_etr_ring_data *ring = &bank->rings[i];  in cleanup_bank()
     538  kfree(bank->rings);  in cleanup_bank()
     [all …]
|
| adf_gen4_hw_data.c |
     515  state->rings[i].head = ops->read_csr_ring_head(base, bank, i);  in bank_state_save()
     516  state->rings[i].tail = ops->read_csr_ring_tail(base, bank, i);  in bank_state_save()
     517  state->rings[i].config = ops->read_csr_ring_config(base, bank, i);  in bank_state_save()
     518  state->rings[i].base = ops->read_csr_ring_base(base, bank, i);  in bank_state_save()
     539  ops->write_csr_ring_base(base, bank, i, state->rings[i].base);  in bank_state_restore()
     548  ops->write_csr_ring_head(base, bank, tx, state->rings[tx].head);  in bank_state_restore()
     549  ops->write_csr_ring_tail(base, bank, tx, state->rings[tx].tail);  in bank_state_restore()
     560  ops->write_csr_ring_head(base, bank, tx, state->rings[tx].head);  in bank_state_restore()
     563  ops->write_csr_ring_tail(base, bank, rx, state->rings[rx].tail);  in bank_state_restore()
     568  ops->write_csr_ring_head(base, bank, rx, state->rings[rx].head);  in bank_state_restore()
     [all …]
|
| /linux/drivers/soc/ti/ |
| k3-ringacc.c |
     219  struct k3_ring *rings;  member
     381  ringacc->rings[id].use_count++;  in k3_ringacc_request_ring()
     383  return &ringacc->rings[id];  in k3_ringacc_request_ring()
     419  *fwd_ring = &ringacc->rings[fwd_id];  in k3_dmaring_request_dual_ring()
     422  ringacc->rings[fwd_id].use_count++;  in k3_dmaring_request_dual_ring()
    1409  ringacc->rings = devm_kzalloc(dev,  in k3_ringacc_init()
    1410  sizeof(*ringacc->rings) *  in k3_ringacc_init()
    1422  ringacc->rings[i].rt = base_rt +  in k3_ringacc_init()
    1427  ringacc->rings[i].ring_id = i;  in k3_ringacc_init()
    1487  ringacc->rings = devm_kzalloc(dev,  in k3_ringacc_dmarings_init()
     [all …]
|
| /linux/io_uring/ |
| io_uring.c |
     186  return READ_ONCE(ctx->rings->cq.tail) - READ_ONCE(ctx->rings->cq.head);  in __io_cqring_events_user()
     667  if (ctx->rings)  in io_cqring_overflow_kill()
     780  struct io_rings *rings = ctx->rings;  in io_cqe_cache_refill() local
    2243  struct io_rings *rings = ctx->rings;  in io_commit_sqring() local
    2518  struct io_rings *rings = ctx->rings;  in io_cqring_wait() local
    2638  return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;  in io_cqring_wait()
    2667  vunmap(ctx->rings);  in io_rings_free()
    2671  ctx->rings = NULL;  in io_rings_free()
    3095  if (!ctx->rings)  in io_uring_try_cancel_requests()
    3449  if (IS_ERR(rings))  in io_allocate_scq_urings()
     [all …]
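These hits are the kernel side of io_uring's shared submission/completion rings; from userspace the same rings are usually driven through liburing rather than raw mmap. A minimal no-op round trip with liburing (error handling trimmed for brevity):

    #include <liburing.h>

    static int demo_nop(void)
    {
            struct io_uring ring;
            struct io_uring_sqe *sqe;
            struct io_uring_cqe *cqe;
            int ret;

            ret = io_uring_queue_init(8, &ring, 0);  /* 8-entry SQ ring */
            if (ret < 0)
                    return ret;

            sqe = io_uring_get_sqe(&ring);  /* NULL only if the SQ ring is full */
            io_uring_prep_nop(sqe);         /* queue a no-op request */
            io_uring_submit(&ring);         /* publish the SQ tail, enter the kernel */

            ret = io_uring_wait_cqe(&ring, &cqe);   /* reap from the CQ ring */
            if (!ret)
                    io_uring_cqe_seen(&ring, cqe);  /* advance the CQ head */

            io_uring_queue_exit(&ring);
            return ret;
    }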
|
| io_uring.h |
      58  int dist = READ_ONCE(ctx->rings->cq.tail) - (int) iowq->cq_tail;  in io_should_wake()
     256  smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);  in io_commit_cqring()
     285  struct io_rings *r = ctx->rings;  in io_sqring_full()
     299  struct io_rings *rings = ctx->rings;  in io_sqring_entries() local
     303  entries = smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;  in io_sqring_entries()
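io_sqring_entries() pairs an smp_load_acquire() of the tail the other side publishes with a plain read of the locally cached head, and io_commit_cqring() is the matching smp_store_release(); unsigned subtraction makes the count wraparound-safe. The same idiom in C11 atomics (userspace sketch, names invented):

    #include <stdatomic.h>

    struct demo_sq {
            _Atomic unsigned int tail;  /* producer-owned, published with release */
            unsigned int cached_head;   /* consumer's private head copy */
    };

    static unsigned int demo_entries(struct demo_sq *sq)
    {
            /* acquire pairs with the producer's release store of tail, so
             * every entry written before that store is visible here */
            unsigned int tail = atomic_load_explicit(&sq->tail,
                                                     memory_order_acquire);

            /* unsigned subtraction handles index wraparound correctly */
            return tail - sq->cached_head;
    }

    static void demo_publish(struct demo_sq *sq, unsigned int new_tail)
    {
            /* all entry writes above become visible before the new tail does */
            atomic_store_explicit(&sq->tail, new_tail, memory_order_release);
    }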
|
| /linux/drivers/block/xen-blkback/ |
| xenbus.c |
      84  if (!blkif->rings || !blkif->rings[0].irq || !blkif->vbd.bdev_file)  in xen_update_blkif_status()
     110  ring = &blkif->rings[i];  in xen_update_blkif_status()
     124  ring = &blkif->rings[i];  in xen_update_blkif_status()
     136  if (!blkif->rings)  in xen_blkif_alloc_rings()
     140  struct xen_blkif_ring *ring = &blkif->rings[r];  in xen_blkif_alloc_rings()
     274  struct xen_blkif_ring *ring = &blkif->rings[r];  in xen_blkif_disconnect()
     338  kfree(blkif->rings);  in xen_blkif_disconnect()
     339  blkif->rings = NULL;  in xen_blkif_disconnect()
     389  if (!blkif->rings) \
     393  struct xen_blkif_ring *ring = &blkif->rings[i]; \
     [all …]
|
| /linux/Documentation/networking/ |
| af_xdp.rst |
      24  syscall. Associated with each XSK are two rings: the RX ring and the
      26  packets on the TX ring. These rings are registered and sized with the
      28  to have at least one of these rings for each socket. An RX or TX
      42  UMEM also has two rings: the FILL ring and the COMPLETION ring. The
      51  and the TX and COMPLETION rings are used for the TX path.
      59  corresponding two rings, sets the XDP_SHARED_UMEM flag in the bind
     136  one FILL ring, one COMPLETION ring, four TX rings and four RX rings.
     138  The rings are head(producer)/tail(consumer) based rings. A producer
     149  The size of the rings need to be of size power of two.
     264  rings as usual, but you are going to have one or more FILL and
     [all …]
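As the excerpt says, the four ring types are registered and sized with setsockopt() on the AF_XDP socket, each option taking the ring size in descriptors (a power of two). A sketch of the sizing step; UMEM registration, the mmap of the rings, and bind() are omitted:

    #include <sys/socket.h>
    #include <linux/if_xdp.h>

    static int demo_size_rings(int xsk_fd)
    {
            unsigned int ring_sz = 2048;    /* descriptors; must be a power of two */

            if (setsockopt(xsk_fd, SOL_XDP, XDP_RX_RING,
                           &ring_sz, sizeof(ring_sz)) ||
                setsockopt(xsk_fd, SOL_XDP, XDP_TX_RING,
                           &ring_sz, sizeof(ring_sz)) ||
                setsockopt(xsk_fd, SOL_XDP, XDP_UMEM_FILL_RING,
                           &ring_sz, sizeof(ring_sz)) ||
                setsockopt(xsk_fd, SOL_XDP, XDP_UMEM_COMPLETION_RING,
                           &ring_sz, sizeof(ring_sz)))
                    return -1;
            return 0;
    }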
|
| /linux/drivers/crypto/inside-secure/ |
| safexcel.c |
      51  for (i = 0; i < priv->config.rings; i++) {  in eip197_trc_cache_setupvirt()
     510  for (i = 0; i < priv->config.rings; i++) {  in safexcel_hw_setup_cdesc_rings()
     558  for (i = 0; i < priv->config.rings; i++) {  in safexcel_hw_setup_rdesc_rings()
     600  priv->config.pes, priv->config.rings);  in safexcel_hw_init()
     720  for (i = 0; i < priv->config.rings; i++) {  in safexcel_hw_init()
     746  for (i = 0; i < priv->config.rings; i++) {  in safexcel_hw_init()
    1331  priv->config.rings = min_t(u32, priv->config.rings,  in safexcel_configure()
    1601  priv->config.rings + 1,  in safexcel_probe_generic()
    1602  priv->config.rings + 1,  in safexcel_probe_generic()
    1617  for (i = 0; i < priv->config.rings; i++) {  in safexcel_probe_generic()
     [all …]
|
| /linux/Documentation/devicetree/bindings/soc/qcom/ |
| qcom,wcnss.yaml |
      75  Should reference the tx-enable and tx-rings-empty SMEM states.
      80  - const: tx-rings-empty
     131  qcom,smem-state-names = "tx-enable", "tx-rings-empty";
|
| /linux/include/linux/ |
| ptr_ring.h |
     621  static inline int ptr_ring_resize_multiple_noprof(struct ptr_ring **rings,  in ptr_ring_resize_multiple_noprof() argument
     641  spin_lock_irqsave(&(rings[i])->consumer_lock, flags);  in ptr_ring_resize_multiple_noprof()
     642  spin_lock(&(rings[i])->producer_lock);  in ptr_ring_resize_multiple_noprof()
     643  queues[i] = __ptr_ring_swap_queue(rings[i], queues[i],  in ptr_ring_resize_multiple_noprof()
     645  spin_unlock(&(rings[i])->producer_lock);  in ptr_ring_resize_multiple_noprof()
     646  spin_unlock_irqrestore(&(rings[i])->consumer_lock, flags);  in ptr_ring_resize_multiple_noprof()
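ptr_ring_resize_multiple_noprof() resizes a batch of these locked pointer FIFOs, taking both the consumer and producer locks of each while swapping in the new queue. Single-ring usage of the underlying API looks like this (kernel-context sketch; the queued object is just an example argument):

    #include <linux/ptr_ring.h>
    #include <linux/errno.h>
    #include <linux/slab.h>

    static int demo_ptr_ring(void *obj)
    {
            struct ptr_ring r;
            void *item;
            int err;

            err = ptr_ring_init(&r, 64, GFP_KERNEL);  /* 64 pointer slots */
            if (err)
                    return err;

            err = ptr_ring_produce(&r, obj);          /* -ENOSPC if full */
            if (!err) {
                    item = ptr_ring_consume(&r);      /* NULL if empty */
                    err = item == obj ? 0 : -EINVAL;
            }

            ptr_ring_cleanup(&r, NULL);  /* NULL destructor: ring already drained */
            return err;
    }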
|
| skb_array.h |
     202  static inline int skb_array_resize_multiple_noprof(struct skb_array **rings,  in skb_array_resize_multiple_noprof() argument
     207  return ptr_ring_resize_multiple_noprof((struct ptr_ring **)rings,  in skb_array_resize_multiple_noprof()
|
| /linux/drivers/net/ethernet/netronome/nfp/ |
| Makefile |
      24  nfd3/rings.o \
      27  nfdk/rings.o \
|
| /linux/Documentation/networking/device_drivers/ethernet/google/ |
| gve.rst |
     125  The descriptor rings are power-of-two-sized ring buffers consisting of
     136  gve maps the buffers for transmit rings into a FIFO and copies the packets
     141  The buffers for receive rings are put into a data ring that is the same
     143  the rings together.
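Power-of-two sizing, as gve.rst describes, lets a driver reduce a free-running sequence number to a slot index with a mask instead of a divide. An illustrative helper, not gve's actual code:

    struct demo_desc_ring {
            void *descs;
            unsigned int size;      /* number of slots, a power of two */
    };

    static unsigned int demo_slot(const struct demo_desc_ring *ring,
                                  unsigned int seqno)
    {
            /* size is a power of two, so size - 1 is an all-ones mask */
            return seqno & (ring->size - 1);
    }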
|
| /linux/drivers/gpu/drm/amd/amdgpu/ |
| amdgpu_fence.c |
     601  struct amdgpu_ring *ring = adev->rings[i];  in amdgpu_fence_driver_hw_fini()
     631  struct amdgpu_ring *ring = adev->rings[i];  in amdgpu_fence_driver_isr_toggle()
     648  struct amdgpu_ring *ring = adev->rings[i];  in amdgpu_fence_driver_sw_fini()
     687  struct amdgpu_ring *ring = adev->rings[i];  in amdgpu_fence_driver_hw_init()
     901  struct amdgpu_ring *ring = adev->rings[i];  in amdgpu_debugfs_fence_info_show()
|