/drivers/usb/gadget/function/uvc_queue.c
    134  queue->queue.type = type;    in uvcg_queue_init()
    136  queue->queue.drv_priv = queue;    in uvcg_queue_init()
    138  queue->queue.ops = &uvc_queue_qops;    in uvcg_queue_init()
    139  queue->queue.lock = lock;    in uvcg_queue_init()
    149  queue->queue.dev = dev;    in uvcg_queue_init()
    151  ret = vb2_queue_init(&queue->queue);    in uvcg_queue_init()
    167  vb2_queue_release(&queue->queue);    in uvcg_free_buffers()
    178  ret = vb2_reqbufs(&queue->queue, rb);    in uvcg_alloc_buffers()
    217  return vb2_mmap(&queue->queue, vma);    in uvcg_queue_mmap()
    295  ret = vb2_streamon(&queue->queue, queue->queue.type);    in uvcg_queue_enable()
    [all …]
|
/drivers/net/wireless/st/cw1200/queue.c
    97   list_for_each_entry_safe(iter, tmp, &queue->queue, head) {    in __cw1200_queue_gc()
    119  if (queue->num_queued <= (queue->capacity >> 1)) {    in __cw1200_queue_gc()
    172  memset(queue, 0, sizeof(*queue));    in cw1200_queue_init()
    177  INIT_LIST_HEAD(&queue->queue);    in cw1200_queue_init()
    197  list_add_tail(&queue->pool[i].head, &queue->free_pool);    in cw1200_queue_init()
    211  list_splice_tail_init(&queue->queue, &queue->pending);    in cw1200_queue_clear()
    268  ret = queue->num_queued - queue->num_pending;    in cw1200_queue_get_num_queued()
    296  list_move_tail(&item->head, &queue->queue);    in cw1200_queue_put()
    343  list_for_each_entry(item, &queue->queue, head) {    in cw1200_queue_get()
    408  list_move(&item->head, &queue->queue);    in cw1200_queue_requeue()
    [all …]
|
/drivers/md/dm-vdo/funnel-workqueue.c
    156  queue->common.type->start(queue->private);    in run_start_hook()
    162  queue->common.type->finish(queue->private);    in run_finish_hook()
    279  vdo_free(queue);    in free_simple_work_queue()
    294  vdo_free(queue);    in free_round_robin_work_queue()
    299  if (queue == NULL)    in vdo_free_work_queue()
    337  vdo_free(queue);    in make_simple_work_queue()
    402  &queue);    in vdo_make_work_queue()
    409  vdo_free(queue);    in vdo_make_work_queue()
    420  vdo_free(queue);    in vdo_make_work_queue()
    613  return (queue == NULL) ? NULL : &queue->common;    in vdo_get_current_work_queue()
    [all …]
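
The run_start_hook()/run_finish_hook() matches above drive optional per-thread callbacks through the queue's type descriptor. A minimal user-space sketch of that hook pattern follows; the struct and function names are illustrative, not the dm-vdo API.

```c
#include <stdio.h>

/* Per-queue-type callbacks, both optional. */
struct work_queue_type {
	void (*start)(void *ctx);   /* run when the worker thread starts */
	void (*finish)(void *ctx);  /* run when the worker thread exits */
};

struct work_queue {
	const struct work_queue_type *type;
	void *private_data;         /* opaque context handed to the hooks */
};

static void run_start_hook(struct work_queue *queue)
{
	if (queue->type->start != NULL)
		queue->type->start(queue->private_data);
}

static void run_finish_hook(struct work_queue *queue)
{
	if (queue->type->finish != NULL)
		queue->type->finish(queue->private_data);
}

static void demo_start(void *ctx)  { printf("start: %s\n", (const char *)ctx); }
static void demo_finish(void *ctx) { printf("finish: %s\n", (const char *)ctx); }

int main(void)
{
	const struct work_queue_type demo_type = {
		.start = demo_start,
		.finish = demo_finish,
	};
	struct work_queue queue = { .type = &demo_type, .private_data = "demo" };

	run_start_hook(&queue);
	/* ... worker loop would run here ... */
	run_finish_hook(&queue);
	return 0;
}
```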
|
/drivers/md/dm-vdo/funnel-queue.c
    15   struct funnel_queue *queue;    in vdo_make_funnel_queue() local
    25   queue->stub.next = NULL;    in vdo_make_funnel_queue()
    26   queue->newest = &queue->stub;    in vdo_make_funnel_queue()
    27   queue->oldest = &queue->stub;    in vdo_make_funnel_queue()
    29   *queue_ptr = queue;    in vdo_make_funnel_queue()
    35   vdo_free(queue);    in vdo_free_funnel_queue()
    48   if (oldest == &queue->stub) {    in get_oldest()
    60   queue->oldest = oldest;    in get_oldest()
    83   vdo_funnel_queue_put(queue, &queue->stub);    in get_oldest()
    154  if (queue->oldest != &queue->stub)    in vdo_is_funnel_queue_idle()
    [all …]
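
The stub/newest/oldest fields above are a multi-producer, single-consumer "funnel": producers atomically swap themselves onto the newest end, and the lone consumer walks from oldest, re-inserting a permanent stub entry so the chain is never empty. Below is a self-contained user-space sketch of that idea using C11 atomics; the fq_* names are invented for illustration, and the sketch omits the memory-ordering and cache-line tuning a production queue needs.

```c
#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

struct fq_entry {
	_Atomic(struct fq_entry *) next;
};

struct funnel_queue {
	_Atomic(struct fq_entry *) newest; /* producers swap onto this end */
	struct fq_entry *oldest;           /* touched only by the consumer */
	struct fq_entry stub;              /* permanent placeholder entry */
};

static void fq_init(struct funnel_queue *q)
{
	atomic_store(&q->stub.next, NULL);
	atomic_store(&q->newest, &q->stub);
	q->oldest = &q->stub;
}

/* Safe for any number of concurrent producers. */
static void fq_put(struct funnel_queue *q, struct fq_entry *e)
{
	struct fq_entry *prev;

	atomic_store(&e->next, NULL);
	prev = atomic_exchange(&q->newest, e); /* claim the newest slot... */
	atomic_store(&prev->next, e);          /* ...then link the predecessor */
}

/* Single consumer only; returns NULL when nothing is ready. */
static struct fq_entry *fq_poll(struct funnel_queue *q)
{
	struct fq_entry *oldest = q->oldest;
	struct fq_entry *next;

	if (oldest == &q->stub) {              /* skip the stub if it is first */
		next = atomic_load(&oldest->next);
		if (next == NULL)
			return NULL;
		q->oldest = oldest = next;
	}

	next = atomic_load(&oldest->next);
	if (next == NULL) {
		/* Last visible entry: re-enqueue the stub behind it so the
		 * entry can be handed out without emptying the chain. */
		if (oldest != atomic_load(&q->newest))
			return NULL;           /* a producer is mid-insert */
		fq_put(q, &q->stub);
		next = atomic_load(&oldest->next);
		if (next == NULL)
			return NULL;
	}

	q->oldest = next;
	return oldest;
}

int main(void)
{
	struct funnel_queue q;
	struct fq_entry a, b;

	fq_init(&q);
	fq_put(&q, &a);
	fq_put(&q, &b);
	printf("%d %d %d\n", fq_poll(&q) == &a, fq_poll(&q) == &b, fq_poll(&q) == NULL);
	return 0;
}
```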
|
/drivers/net/wireless/broadcom/b43legacy/pio.c
    205  struct b43legacy_pioqueue *queue = packet->queue;    in free_txpacket() local
    219  struct b43legacy_pioqueue *queue = packet->queue;    in pio_tx_packet() local
    241  if (queue->tx_devq_used + octets > queue->tx_devq_size)    in pio_tx_packet()
    308  packet->queue = queue;    in setup_txqueues()
    323  queue = kzalloc(sizeof(*queue), GFP_KERNEL);    in b43legacy_setup_pioqueue()
    324  if (!queue)    in b43legacy_setup_pioqueue()
    381  if (!queue)    in b43legacy_destroy_pioqueue()
    413  if (!queue)    in b43legacy_pio_init()
    418  if (!queue)    in b43legacy_pio_init()
    423  if (!queue)    in b43legacy_pio_init()
    [all …]
|
/drivers/nvme/target/tcp.c
    460   struct nvmet_tcp_queue *queue = cmd->queue;    in nvmet_setup_c2h_data_pdu() local
    575   struct nvmet_tcp_queue *queue = cmd->queue;    in nvmet_tcp_queue_response() local
    641   struct nvmet_tcp_queue *queue = cmd->queue;    in nvmet_try_send_data() local
    753   struct nvmet_tcp_queue *queue = cmd->queue;    in nvmet_try_send_ddgst() local
    958   queue->idx, data->ttag, queue->nr_cmds);    in nvmet_tcp_handle_h2c_data_pdu()
    1017  queue->idx, queue->state);    in nvmet_tcp_done_recv_pdu()
    1029  queue->cmd = nvmet_tcp_get_cmd(queue);    in nvmet_tcp_done_recv_pdu()
    1033  queue->idx, queue->nr_cmds, queue->send_list_len,    in nvmet_tcp_done_recv_pdu()
    1075  queue->cmd->req.execute(&queue->cmd->req);    in nvmet_tcp_done_recv_pdu()
    1209  struct nvmet_tcp_queue *queue = cmd->queue;    in nvmet_tcp_prep_recv_ddgst() local
    [all …]
|
/drivers/nvme/target/rdma.c
    663   struct nvmet_rdma_queue *queue = rsp->queue;    in nvmet_rdma_release_rsp() local
    945   struct nvmet_rdma_queue *queue = rsp->queue;    in nvmet_rdma_execute_command() local
    1042  cmd->queue = queue;    in nvmet_rdma_recv_done()
    1053  rsp->queue = queue;    in nvmet_rdma_recv_done()
    1305  if (queue->port->pi_enable && queue->host_qid)    in nvmet_rdma_create_queue_ib()
    1313  queue->qp = queue->cm_id->qp;    in nvmet_rdma_create_queue_ib()
    1323  queue->cmds[i].queue = queue;    in nvmet_rdma_create_queue_ib()
    1359  nvmet_rdma_free_cmds(queue->dev, queue->cmds,    in nvmet_rdma_free_queue()
    1433  queue = kzalloc(sizeof(*queue), GFP_KERNEL);    in nvmet_rdma_alloc_queue()
    1476  queue->comp_vector = !queue->host_qid ? 0 :    in nvmet_rdma_alloc_queue()
    [all …]
|
/drivers/iio/buffer/industrialio-buffer-dma.c
    101  struct iio_dma_buffer_queue *queue = block->queue;    in iio_buffer_block_release() local
    195  block->queue = queue;    in iio_dma_buffer_alloc_block()
    234  struct iio_dma_buffer_queue *queue = block->queue;    in iio_dma_buffer_block_done() local
    315  return queue->fileio.enabled || !atomic_read(&queue->num_dmabufs);    in iio_dma_buffer_can_use_fileio()
    344  queue->fileio.enabled = iio_dma_buffer_can_use_fileio(queue);    in iio_dma_buffer_request_update()
    458  if (!queue->ops)    in iio_dma_buffer_submit_block()
    464  ret = queue->ops->submit(queue, block);    in iio_dma_buffer_submit_block()
    527  if (queue->ops && queue->ops->abort)    in iio_dma_buffer_disable()
    528  queue->ops->abort(queue);    in iio_dma_buffer_disable()
    738  struct iio_dma_buffer_queue *queue = block->queue;    in iio_dma_can_enqueue_block() local
    [all …]
|
/drivers/net/xen-netback/rx.c
    92   if (queue->rx_queue_len >= queue->rx_queue_max) {    in xenvif_rx_queue_tail()
    119  xenvif_update_needed_slots(queue, skb_peek(&queue->rx_queue));    in xenvif_rx_dequeue()
    122  if (queue->rx_queue_len < queue->rx_queue_max) {    in xenvif_rx_dequeue()
    125  txq = netdev_get_tx_queue(queue->vif->dev, queue->id);    in xenvif_rx_dequeue()
    164  gnttab_batch_copy(queue->rx_copy.op, queue->rx_copy.num);    in xenvif_rx_copy_flush()
    204  op = &queue->rx_copy.op[queue->rx_copy.num];    in xenvif_rx_copy_add()
    226  queue->rx_copy.idx[queue->rx_copy.num] = queue->rx.req_cons;    in xenvif_rx_copy_add()
    332  queue->rx.rsp_prod_pvt = queue->rx.req_cons;    in xenvif_rx_complete()
    473  req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons);    in xenvif_rx_skb()
    474  rsp = RING_GET_RESPONSE(&queue->rx, queue->rx.req_cons);    in xenvif_rx_skb()
    [all …]
|
/drivers/net/xen-netback/netback.c
    163   wake_up(&queue->wq);    in xenvif_kick_thread()
    191   max_credit = queue->remaining_credit + queue->credit_bytes;    in tx_add_credit()
    201   struct xenvif_queue *queue = timer_container_of(queue, t,    in xenvif_tx_credit_callback() local
    203   tx_add_credit(queue);    in xenvif_tx_credit_callback()
    347   queue->pages_to_map[mop-queue->tx_map_ops] = queue->mmap_pages[pending_idx];    in xenvif_tx_create_map_op()
    934   if (queue->tx.sring->req_prod - queue->tx.req_cons >    in xenvif_tx_build_gops()
    939   queue->tx.sring->req_prod, queue->tx.req_cons,    in xenvif_tx_build_gops()
    1299  BUG_ON(queue->dealloc_prod - queue->dealloc_cons >=    in xenvif_zerocopy_callback()
    1348  queue->pages_to_unmap[gop - queue->tx_unmap_ops] =    in xenvif_tx_dealloc_action()
    1528  return queue->dealloc_cons != queue->dealloc_prod;    in tx_dealloc_work_todo()
    [all …]
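
tx_add_credit() above (line 191) tops up a byte budget that the transmit path spends per packet, which is how the backend rate-limits a guest. The sketch below is a user-space analogue of that credit scheme; the field names and the burst cap are assumptions, not the driver's exact policy.

```c
#include <assert.h>

struct tx_credit {
	unsigned long remaining_credit; /* bytes that may still be sent */
	unsigned long credit_bytes;     /* bytes granted per period */
	unsigned long max_burst;        /* assumed cap on accumulated credit */
};

/* Called from a periodic timer, like xenvif_tx_credit_callback() above. */
static void tx_add_credit(struct tx_credit *c)
{
	unsigned long max_credit = c->remaining_credit + c->credit_bytes;

	if (max_credit < c->remaining_credit) /* wrapped: clamp */
		max_credit = ~0UL;
	if (max_credit > c->max_burst)        /* don't bank an unbounded burst */
		max_credit = c->max_burst;
	c->remaining_credit = max_credit;
}

/* Consume credit for one packet; refuse if the budget is exhausted. */
static int tx_consume(struct tx_credit *c, unsigned long bytes)
{
	if (bytes > c->remaining_credit)
		return 0;
	c->remaining_credit -= bytes;
	return 1;
}

int main(void)
{
	struct tx_credit c = { .credit_bytes = 1000, .max_burst = 4000 };

	for (int i = 0; i < 10; i++)
		tx_add_credit(&c);
	assert(c.remaining_credit == 4000); /* capped at max_burst */
	assert(tx_consume(&c, 1500) && c.remaining_credit == 2500);
	return 0;
}
```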
|
/drivers/net/xen-netback/interface.c
    314  if (queue->tx_irq != queue->rx_irq)    in xenvif_up()
    329  if (queue->tx_irq != queue->rx_irq)    in xenvif_down()
    565  queue->credit_bytes = queue->remaining_credit = ~0UL;    in xenvif_init_queue()
    689  unbind_from_irqhandler(queue->tx_irq, queue);    in xenvif_disconnect_queue()
    690  if (queue->tx_irq == queue->rx_irq)    in xenvif_disconnect_queue()
    696  unbind_from_irqhandler(queue->rx_irq, queue);    in xenvif_disconnect_queue()
    751  queue->name, queue);    in xenvif_connect_data()
    754  queue->tx_irq = queue->rx_irq = err;    in xenvif_connect_data()
    758  snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),    in xenvif_connect_data()
    762  queue->tx_irq_name, queue);    in xenvif_connect_data()
    [all …]
|
/drivers/misc/genwqe/card_ddcb.c
    84   return queue->ddcb_next == queue->ddcb_act;    in queue_empty()
    89   if (queue->ddcb_next >= queue->ddcb_act)    in queue_enqueued_ddcbs()
    90   return queue->ddcb_next - queue->ddcb_act;    in queue_enqueued_ddcbs()
    92   return queue->ddcb_max - (queue->ddcb_act - queue->ddcb_next);    in queue_enqueued_ddcbs()
    332  struct ddcb_queue *queue = req->queue;    in copy_ddcb_results() local
    451  queue->ddcb_act = (queue->ddcb_act + 1) % queue->ddcb_max;    in genwqe_check_ddcb_queue()
    487  queue = req->queue;    in __genwqe_wait_ddcb()
    586  queue->ddcb_next = (queue->ddcb_next + 1) % queue->ddcb_max;    in get_next_ddcb()
    625  struct ddcb_queue *queue = req->queue;    in __genwqe_purge_ddcb() local
    772  queue = req->queue = &cd->queue;    in __genwqe_enqueue_ddcb()
    [all …]
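
queue_empty() and queue_enqueued_ddcbs() above are the usual occupancy arithmetic for a ring indexed by two cursors in [0, max): equal cursors mean empty, and the in-flight count must allow for the enqueue cursor having wrapped past the completion cursor. A standalone sketch with illustrative names:

```c
#include <assert.h>

struct ddcb_ring {
	unsigned int next; /* slot the next request will be placed in */
	unsigned int act;  /* oldest slot still being processed */
	unsigned int max;  /* number of slots in the ring */
};

static int ring_empty(const struct ddcb_ring *r)
{
	return r->next == r->act;
}

/* Number of enqueued-but-not-retired entries, allowing for wrap-around. */
static unsigned int ring_enqueued(const struct ddcb_ring *r)
{
	if (r->next >= r->act)
		return r->next - r->act;
	return r->max - (r->act - r->next);
}

int main(void)
{
	struct ddcb_ring r = { .next = 2, .act = 30, .max = 32 };

	assert(!ring_empty(&r));
	assert(ring_enqueued(&r) == 4); /* slots 30, 31, 0 and 1 are in flight */
	return 0;
}
```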
|
/drivers/media/usb/uvc/uvc_queue.c
    57   queue);    in __uvc_queue_return_buffers()
    146  list_add_tail(&buf->queue, &queue->irqqueue);    in uvc_buffer_queue()
    243  queue->queue.type = type;    in uvc_queue_init()
    244  queue->queue.io_modes = VB2_MMAP | VB2_USERPTR;    in uvc_queue_init()
    245  queue->queue.drv_priv = queue;    in uvc_queue_init()
    247  queue->queue.mem_ops = &vb2_vmalloc_memops;    in uvc_queue_init()
    250  queue->queue.lock = &queue->mutex;    in uvc_queue_init()
    254  queue->queue.ops = &uvc_meta_queue_qops;    in uvc_queue_init()
    257  queue->queue.io_modes |= VB2_DMABUF;    in uvc_queue_init()
    258  queue->queue.ops = &uvc_queue_qops;    in uvc_queue_init()
    [all …]
|
/drivers/md/dm-vdo/indexer/funnel-requestqueue.c
    191  queue->processor(request);    in request_queue_worker()
    206  queue->running = true;    in uds_make_request_queue()
    223  &queue->thread);    in uds_make_request_queue()
    229  queue->started = true;    in uds_make_request_queue()
    230  *queue_ptr = queue;    in uds_make_request_queue()
    246  sub_queue = request->requeued ? queue->retry_queue : queue->main_queue;    in uds_request_queue_enqueue()
    254  wake_up_worker(queue);    in uds_request_queue_enqueue()
    259  if (queue == NULL)    in uds_request_queue_finish()
    271  if (queue->started) {    in uds_request_queue_finish()
    272  wake_up_worker(queue);    in uds_request_queue_finish()
    [all …]
|
/drivers/net/xen-netfront.c
    483   struct netfront_queue *queue = info->queue;    in xennet_tx_setup_grant() local
    620   .queue = queue,    in xennet_xdp_xmit_one()
    783   info.queue = queue;    in xennet_start_xmit()
    1827  if (queue->tx_irq && (queue->tx_irq == queue->rx_irq))    in xennet_disconnect_backend()
    1829  if (queue->tx_irq && (queue->tx_irq != queue->rx_irq)) {    in xennet_disconnect_backend()
    1834  queue->tx_irq = queue->rx_irq = 0;    in xennet_disconnect_backend()
    1919  queue->rx_evtchn = queue->tx_evtchn;    in setup_netfront_single()
    1920  queue->rx_irq = queue->tx_irq = err;    in setup_netfront_single()
    1946  queue->tx_irq_name, queue);    in setup_netfront_split()
    1955  queue->rx_irq_name, queue);    in setup_netfront_split()
    [all …]
|
/drivers/nvme/host/tcp.c
    216   return queue - queue->ctrl->queues;    in nvme_tcp_queue_id()
    408   struct nvme_tcp_queue *queue = req->queue;    in nvme_tcp_queue_request() local
    566   req->queue = queue;    in nvme_tcp_init_request()
    706   struct nvme_tcp_queue *queue = req->queue;    in nvme_tcp_setup_h2c_data_pdu() local
    1000  if (queue->recv_ddgst != queue->exp_ddgst) {    in nvme_tcp_recv_ddgst()
    1070  if (likely(queue && queue->rd_enabled) &&    in nvme_tcp_data_ready()
    1137  struct nvme_tcp_queue *queue = req->queue;    in nvme_tcp_try_send_data() local
    1200  struct nvme_tcp_queue *queue = req->queue;    in nvme_tcp_try_send_cmd_pdu() local
    1241  struct nvme_tcp_queue *queue = req->queue;    in nvme_tcp_try_send_data_pdu() local
    1275  struct nvme_tcp_queue *queue = req->queue;    in nvme_tcp_try_send_ddgst() local
    [all …]
|
/drivers/nvme/host/rdma.c
    161   return queue - queue->ctrl->queues;    in nvme_rdma_queue_idx()
    281   queue->qp = queue->cm_id->qp;    in nvme_rdma_create_qp()
    313   req->queue = queue;    in nvme_rdma_init_request()
    418   ib_cq_pool_put(queue->ib_cq, queue->cq_size);    in nvme_rdma_free_cq()
    475   queue->ib_cq = ib_alloc_cq(ibdev, queue, queue->cq_size,    in nvme_rdma_create_cq()
    505   queue->cq_size = cq_factor * queue->queue_size + 1;    in nvme_rdma_create_queue_ib()
    535   queue->queue_size, nvme_rdma_queue_idx(queue));    in nvme_rdma_create_queue_ib()
    546   queue->queue_size, nvme_rdma_queue_idx(queue));    in nvme_rdma_create_queue_ib()
    1947  struct nvme_rdma_queue *queue = req->queue;    in nvme_rdma_complete_timed_out() local
    1956  struct nvme_rdma_queue *queue = req->queue;    in nvme_rdma_timeout() local
    [all …]
|
/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
    25   struct data_queue *queue = entry->queue;    in rt2x00queue_alloc_rxskb() local
    37   frame_size = queue->data_size + queue->desc_size + queue->winfo_size;    in rt2x00queue_alloc_rxskb()
    529  struct data_queue *queue = entry->queue;    in rt2x00queue_write_tx_descriptor() local
    554  queue->rt2x00dev->ops->lib->kick_queue(queue);    in rt2x00queue_kick_tx_queue()
    853  entry = &queue->entries[queue->index[index]];    in rt2x00queue_get_entry()
    863  struct data_queue *queue = entry->queue;    in rt2x00queue_index_inc() local
    875  if (queue->index[index] >= queue->limit)    in rt2x00queue_index_inc()
    941  queue->rt2x00dev->ops->lib->kick_queue(queue);    in rt2x00queue_unpause_queue()
    961  queue->rt2x00dev->ops->lib->start_queue(queue);    in rt2x00queue_start_queue()
    980  queue->rt2x00dev->ops->lib->stop_queue(queue);    in rt2x00queue_stop_queue()
    [all …]
|
/drivers/infiniband/hw/mana/shadow_queue.h
    44   if (!queue->buffer)    in create_shadow_queue()
    60   return (queue->prod_idx - queue->cons_idx) >= queue->length;    in shadow_queue_full()
    65   return queue->prod_idx == queue->cons_idx;    in shadow_queue_empty()
    73   return ((u8 *)queue->buffer + index * queue->stride);    in shadow_queue_get_element()
    79   return shadow_queue_get_element(queue, queue->prod_idx);    in shadow_queue_producer_entry()
    85   if (queue->cons_idx == queue->next_to_complete_idx)    in shadow_queue_get_next_to_consume()
    88   return shadow_queue_get_element(queue, queue->cons_idx);    in shadow_queue_get_next_to_consume()
    94   if (queue->next_to_complete_idx == queue->prod_idx)    in shadow_queue_get_next_to_complete()
    97   return shadow_queue_get_element(queue, queue->next_to_complete_idx);    in shadow_queue_get_next_to_complete()
    102  queue->prod_idx++;    in shadow_queue_advance_producer()
    [all …]
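
The shadow queue above keeps monotonically increasing producer, completion and consumer cursors, reducing them to a slot only when an element address is computed, so full/empty checks are plain subtractions. A user-space sketch of the same index scheme; the names and the explicit modulo are illustrative assumptions, not the mana driver's exact layout.

```c
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

struct shadow_queue {
	void *buffer;
	uint32_t length;               /* number of slots */
	uint32_t stride;               /* bytes per slot */
	uint64_t prod_idx;             /* advanced when a request is posted */
	uint64_t next_to_complete_idx; /* advanced when hardware completes it */
	uint64_t cons_idx;             /* advanced when the result is consumed */
};

static bool shadow_queue_full(const struct shadow_queue *q)
{
	return (q->prod_idx - q->cons_idx) >= q->length;
}

static bool shadow_queue_empty(const struct shadow_queue *q)
{
	return q->prod_idx == q->cons_idx;
}

static void *shadow_queue_get_element(const struct shadow_queue *q, uint64_t index)
{
	return (uint8_t *)q->buffer + (index % q->length) * q->stride;
}

static void *shadow_queue_producer_entry(struct shadow_queue *q)
{
	return shadow_queue_full(q) ? NULL : shadow_queue_get_element(q, q->prod_idx);
}

int main(void)
{
	struct shadow_queue q = { .length = 4, .stride = 16 };

	q.buffer = calloc(q.length, q.stride);
	assert(shadow_queue_empty(&q));
	q.prod_idx += 4;                 /* post four requests */
	assert(shadow_queue_full(&q) && !shadow_queue_producer_entry(&q));
	q.next_to_complete_idx += 2;     /* two finished by "hardware" */
	q.cons_idx += 2;                 /* and handed back to the caller */
	assert(!shadow_queue_full(&q));
	free(q.buffer);
	return 0;
}
```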
|
/drivers/gpu/drm/imagination/pvr_queue.c
    278   fence->queue = queue;    in pvr_queue_fence_init()
    1070  queue->ctx->fw_obj, queue->ctx_offset);    in pvr_queue_cleanup_fw_context()
    1097  if (!queue)    in pvr_queue_job_init()
    1269  queue = kzalloc(sizeof(*queue), GFP_KERNEL);    in pvr_queue_create()
    1270  if (!queue)    in pvr_queue_create()
    1275  queue->ctx = ctx;    in pvr_queue_create()
    1289  reg_state_init, queue, &queue->reg_state_obj);    in pvr_queue_create()
    1325  return queue;    in pvr_queue_create()
    1341  kfree(queue);    in pvr_queue_create()
    1395  if (!queue)    in pvr_queue_destroy()
    [all …]
|
/drivers/net/wireguard/queueing.c
    30   memset(queue, 0, sizeof(*queue));    in wg_packet_queue_init()
    31   queue->last_cpu = -1;    in wg_packet_queue_init()
    35   queue->worker = wg_packet_percpu_multicore_worker_alloc(function, queue);    in wg_packet_queue_init()
    36   if (!queue->worker) {    in wg_packet_queue_init()
    51   #define STUB(queue) ((struct sk_buff *)&queue->empty)    argument
    56   queue->head = queue->tail = STUB(queue);    in wg_prev_queue_init()
    57   queue->peeked = NULL;    in wg_prev_queue_init()
    87   queue->tail = next;    in wg_prev_queue_dequeue()
    92   queue->tail = next;    in wg_prev_queue_dequeue()
    98   __wg_prev_queue_enqueue(queue, STUB(queue));    in wg_prev_queue_dequeue()
    [all …]
|
/drivers/scsi/arm/queue.c
    65   INIT_LIST_HEAD(&queue->head);    in queue_initialise()
    66   INIT_LIST_HEAD(&queue->free);    in queue_initialise()
    83   return queue->alloc != NULL;    in queue_initialise()
    95   kfree(queue->alloc);    in queue_free()
    115  if (list_empty(&queue->free))    in __queue_add()
    118  l = queue->free.next;    in __queue_add()
    128  list_add(l, &queue->head);    in __queue_add()
    150  list_add(ent, &queue->free);    in __queue_remove()
    195  SCpnt = __queue_remove(queue, queue->head.next);    in queue_remove()
    247  __queue_remove(queue, l);    in queue_remove_all_target()
    [all …]
|
/drivers/crypto/hisilicon/sec/sec_drv.c
    703  while (test_bit(queue->expected, queue->unprocessed)) {    in sec_isr_handle()
    704  clear_bit(queue->expected, queue->unprocessed);    in sec_isr_handle()
    708  queue->shadow[queue->expected]);    in sec_isr_handle()
    709  queue->shadow[queue->expected] = NULL;    in sec_isr_handle()
    710  queue->expected = (queue->expected + 1) %    in sec_isr_handle()
    735  IRQF_TRIGGER_RISING, queue->name, queue);    in sec_queue_irq_init()
    747  free_irq(queue->task_irq, queue);    in sec_queue_irq_uninit()
    907  sec_queue_cmdbase_addr(queue, queue->ring_cmd.paddr);    in sec_queue_hw_init()
    909  sec_queue_outorder_addr(queue, queue->ring_cq.paddr);    in sec_queue_hw_init()
    911  sec_queue_errbase_addr(queue, queue->ring_db.paddr);    in sec_queue_hw_init()
    [all …]
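
sec_isr_handle() above retires completions strictly in order even though the hardware may finish requests out of order: each completion is marked in a bitmap, and the expected cursor only advances while the next in-order slot is marked done. A user-space sketch of that loop; the plain bool array stands in for the driver's bitmap, and the real handler also hands the saved request from queue->shadow[expected] back to its submitter.

```c
#include <stdbool.h>
#include <stdio.h>

#define QUEUE_LEN 8U

static bool done[QUEUE_LEN];  /* which slots have completed (the "unprocessed" bitmap) */
static unsigned int expected; /* next slot that must complete, in order */

static void complete_slot(unsigned int slot)
{
	done[slot] = true;

	/* Retire every slot that is now contiguous with "expected". */
	while (done[expected]) {
		done[expected] = false;
		printf("retired slot %u\n", expected);
		expected = (expected + 1) % QUEUE_LEN;
	}
}

int main(void)
{
	complete_slot(1); /* out of order: nothing can be retired yet */
	complete_slot(0); /* retires 0, then the already-finished 1 */
	complete_slot(2); /* retires 2 */
	return 0;
}
```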
|
/drivers/net/ethernet/ibm/ehea/ehea_qmr.h
    208  return hw_qeit_calc(queue, queue->current_q_offset);    in hw_qeit_get()
    213  queue->current_q_offset += queue->qe_size;    in hw_qeit_inc()
    214  if (queue->current_q_offset >= queue->queue_length) {    in hw_qeit_inc()
    217  queue->toggle_state = (~queue->toggle_state) & 1;    in hw_qeit_inc()
    224  hw_qeit_inc(queue);    in hw_qeit_get_inc()
    236  hw_qeit_inc(queue);    in hw_qeit_get_inc_valid()
    237  pref = hw_qeit_calc(queue, queue->current_q_offset);    in hw_qeit_get_inc_valid()
    251  pref = hw_qeit_calc(queue, queue->current_q_offset);    in hw_qeit_get_valid()
    269  u64 last_entry_in_q = queue->queue_length - queue->qe_size;    in hw_qeit_eq_get_inc()
    273  queue->current_q_offset += queue->qe_size;    in hw_qeit_eq_get_inc()
    [all …]
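
hw_qeit_inc() above steps a byte offset through the hardware queue and flips a toggle bit every time the offset wraps; validity checks then compare an entry's phase bit with toggle_state so entries left over from the previous pass are not mistaken for fresh ones. A standalone sketch of the increment, with field names mirroring the listing and the values in main() purely illustrative:

```c
#include <assert.h>

struct hw_queue {
	unsigned long current_q_offset; /* byte offset of the current entry */
	unsigned long queue_length;     /* total ring size in bytes */
	unsigned long qe_size;          /* size of one entry in bytes */
	int toggle_state;               /* flips on every wrap-around */
};

static void hw_qeit_inc(struct hw_queue *q)
{
	q->current_q_offset += q->qe_size;
	if (q->current_q_offset >= q->queue_length) {
		q->current_q_offset = 0;
		q->toggle_state = (~q->toggle_state) & 1;
	}
}

int main(void)
{
	struct hw_queue q = { .queue_length = 4 * 64, .qe_size = 64 };

	for (int i = 0; i < 4; i++)
		hw_qeit_inc(&q);
	assert(q.current_q_offset == 0 && q.toggle_state == 1); /* wrapped once */
	return 0;
}
```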
|
/drivers/soc/ixp4xx/ixp4xx-qmgr.c
    35   qmgr_queue_descs[queue], queue, val);    in qmgr_put_entry()
    48   qmgr_queue_descs[queue], queue, val);    in qmgr_get_entry()
    86   if (queue >= HALF_QUEUES)    in qmgr_stat_below_low_watermark()
    100  if (queue >= HALF_QUEUES)    in qmgr_stat_full()
    136  irq_pdevs[queue] = pdev;    in qmgr_set_irq()
    208  int half = queue / 32;    in qmgr_enable_irq()
    220  int half = queue / 32;    in qmgr_disable_irq()
    252  BUG_ON(queue >= QUEUES);    in qmgr_request_queue()
    318  qmgr_queue_descs[queue], queue, addr);    in qmgr_request_queue()
    355  qmgr_queue_descs[queue], queue);    in qmgr_release_queue()
    [all …]
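
The `int half = queue / 32;` lines above pick which of the controller's two 32-bit interrupt-enable registers covers a given queue, since 64 queues are managed as two halves. A user-space sketch of that bit-per-queue enable/disable; the irq_en array stands in for the real MMIO registers.

```c
#include <assert.h>
#include <stdint.h>

#define QUEUES 64U

static uint32_t irq_en[2]; /* stand-ins for the two interrupt-enable registers */

static void qmgr_enable_irq(unsigned int queue)
{
	int half = queue / 32; /* register 0 covers queues 0-31, register 1 covers 32-63 */

	assert(queue < QUEUES);
	irq_en[half] |= 1U << (queue % 32);
}

static void qmgr_disable_irq(unsigned int queue)
{
	int half = queue / 32;

	assert(queue < QUEUES);
	irq_en[half] &= ~(1U << (queue % 32));
}

int main(void)
{
	qmgr_enable_irq(5);
	qmgr_enable_irq(40);
	assert(irq_en[0] == (1U << 5) && irq_en[1] == (1U << 8));
	qmgr_disable_irq(5);
	assert(irq_en[0] == 0);
	return 0;
}
```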
|