
Searched refs:q (Results 1 – 25 of 1482) sorted by relevance


/linux/lib/crypto/
gf128mul.c
57 q(0x00), q(0x01), q(0x02), q(0x03), q(0x04), q(0x05), q(0x06), q(0x07),\
58 q(0x08), q(0x09), q(0x0a), q(0x0b), q(0x0c), q(0x0d), q(0x0e), q(0x0f),\
59 q(0x10), q(0x11), q(0x12), q(0x13), q(0x14), q(0x15), q(0x16), q(0x17),\
60 q(0x18), q(0x19), q(0x1a), q(0x1b), q(0x1c), q(0x1d), q(0x1e), q(0x1f),\
61 q(0x20), q(0x21), q(0x22), q(0x23), q(0x24), q(0x25), q(0x26), q(0x27),\
62 q(0x28), q(0x29), q(0x2a), q(0x2b), q(0x2c), q(0x2d), q(0x2e), q(0x2f),\
63 q(0x30), q(0x31), q(0x32), q(0x33), q(0x34), q(0x35), q(0x36), q(0x37),\
64 q(0x38), q(0x39), q(0x3a), q(0x3b), q(0x3c), q(0x3d), q(0x3e), q(0x3f),\
65 q(0x40), q(0x41), q(0x42), q(0x43), q(0x44), q(0x45), q(0x46), q(0x47),\
66 q(0x48), q(0x49), q(0x4a), q(0x4b), q(0x4c), q(0x4d), q(0x4e), q(0x4f),\
[all …]
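
Note: the q(0x00) … rows above are the body of a table-generating macro: the caller passes its own q() macro and the same 256-entry list expands into a different lookup table each time. A minimal standalone sketch of that idiom follows (hypothetical names, not the gf128mul.c definitions):

    /*
     * Sketch of the macro-driven table build (hypothetical names): each row
     * invokes a caller-supplied q() macro once per byte value, so redefining
     * q() before use yields a different 256-entry table from the same list.
     */
    #include <stdint.h>
    #include <stdio.h>

    #define ROW(q, base) \
        q((base) + 0x0), q((base) + 0x1), q((base) + 0x2), q((base) + 0x3), \
        q((base) + 0x4), q((base) + 0x5), q((base) + 0x6), q((base) + 0x7), \
        q((base) + 0x8), q((base) + 0x9), q((base) + 0xa), q((base) + 0xb), \
        q((base) + 0xc), q((base) + 0xd), q((base) + 0xe), q((base) + 0xf)

    #define ALL256(q) \
        ROW(q, 0x00), ROW(q, 0x10), ROW(q, 0x20), ROW(q, 0x30), \
        ROW(q, 0x40), ROW(q, 0x50), ROW(q, 0x60), ROW(q, 0x70), \
        ROW(q, 0x80), ROW(q, 0x90), ROW(q, 0xa0), ROW(q, 0xb0), \
        ROW(q, 0xc0), ROW(q, 0xd0), ROW(q, 0xe0), ROW(q, 0xf0)

    /* One instantiation: every byte value doubled, truncated to 8 bits. */
    #define q(i) ((uint8_t)((i) << 1))
    static const uint8_t double_tab[256] = { ALL256(q) };
    #undef q

    int main(void)
    {
        printf("double_tab[0x03] = 0x%02x\n", double_tab[0x03]); /* 0x06 */
        return 0;
    }
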
/linux/Documentation/networking/
tls-offload-layers.svg
1 q-0.609375 -0.359375 -1.203125 -0.359375q-0.546875 0 -0.96875 0.328125q-0.421875 0.328125 -0.60937…
/linux/drivers/net/ethernet/fungible/funeth/
funeth_rx.c
276 q->rqes[q->rq_cons & q->rq_mask] = in get_buf()
281 return &q->bufs[q->rq_cons & q->rq_mask]; in get_buf()
523 if (q->rq_cons - q->rq_cons_db >= q->rq_db_thres) { in fun_rxq_napi_poll()
525 q->stats.rx_bufs += q->rq_cons - q->rq_cons_db; in fun_rxq_napi_poll()
527 q->rq_cons_db = q->rq_cons; in fun_rxq_napi_poll()
528 writel((q->rq_cons - 1) & q->rq_mask, q->rq_db); in fun_rxq_napi_poll()
561 q->cur_buf = q->bufs; in fun_rxq_alloc_bufs()
669 q->rq_dma_addr, q->bufs); in fun_rxq_create_sw()
684 q->rqes, q->rq_dma_addr, q->bufs); in fun_rxq_free_sw()
704 err = xdp_rxq_info_reg(&q->xdp_rxq, q->netdev, q->qidx, in fun_rxq_create_dev()
[all …]
funeth_tx.c
313 return q->mask - q->prod_cnt + q->cons_cnt; in fun_txq_avail()
431 for (head = txq_hw_head(q), reclaim_idx = q->cons_cnt & q->mask; in fun_txq_reclaim()
478 db_val = READ_ONCE(q->irq_db_val) | (q->cons_cnt & q->mask); in fun_txq_napi_poll()
545 idx = q->prod_cnt & q->mask; in fun_xdp_tx()
670 q->desc, q->dma_addr, q->info); in fun_txq_free_sw()
698 &q->hw_qid, &q->db); in fun_txq_create_dev()
709 q->ndq = netdev_get_tx_queue(q->netdev, q->qidx); in fun_txq_create_dev()
712 writel(q->irq_db_val, q->db); in fun_txq_create_dev()
719 q->ethid, q->numa_node); in fun_txq_create_dev()
740 q->irq ? "Tx" : "XDP", q->qidx, q->hw_qid, in fun_txq_free_dev()
[all …]
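
Note: the funeth hits above show the free-running-counter ring style: prod_cnt and cons_cnt grow without bound, the power-of-two mask is applied only when indexing, and free space is mask - prod_cnt + cons_cnt. A minimal sketch under those assumptions (hypothetical struct, not the driver's):

    /*
     * Sketch of the free-running-counter ring (hypothetical struct): the
     * counters never wrap explicitly; unsigned overflow plus the power-of-two
     * mask keeps indexing and the avail computation correct.
     */
    #include <stdint.h>
    #include <assert.h>

    #define RING_SIZE 8  /* must be a power of two */

    struct ring {
        uint32_t prod_cnt;  /* total entries ever produced */
        uint32_t cons_cnt;  /* total entries ever consumed */
        uint32_t mask;      /* RING_SIZE - 1 */
        int slots[RING_SIZE];
    };

    /* Free slots; like fun_txq_avail() above, this accounting keeps one slot unused. */
    static uint32_t ring_avail(const struct ring *q)
    {
        return q->mask - q->prod_cnt + q->cons_cnt;
    }

    static void ring_push(struct ring *q, int v)
    {
        assert(ring_avail(q) > 0);
        q->slots[q->prod_cnt++ & q->mask] = v;
    }

    static int ring_pop(struct ring *q)
    {
        assert(q->prod_cnt != q->cons_cnt);  /* not empty */
        return q->slots[q->cons_cnt++ & q->mask];
    }

    int main(void)
    {
        struct ring q = { .mask = RING_SIZE - 1 };

        ring_push(&q, 42);
        assert(ring_pop(&q) == 42);
        assert(ring_avail(&q) == RING_SIZE - 1);
        return 0;
    }
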
/linux/sound/core/seq/
seq_queue.c
72 if (q) { in queue_list_remove()
92 q = kzalloc(sizeof(*q), GFP_KERNEL); in queue_new()
93 if (!q) in queue_new()
105 if (q->tickq == NULL || q->timeq == NULL || q->timer == NULL) { in queue_new()
117 return q; in queue_new()
135 kfree(q); in queue_delete()
175 return q; in snd_seq_queue_alloc()
203 if (q) in queueptr()
342 return (q->owner == client) || (!q->locked && !q->klocked); in check_access()
372 if (! q) in snd_seq_queue_check_access()
[all …]
/linux/sound/core/seq/oss/
seq_oss_readq.c
37 q = kzalloc(sizeof(*q), GFP_KERNEL); in snd_seq_oss_readq_new()
42 if (!q->q) { in snd_seq_oss_readq_new()
49 q->head = q->tail = 0; in snd_seq_oss_readq_new()
65 kfree(q->q); in snd_seq_oss_readq_delete()
78 q->head = q->tail = 0; in snd_seq_oss_readq_clear()
151 memcpy(&q->q[q->tail], ev, sizeof(*ev)); in snd_seq_oss_readq_put_event()
152 q->tail = (q->tail + 1) % q->maxlen; in snd_seq_oss_readq_put_event()
173 memcpy(rec, &q->q[q->head], sizeof(*rec)); in snd_seq_oss_readq_pick()
184 (q->qlen > 0 || q->head == q->tail), in snd_seq_oss_readq_wait()
196 q->head = (q->head + 1) % q->maxlen; in snd_seq_oss_readq_free()
[all …]
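
Note: the readq above is a bounded circular buffer whose head and tail wrap with % maxlen. A minimal sketch of that pattern (hypothetical types; the kernel code splits the pick and free steps, which are merged into one get here):

    /*
     * Sketch of the head/tail circular buffer with modulo wrap (hypothetical
     * types): tail advances on put, head advances on get, qlen tracks fill.
     */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct event { int code; };

    struct readq {
        struct event *q;  /* array of maxlen events */
        int maxlen;
        int head, tail, qlen;
    };

    static int readq_put(struct readq *rq, const struct event *ev)
    {
        if (rq->qlen >= rq->maxlen)
            return -1;                            /* full */
        memcpy(&rq->q[rq->tail], ev, sizeof(*ev));
        rq->tail = (rq->tail + 1) % rq->maxlen;   /* wrap */
        rq->qlen++;
        return 0;
    }

    static int readq_get(struct readq *rq, struct event *out)
    {
        if (rq->qlen == 0)
            return -1;                            /* empty */
        memcpy(out, &rq->q[rq->head], sizeof(*out));
        rq->head = (rq->head + 1) % rq->maxlen;   /* wrap */
        rq->qlen--;
        return 0;
    }

    int main(void)
    {
        struct readq rq = { .maxlen = 4 };
        struct event ev = { .code = 7 }, out;

        rq.q = calloc(rq.maxlen, sizeof(*rq.q));
        readq_put(&rq, &ev);
        if (readq_get(&rq, &out) == 0)
            printf("got %d\n", out.code);         /* got 7 */
        free(rq.q);
        return 0;
    }
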
seq_oss_writeq.c
30 q = kzalloc(sizeof(*q), GFP_KERNEL); in snd_seq_oss_writeq_new()
31 if (!q) in snd_seq_oss_writeq_new()
33 q->dp = dp; in snd_seq_oss_writeq_new()
34 q->maxlen = maxlen; in snd_seq_oss_writeq_new()
36 q->sync_event_put = 0; in snd_seq_oss_writeq_new()
37 q->sync_time = 0; in snd_seq_oss_writeq_new()
47 return q; in snd_seq_oss_writeq_new()
56 if (q) { in snd_seq_oss_writeq_delete()
58 kfree(q); in snd_seq_oss_writeq_delete()
114 if (! q->sync_event_put || q->sync_time >= time) in snd_seq_oss_writeq_sync()
[all …]
/linux/drivers/gpu/drm/xe/
xe_guc_submit.c
314 q->guc->id, q->width); in __release_guc_id()
343 q->guc->id + i, q, GFP_NOWAIT)); in alloc_guc_id()
359 __release_guc_id(guc, q, q->width); in release_guc_id()
716 struct xe_exec_queue *q = job->q; in guc_exec_queue_run_job() local
862 struct xe_exec_queue *q = ge->q; in xe_guc_exec_queue_lr_cleanup() local
1030 struct xe_exec_queue *q = job->q; in guc_exec_queue_timedout_job() local
1131 if (q->vm && q->vm->xef) { in guc_exec_queue_timedout_job()
1150 xe_gt_WARN(q->gt, q->flags & EXEC_QUEUE_FLAG_VM && !exec_queue_killed(q), in guc_exec_queue_timedout_job()
1210 struct xe_exec_queue *q = ge->q; in __guc_exec_queue_fini_async() local
1407 ge->q = q; in guc_exec_queue_init()
[all …]
xe_exec_queue.c
39 if (q->vm) in __xe_exec_queue_free()
45 kfree(q); in __xe_exec_queue_free()
62 if (!q) in __xe_exec_queue_alloc()
120 q->lrc[i] = xe_lrc_create(q->hwe, q->vm, SZ_16K); in __xe_exec_queue_init()
130 err = q->ops->init(q); in __xe_exec_queue_init()
256 q->ops->fini(q); in xe_exec_queue_destroy()
637 if (q->vm && q->hwe->hw_engine_group) { in xe_exec_queue_create_ioctl()
680 args->value = q->ops->reset_status(q); in xe_exec_queue_get_property_ioctl()
774 if (!q->vm || !q->vm->xef) in xe_exec_queue_update_run_ticks()
811 q->ops->kill(q); in xe_exec_queue_kill()
[all …]
/linux/net/sched/
sch_choke.c
77 return (q->tail - q->head) & q->tab_mask; in choke_len()
96 q->head = (q->head + 1) & q->tab_mask; in choke_zap_head_holes()
97 if (q->head == q->tail) in choke_zap_head_holes()
106 q->tail = (q->tail - 1) & q->tab_mask; in choke_zap_tail_holes()
107 if (q->head == q->tail) in choke_zap_tail_holes()
205 if (q->head == q->tail) in choke_match_random()
269 q->tail = (q->tail + 1) & q->tab_mask; in choke_enqueue()
294 skb = q->tab[q->head]; in choke_dequeue()
311 q->head = (q->head + 1) & q->tab_mask; in choke_reset()
386 q->head = (q->head + 1) & q->tab_mask; in choke_change()
[all …]
sch_netem.c
299 return q->loss && q->loss >= get_crandom(&q->loss_cor, &q->prng); in loss_event()
382 rtnl_kfree_skbs(q->t_head, q->t_tail); in tfifo_reset()
458 if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor, &q->prng)) in netem_enqueue()
477 if (q->latency || q->jitter || q->rate) in netem_enqueue()
493 if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor, &q->prng)) { in netem_enqueue()
549 q->reorder < get_crandom(&q->reorder_cor, &q->prng)) { in netem_enqueue()
553 delay = tabledist(q->latency, q->jitter, in netem_enqueue()
554 &q->delay_cor, &q->prng, q->delay_dist); in netem_enqueue()
661 NULL, &q->prng, q->slot_dist); in get_slot_next()
767 skb = q->qdisc->ops->dequeue(q->qdisc); in netem_dequeue()
[all …]
sch_sfq.c
461 if (++sch->q.qlen <= q->limit) in sfq_enqueue()
618 if (!q->filter_list && q->tail) in sfq_perturbation()
668 q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum); in sfq_change()
675 q->maxflows = min_t(u32, q->maxflows, q->divisor); in sfq_change()
693 q->limit = min_t(u32, ctl->limit, q->maxdepth * q->maxflows); in sfq_change()
694 q->maxflows = min_t(u32, q->maxflows, q->limit); in sfq_change()
698 while (sch->q.qlen > q->limit) { in sfq_change()
765 q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum); in sfq_init()
775 q->ht = sfq_alloc(sizeof(q->ht[0]) * q->divisor); in sfq_init()
776 q->slots = sfq_alloc(sizeof(q->slots[0]) * q->maxflows); in sfq_init()
[all …]
sch_sfb.c
192 memset(&q->bins, 0, sizeof(q->bins)); in sfb_zero_all_buckets()
202 const struct sfb_bucket *b = &q->bins[q->slot].bins[0][0]; in sfb_compute_qlen()
226 sfb_init_perturbation(q->slot, q); in sfb_swap_slot()
227 q->slot ^= 1; in sfb_swap_slot()
236 if (q->penalty_rate == 0 || q->penalty_burst == 0) in sfb_rate_limit()
242 q->tokens_avail = (age * q->penalty_rate) / HZ; in sfb_rate_limit()
243 if (q->tokens_avail > q->penalty_burst) in sfb_rate_limit()
244 q->tokens_avail = q->penalty_burst; in sfb_rate_limit()
295 if (unlikely(sch->q.qlen >= q->limit)) { in sfb_enqueue()
460 q->slot = 0; in sfb_reset()
[all …]
sch_fq_pie.c
185 sch->q.qlen++; in fq_pie_qdisc_enqueue()
264 sch->q.qlen--; in fq_pie_qdisc_dequeue()
270 if (head == &q->new_flows && !list_empty(&q->old_flows)) in fq_pie_qdisc_dequeue()
312 if (!q->flows_cnt || q->flows_cnt > 65536) { in fq_pie_change()
385 struct fq_pie_sched_data *q = from_timer(q, t, adapt_timer); in fq_pie_timer() local
396 max_cnt = min_t(int, q->flows_cnt - q->flows_cursor, 2048); in fq_pie_timer()
399 &q->flows[q->flows_cursor].vars, in fq_pie_timer()
400 q->flows[q->flows_cursor].backlog); in fq_pie_timer()
406 if (q->flows_cursor >= q->flows_cnt) { in fq_pie_timer()
427 q->sch = sch; in fq_pie_init()
[all …]
sch_fq_codel.c
180 sch->q.qlen -= i; in fq_codel_drop()
216 memory_limited = q->memory_usage > q->memory_limit; in fq_codel_enqueue()
265 q->backlogs[flow - q->flows] -= qdisc_pkt_len(skb); in dequeue_func()
267 sch->q.qlen--; in dequeue_func()
309 if ((head == &q->new_flows) && !list_empty(&q->old_flows)) in fq_codel_dequeue()
320 if (q->cstats.drop_count && sch->q.qlen) { in fq_codel_dequeue()
349 memset(q->backlogs, 0, q->flows_cnt * sizeof(u32)); in fq_codel_reset()
380 if (q->flows) in fq_codel_change()
444 q->memory_usage > q->memory_limit) { in fq_codel_change()
493 err = tcf_block_get(&q->block, &q->filter_list, sch, extack); in fq_codel_init()
[all …]
sch_skbprio.c
44 for (prio = q->highest_prio - 1; prio >= q->lowest_prio; prio--) { in calc_new_high_prio()
96 sch->q.qlen++; in skbprio_enqueue()
125 if (q->lowest_prio == q->highest_prio) { in skbprio_enqueue()
131 q->lowest_prio = calc_new_low_prio(q); in skbprio_enqueue()
144 struct sk_buff_head *hpq = &q->qdiscs[q->highest_prio]; in skbprio_dequeue()
150 sch->q.qlen--; in skbprio_dequeue()
154 q->qstats[q->highest_prio].backlog -= qdisc_pkt_len(skb); in skbprio_dequeue()
158 if (q->lowest_prio == q->highest_prio) { in skbprio_dequeue()
163 q->highest_prio = calc_new_high_prio(q); in skbprio_dequeue()
191 memset(&q->qstats, 0, sizeof(q->qstats)); in skbprio_init()
[all …]
/linux/net/xdp/
xsk_queue.h
130 if (q->cached_cons != q->cached_prod) { in xskq_cons_read_addr_unchecked()
194 return q->cached_cons != q->cached_prod; in xskq_has_descs()
212 if (q->cached_cons != q->cached_prod) { in xskq_cons_read_desc()
311 if (q->cached_prod == q->cached_cons) in xskq_cons_peek_addr_unchecked()
320 if (q->cached_prod == q->cached_cons) in xskq_cons_peek_desc()
349 u32 free_entries = q->nentries - (q->cached_prod - q->cached_cons); in xskq_prod_nb_free()
356 free_entries = q->nentries - (q->cached_prod - q->cached_cons); in xskq_prod_nb_free()
416 idx = q->cached_prod++ & q->ring_mask; in xskq_prod_reserve_desc()
431 __xskq_prod_submit(q, q->cached_prod); in xskq_prod_submit()
449 return q ? q->invalid_descs : 0; in xskq_nb_invalid_descs()
[all …]
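
Note: the xsk_queue.h hits work on cached copies of the shared producer/consumer indices, computing free space as nentries - (cached_prod - cached_cons) and re-reading the shared index only when the cached view runs out. A rough sketch of that idea (hypothetical layout, not the AF_XDP ring ABI):

    /*
     * Sketch of the cached-index producer view (hypothetical layout): work
     * against cached copies of the shared indices and re-read the shared
     * consumer index only when the cached view looks full.
     */
    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    struct shared_ring {
        _Atomic uint32_t producer;
        _Atomic uint32_t consumer;
    };

    struct prod_view {
        struct shared_ring *ring;
        uint32_t nentries;     /* power of two */
        uint32_t cached_prod;
        uint32_t cached_cons;
    };

    static uint32_t prod_nb_free(struct prod_view *q, uint32_t want)
    {
        uint32_t free_entries = q->nentries - (q->cached_prod - q->cached_cons);

        if (free_entries >= want)
            return free_entries;   /* cached view is good enough */

        /* Refresh the cached consumer index from shared memory and recompute. */
        q->cached_cons = atomic_load_explicit(&q->ring->consumer,
                                              memory_order_acquire);
        return q->nentries - (q->cached_prod - q->cached_cons);
    }

    int main(void)
    {
        struct shared_ring r = {0};
        struct prod_view q = { .ring = &r, .nentries = 8 };

        printf("free entries: %u\n", prod_nb_free(&q, 1));  /* 8 */
        return 0;
    }
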
/linux/drivers/media/common/videobuf2/
videobuf2-core.c
168 ((q)->ops->op ? (q)->ops->op(args) : 0)
190 if (q && q->buf_ops && q->buf_ops->op) \
197 if (q && q->buf_ops && q->buf_ops->op) \
244 q->alloc_devs[plane] ? : q->dev, in __vb2_buf_mem_alloc()
850 q->bufs = kcalloc(q->max_num_buffers, sizeof(*q->bufs), GFP_KERNEL); in vb2_core_allocated_buffers_storage()
2040 call_void_qop(q, wait_prepare, q); in __vb2_wait_for_done_vb()
2054 call_void_qop(q, wait_finish, q); in __vb2_wait_for_done_vb()
2749 if (q->is_output && q->fileio && q->queued_count < vb2_get_num_buffers(q)) in vb2_core_poll()
2975 vb2_core_streamoff(q, q->type); in __vb2_cleanup_fileio()
3208 call_void_qop(q, wait_finish, q); in vb2_thread()
[all …]
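
Note: line 168 above is the optional-callback idiom: invoke q->ops->op only if the driver provided it, otherwise evaluate to 0. A small self-contained sketch (hypothetical ops struct and macro name):

    /*
     * Sketch of the optional-callback macro (hypothetical names): call the
     * op only if the driver supplied it, else the expression is 0.
     */
    #include <stdio.h>

    struct ops {
        int (*start)(int arg);
    };

    struct queue {
        const struct ops *ops;
    };

    #define call_op(q, op, ...) \
        ((q)->ops->op ? (q)->ops->op(__VA_ARGS__) : 0)

    static int do_start(int arg)
    {
        return arg * 2;
    }

    int main(void)
    {
        const struct ops with = { .start = do_start };
        const struct ops without = { 0 };
        struct queue q1 = { .ops = &with }, q2 = { .ops = &without };

        /* 42 from the real callback, 0 from the missing one. */
        printf("%d %d\n", call_op(&q1, start, 21), call_op(&q2, start, 21));
        return 0;
    }
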
/linux/drivers/spi/
spi-fsl-qspi.c
345 reg = qspi_readl(q, q->iobase + QUADSPI_FR); in fsl_qspi_irq_handler()
346 qspi_writel(q, reg, q->iobase + QUADSPI_FR); in fsl_qspi_irq_handler()
511 reg = qspi_readl(q, q->iobase + QUADSPI_MCR); in fsl_qspi_invalidate()
513 qspi_writel(q, reg, q->iobase + QUADSPI_MCR); in fsl_qspi_invalidate()
522 qspi_writel(q, reg, q->iobase + QUADSPI_MCR); in fsl_qspi_invalidate()
554 q->ahb_addr + q->selected * q->devtype_data->ahb_buf_size, in fsl_qspi_read_ahb()
664 qspi_writel(q, qspi_readl(q, base + QUADSPI_MCR) | in fsl_qspi_exec_op()
753 qspi_writel(q, qspi_readl(q, base + QUADSPI_FLSHCR) & in fsl_qspi_default_setup()
801 qspi_writel(q, 0xffffffff, q->iobase + QUADSPI_FR); in fsl_qspi_default_setup()
883 q->ahb_addr = devm_ioremap(dev, q->memmap_phy, in fsl_qspi_probe()
[all …]
/linux/block/
blk-pm.c
31 q->dev = dev; in blk_pm_runtime_init()
32 q->rpm_status = RPM_ACTIVE; in blk_pm_runtime_init()
63 if (!q->dev) in blk_pre_runtime_suspend()
78 blk_set_pm_only(q); in blk_pre_runtime_suspend()
92 blk_mq_unfreeze_queue(q); in blk_pre_runtime_suspend()
100 blk_clear_pm_only(q); in blk_pre_runtime_suspend()
122 if (!q->dev) in blk_post_runtime_suspend()
135 blk_clear_pm_only(q); in blk_post_runtime_suspend()
152 if (!q->dev) in blk_pre_runtime_resume()
178 if (!q->dev) in blk_post_runtime_resume()
[all …]
/linux/drivers/infiniband/sw/rxe/
rxe_queue.h
112 prod = q->index; in queue_get_producer()
135 cons = q->index; in queue_get_consumer()
193 prod = q->index; in queue_advance_producer()
195 q->index = prod; in queue_advance_producer()
223 cons = (q->index + 1) & q->index_mask; in queue_advance_consumer()
224 q->index = cons; in queue_advance_consumer()
255 return q->buf->data + (prod << q->log2_elem_size); in queue_producer_addr()
263 return q->buf->data + (cons << q->log2_elem_size); in queue_consumer_addr()
268 return q->buf->data + ((index & q->index_mask) in queue_addr_from_index()
275 return (((u8 *)addr - q->buf->data) >> q->log2_elem_size) in queue_index_from_addr()
[all …]
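
Note: the rxe_queue.h hits convert between element indices and element addresses with shifts by log2_elem_size plus an index mask. A minimal sketch of that mapping (hypothetical struct):

    /*
     * Sketch of the index<->address mapping (hypothetical struct): element
     * size is a power of two, so conversion is a shift by log2_elem_size,
     * with index_mask handling wrap-around of the ring index.
     */
    #include <stddef.h>
    #include <stdint.h>
    #include <assert.h>

    struct queue {
        uint8_t *data;                 /* element storage */
        unsigned int log2_elem_size;   /* element size == 1 << log2_elem_size */
        unsigned int index_mask;       /* number of elements - 1, power of two */
    };

    static void *addr_from_index(const struct queue *q, unsigned int index)
    {
        return q->data + ((size_t)(index & q->index_mask) << q->log2_elem_size);
    }

    static unsigned int index_from_addr(const struct queue *q, const void *addr)
    {
        return (unsigned int)(((const uint8_t *)addr - q->data) >> q->log2_elem_size)
               & q->index_mask;
    }

    int main(void)
    {
        static uint8_t buf[8 * 16];
        const struct queue q = { .data = buf, .log2_elem_size = 4, .index_mask = 7 };

        assert(index_from_addr(&q, addr_from_index(&q, 5)) == 5);
        return 0;
    }
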
/linux/drivers/net/wireless/broadcom/b43/
pio.c
76 return q; in parse_cookie()
130 q = kzalloc(sizeof(*q), GFP_KERNEL); in b43_setup_pioqueue_tx()
131 if (!q) in b43_setup_pioqueue_tx()
156 return q; in b43_setup_pioqueue_tx()
164 q = kzalloc(sizeof(*q), GFP_KERNEL); in b43_setup_pioqueue_rx()
165 if (!q) in b43_setup_pioqueue_rx()
175 return q; in b43_setup_pioqueue_rx()
195 if (!q) in b43_destroy_pioqueue_tx()
204 if (!q) in b43_destroy_pioqueue_rx()
523 B43_WARN_ON(q->buffer_used > q->buffer_size); in b43_pio_tx()
[all …]
/linux/drivers/s390/cio/
qdio_main.c
149 q->handler(q->irq_ptr->cdev, QDIO_ERROR_GET_BUF_STATE, q->nr, in qdio_do_eqbs()
150 q->first_to_check, count, q->irq_ptr->int_parm); in qdio_do_eqbs()
195 q->handler(q->irq_ptr->cdev, QDIO_ERROR_SET_BUF_STATE, q->nr, in qdio_do_sqbs()
196 q->first_to_check, count, q->irq_ptr->int_parm); in qdio_do_sqbs()
307 return qdio_siga_sync(q, 0, q->mask); in qdio_sync_input_queue()
312 return qdio_siga_sync(q, q->mask, 0); in qdio_sync_output_queue()
358 "%4x cc2 BB1:%1d", SCH_NO(q), q->nr); in qdio_siga_output()
466 inbound_handle_work(q, start, count, is_qebsm(q)); in get_inbound_buffer_frontier()
653 DBF_ERROR("%4x cc2 BBC:%1d", SCH_NO(q), q->nr); in qdio_kick_outbound_q()
667 DBF_ERROR("%4x cc2 BB2:%1d", SCH_NO(q), q->nr); in qdio_kick_outbound_q()
[all …]
/linux/drivers/net/wireless/mediatek/mt76/
dma.c
191 Q_WRITE(q, desc_base, q->desc_dma); in mt76_dma_sync_idx()
195 Q_WRITE(q, ring_size, q->ndesc); in mt76_dma_sync_idx()
196 q->head = Q_READ(q, dma_idx); in mt76_dma_sync_idx()
197 q->tail = q->head; in mt76_dma_sync_idx()
203 if (!q || !q->ndesc) in __mt76_dma_queue_reset()
245 desc = &q->desc[q->head]; in mt76_dma_add_rx_buf()
278 q->head = (q->head + 1) % q->ndesc; in mt76_dma_add_rx_buf()
303 next = (q->head + 1) % q->ndesc; in mt76_dma_add_buf()
378 Q_WRITE(q, cpu_idx, q->head); in mt76_dma_kick_queue()
387 if (!q || !q->ndesc) in mt76_dma_tx_cleanup()
[all …]
/linux/drivers/net/ethernet/chelsio/cxgb3/
sge.c
543 if (++q->pidx == q->size) { in refill_fl()
580 q->sdesc[q->pidx] = q->sdesc[idx]; in recycle_rx_buf()
587 if (++q->pidx == q->size) { in recycle_rx_buf()
647 memset(q, 0, sizeof(*q)); in t3_reset_qset()
1323 if (q->pidx >= q->size) { in t3_eth_xmit()
1324 q->pidx -= q->size; in t3_eth_xmit()
1500 write_imm(&q->desc[q->pidx], skb, skb->len, q->gen); in ctrl_xmit()
1533 write_imm(&q->desc[q->pidx], skb, skb->len, q->gen); in restart_ctrlq()
1723 if (q->pidx >= q->size) { in ofld_xmit()
1724 q->pidx -= q->size; in ofld_xmit()
[all …]
