/drivers/net/ipa/data/

ipa_data-v3.1.c       127, 151:  .qmap = true,
ipa_data-v3.5.1.c     118, 143:  .qmap = true,
ipa_data-v4.2.c       108, 133:  .qmap = true,
ipa_data-v4.5.c       121, 145:  .qmap = true,
ipa_data-v4.7.c       108, 132:  .qmap = true,
ipa_data-v4.9.c       113, 137:  .qmap = true,
ipa_data-v4.11.c      112, 136:  .qmap = true,
ipa_data-v5.0.c       125, 149:  .qmap = true,
ipa_data-v5.5.c       125, 149:  .qmap = true,
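Each of these per-SoC data files sets `.qmap = true` in the configuration of the modem-facing endpoints, marking them as carrying QMAP (Qualcomm Multiplexing and Aggregation Protocol) traffic. Below is a hedged sketch of what such an endpoint data entry looks like; the endpoint name, channel/endpoint IDs, and the companion flags are illustrative only and differ between the files listed above.

	/* Sketch of one IPA endpoint data entry with QMAP enabled.
	 * IDs and companion flags are illustrative; GSI channel sizing omitted.
	 */
	[IPA_ENDPOINT_AP_MODEM_RX] = {
		.ee_id		= GSI_EE_AP,
		.channel_id	= 12,		/* illustrative */
		.endpoint_id	= 20,		/* illustrative */
		.toward_ipa	= false,
		.endpoint = {
			.config	= {
				.aggregation	= true,
				.qmap		= true,	/* traffic is QMAP-encapsulated */
				.rx = {
					.buffer_size	= 8192,	/* illustrative */
					.aggr_close_eof	= true,
				},
			},
		},
	},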
|
/drivers/net/ethernet/broadcom/bnxt/

bnxt_dcb.c
    195  unsigned long qmap = 0;                   in bnxt_queue_remap()  [local]
    204  __set_bit(j, &qmap);                      in bnxt_queue_remap()
    216  j = find_next_zero_bit(&qmap, max, j);    in bnxt_queue_remap()
    218  __set_bit(j, &qmap);                      in bnxt_queue_remap()
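Here `qmap` is not a register value but a plain `unsigned long` used as a bitmap of queue IDs already taken, so `find_next_zero_bit()` can hand out the next free one. A simplified, self-contained sketch of that idiom follows; the function and parameter names are hypothetical and do not come from bnxt_dcb.c, and the single-word bitmap limits @max to BITS_PER_LONG.

	#include <linux/bitops.h>

	/* Illustrative only: give each entry in @ids a distinct queue ID below
	 * @max, keeping IDs that are already valid (the bnxt_queue_remap() idiom).
	 */
	static void example_remap_queues(u8 *ids, int count, int max)
	{
		unsigned long qmap = 0;	/* bit n set => queue ID n is taken */
		int i, j = 0;

		/* First pass: record every ID that is already in range. */
		for (i = 0; i < count; i++)
			if (ids[i] < max)
				__set_bit(ids[i], &qmap);

		/* Second pass: give out-of-range entries the next free ID. */
		for (i = 0; i < count; i++) {
			if (ids[i] < max)
				continue;
			j = find_next_zero_bit(&qmap, max, j);
			if (j >= max)
				break;		/* no free IDs left */
			ids[i] = j;
			__set_bit(j, &qmap);
		}
	}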
|
/drivers/net/ethernet/intel/ice/

ice_lib.c
    1050  qmap = FIELD_PREP(ICE_AQ_VSI_TC_Q_OFFSET_M, offset);        in ice_vsi_setup_q_map()
    1051  qmap |= FIELD_PREP(ICE_AQ_VSI_TC_Q_NUM_M, pow);             in ice_vsi_setup_q_map()
    1054  ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);               in ice_vsi_setup_q_map()
    1187  u16 qcount, qmap;                                           in ice_chnl_vsi_setup_q_map()  [local]
    1194  qmap = FIELD_PREP(ICE_AQ_VSI_TC_Q_OFFSET_M, offset);        in ice_chnl_vsi_setup_q_map()
    1195  qmap |= FIELD_PREP(ICE_AQ_VSI_TC_Q_NUM_M, pow);             in ice_chnl_vsi_setup_q_map()
    1197  ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);               in ice_chnl_vsi_setup_q_map()
    3260  u16 pow, offset = 0, qcount_tx = 0, qcount_rx = 0, qmap;    in ice_vsi_setup_q_map_mqprio()  [local]
    3270  qmap = FIELD_PREP(ICE_AQ_VSI_TC_Q_OFFSET_M, tc0_offset);    in ice_vsi_setup_q_map_mqprio()
    3271  qmap |= FIELD_PREP(ICE_AQ_VSI_TC_Q_NUM_M, pow);             in ice_vsi_setup_q_map_mqprio()
    [all …]
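In ice, `qmap` is a 16-bit VSI context word that packs a first-queue offset and a power-of-two queue count into adjacent bit-fields with `FIELD_PREP()`, then gets stored little-endian in `tc_mapping[]`. The sketch below shows just the packing step; the helper name and the EX_* masks are invented (the real masks are ICE_AQ_VSI_TC_Q_OFFSET_M and ICE_AQ_VSI_TC_Q_NUM_M from the driver headers).

	#include <linux/bitfield.h>
	#include <linux/bits.h>
	#include <linux/types.h>
	#include <asm/byteorder.h>

	/* Hypothetical masks with the same shape as the ice definitions:
	 * low bits hold the first queue index, the next field holds
	 * log2(number of queues) for the traffic class.
	 */
	#define EX_TC_Q_OFFSET_M	GENMASK(10, 0)
	#define EX_TC_Q_NUM_M		GENMASK(14, 11)

	/* Illustrative packing of one traffic-class mapping word. */
	static __le16 example_pack_tc_mapping(u16 offset, u8 pow)
	{
		u16 qmap;

		qmap  = FIELD_PREP(EX_TC_Q_OFFSET_M, offset);	/* first queue of the TC */
		qmap |= FIELD_PREP(EX_TC_Q_NUM_M, pow);		/* 2^pow queues in the TC */

		return cpu_to_le16(qmap);			/* VSI context is little-endian */
	}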
|
ice_virtchnl.c
    1712  unsigned long qmap;                                               in ice_cfg_interrupt()  [local]
    1717  qmap = map->rxq_map;                                              in ice_cfg_interrupt()
    1718  for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {    in ice_cfg_interrupt()
    1732  qmap = map->txq_map;                                              in ice_cfg_interrupt()
    1733  for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {    in ice_cfg_interrupt()
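Here `qmap` is again a bitmap: a local copy of the VF's rxq_map/txq_map from the virtchnl message, walked with `for_each_set_bit()` to configure every queue the VF asked to attach to an interrupt vector. A minimal sketch of that walk, with invented names, is below.

	#include <linux/bitops.h>
	#include <linux/types.h>

	#define EX_MAX_QS_PER_VF	16	/* illustrative bound, like ICE_MAX_RSS_QS_PER_VF */

	/* Illustrative only: invoke @configure for each queue index whose bit is
	 * set in @queue_bitmap (e.g. a VF's rxq_map or txq_map).
	 */
	static void example_cfg_queue_vector(u16 queue_bitmap,
					     void (*configure)(unsigned int q_id))
	{
		unsigned long qmap = queue_bitmap;	/* widen so the bitop helpers apply */
		unsigned int q_idx;

		for_each_set_bit(q_idx, &qmap, EX_MAX_QS_PER_VF)
			configure(q_idx);
	}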
|
/drivers/net/ipa/

ipa_endpoint.h
    113  bool qmap;                                                        [member]

ipa_endpoint.c
    793  if (endpoint->config.qmap) {                                      in ipa_endpoint_init_hdr()
    838  if (endpoint->config.qmap) {                                      in ipa_endpoint_init_hdr_ext()
    866  if (endpoint->config.qmap && !endpoint->toward_ipa) {             in ipa_endpoint_init_hdr_ext()
    896  if (endpoint->config.qmap)                                        in ipa_endpoint_init_hdr_metadata_mask()

ipa_modem.c
    137  if (endpoint->config.qmap && skb->protocol != htons(ETH_P_MAP))   in ipa_start_xmit()
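The `qmap` member marks an endpoint as speaking QMAP: the init_hdr*/metadata-mask setup programs QMAP header handling, and the modem netdev's xmit path refuses packets that are not already ETH_P_MAP framed when the TX endpoint is a QMAP one. Below is a compressed sketch of that xmit-side guard with the error handling reduced to a drop; the ex_* names are hypothetical stand-ins, not the literal ipa_start_xmit() body.

	#include <linux/if_ether.h>
	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	struct ex_endpoint_config {
		bool qmap;	/* endpoint carries QMAP-encapsulated traffic */
	};

	/* Illustrative guard modelled on the check in ipa_start_xmit(). */
	static netdev_tx_t example_start_xmit(struct ex_endpoint_config *config,
					      struct sk_buff *skb,
					      struct net_device *netdev)
	{
		/* A QMAP TX endpoint only accepts ETH_P_MAP-framed packets. */
		if (config->qmap && skb->protocol != htons(ETH_P_MAP))
			goto drop;

		/* ... hand the skb to the endpoint's TX machinery here ... */
		return NETDEV_TX_OK;

	drop:
		dev_kfree_skb_any(skb);
		netdev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}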
|
/drivers/scsi/hisi_sas/

hisi_sas_v3_hw.c
    3370  struct blk_mq_queue_map *qmap;               in hisi_sas_map_queues()  [local]
    3374  qmap = &shost->tag_set.map[i];               in hisi_sas_map_queues()
    3376  qmap->nr_queues = hisi_hba->cq_nvecs;        in hisi_sas_map_queues()
    3378  qmap->nr_queues = hisi_hba->iopoll_q_cnt;    in hisi_sas_map_queues()
    3380  qmap->nr_queues = 0;                         in hisi_sas_map_queues()
    3385  if (!qmap->nr_queues)                        in hisi_sas_map_queues()
    3387  qmap->queue_offset = qoff;                   in hisi_sas_map_queues()
    3389  blk_mq_map_queues(qmap);                     in hisi_sas_map_queues()
    3391  blk_mq_map_hw_queues(qmap, hisi_hba->dev,    in hisi_sas_map_queues()
    3393  qoff += qmap->nr_queues;                     in hisi_sas_map_queues()
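In the SCSI hosts, `qmap` is a `struct blk_mq_queue_map *`, one per hctx type in the tag set: the driver sets each map's nr_queues and queue_offset, then lets the block layer fill in the CPU-to-queue table, affinity-aware via blk_mq_map_hw_queues() or with the generic blk_mq_map_queues() spread. A hedged sketch of that shape follows; the real .map_queues callback takes only the Scsi_Host and derives the counts and device from its private data, so the extra parameters here are placeholders.

	#include <linux/blk-mq.h>
	#include <scsi/scsi_host.h>

	/* Illustrative sketch modelled on hisi_sas_map_queues(): default queues
	 * follow IRQ affinity, poll queues use the generic CPU spread.
	 */
	static void example_map_queues(struct Scsi_Host *shost,
				       struct device *dev,
				       unsigned int nr_irq_queues,
				       unsigned int nr_poll_queues)
	{
		unsigned int i, qoff = 0;

		for (i = 0; i < shost->nr_maps; i++) {
			struct blk_mq_queue_map *qmap = &shost->tag_set.map[i];

			if (i == HCTX_TYPE_DEFAULT)
				qmap->nr_queues = nr_irq_queues;
			else if (i == HCTX_TYPE_POLL)
				qmap->nr_queues = nr_poll_queues;
			else
				qmap->nr_queues = 0;	/* unused map type */

			if (!qmap->nr_queues)
				continue;

			qmap->queue_offset = qoff;
			if (i == HCTX_TYPE_POLL)
				blk_mq_map_queues(qmap);	/* generic CPU spread */
			else
				blk_mq_map_hw_queues(qmap, dev, 0); /* follow IRQ affinity */

			qoff += qmap->nr_queues;
		}
	}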
|
hisi_sas_v2_hw.c
    3559  struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];   in map_queues_v2_hw()  [local]
    3563  for (queue = 0; queue < qmap->nr_queues; queue++) {                       in map_queues_v2_hw()
    3569  qmap->mq_map[cpu] = qmap->queue_offset + queue;                           in map_queues_v2_hw()
|
hisi_sas_main.c
    579  struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];    in hisi_sas_queue_command()  [local]
    581  queue = qmap->mq_map[raw_smp_processor_id()];                              in hisi_sas_queue_command()
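The v2 hardware fills the default map's mq_map[] by hand, assigning each CPU in a queue's affinity mask to that queue, and the submission path then reads mq_map[] for the current CPU to pick the hardware queue. A condensed sketch of both halves, with hypothetical helper names and an assumed per-queue affinity array, is below.

	#include <linux/blk-mq.h>
	#include <linux/cpumask.h>
	#include <linux/smp.h>
	#include <scsi/scsi_host.h>

	/* Illustrative only: fill the default map from per-queue affinity masks,
	 * the way map_queues_v2_hw() does.  @queue_masks is a placeholder for the
	 * driver's description of each queue's CPU affinity.
	 */
	static void example_fill_mq_map(struct Scsi_Host *shost,
					const struct cpumask *queue_masks,
					unsigned int nr_queues)
	{
		struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
		unsigned int queue, cpu;

		for (queue = 0; queue < nr_queues && queue < qmap->nr_queues; queue++)
			for_each_cpu(cpu, &queue_masks[queue])
				qmap->mq_map[cpu] = qmap->queue_offset + queue;
	}

	/* Illustrative only: pick the hardware queue for the submitting CPU,
	 * as hisi_sas_queue_command() does.
	 */
	static unsigned int example_pick_queue(struct Scsi_Host *shost)
	{
		struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];

		return qmap->mq_map[raw_smp_processor_id()];
	}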
|
/drivers/scsi/fnic/

fnic_main.c
    682  struct blk_mq_queue_map *qmap = &host->tag_set.map[HCTX_TYPE_DEFAULT];   in fnic_mq_map_queues_cpus()  [local]
    691  "qmap->nr_queues: %d\n", qmap->nr_queues);                               in fnic_mq_map_queues_cpus()
    699  blk_mq_map_hw_queues(qmap, &l_pdev->dev, FNIC_PCI_OFFSET);               in fnic_mq_map_queues_cpus()
|
/drivers/scsi/pm8001/

pm8001_init.c
    105  struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];   in pm8001_map_queues()  [local]
    108  blk_mq_map_hw_queues(qmap, &pm8001_ha->pdev->dev, 1);                     in pm8001_map_queues()
    112  blk_mq_map_queues(qmap);                                                  in pm8001_map_queues()
|
/drivers/net/ethernet/intel/i40e/

i40e_main.c
    1919  u16 qcount = 0, max_qcount, qmap, sections = 0;         in i40e_vsi_setup_queue_map_mqprio()  [local]
    1935  qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |    in i40e_vsi_setup_queue_map_mqprio()
    1965  ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);           in i40e_vsi_setup_queue_map_mqprio()
    2015  u16 qmap;                                               in i40e_vsi_setup_queue_map()  [local]
    2109  qmap =                                                  in i40e_vsi_setup_queue_map()
    2123  qmap = 0;                                               in i40e_vsi_setup_queue_map()
    2125  ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);           in i40e_vsi_setup_queue_map()
    6219  u16 qcount, qmap, sections = 0;                         in i40e_channel_setup_queue_map()  [local]
    6238  ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);           in i40e_channel_setup_queue_map()
    7878  u16 sections, qmap, num_qps;                            in i40e_setup_macvlans()  [local]
    [all …]
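i40e builds the same kind of per-TC word as ice, but with explicit shifts instead of FIELD_PREP(): a queue offset and a log2 queue count are OR'd into a u16 that lands in tc_mapping[]. A brief sketch with hypothetical shift names follows (the driver's real constants are I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT and I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT).

	#include <linux/types.h>
	#include <asm/byteorder.h>

	#define EX_TC_QUE_OFFSET_SHIFT	0	/* illustrative shift values */
	#define EX_TC_QUE_NUMBER_SHIFT	9

	/* Illustrative: pack a traffic class's first queue and log2(queue count)
	 * into the little-endian tc_mapping word, shift/OR style.
	 */
	static __le16 example_i40e_style_tc_mapping(u16 offset, u8 pow)
	{
		u16 qmap = (offset << EX_TC_QUE_OFFSET_SHIFT) |
			   (pow << EX_TC_QUE_NUMBER_SHIFT);

		return cpu_to_le16(qmap);
	}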
|
/drivers/net/ethernet/marvell/mvpp2/

mvpp2_main.c
    2348  u32 qmap;                                                    in mvpp2_egress_enable()  [local]
    2353  qmap = 0;                                                    in mvpp2_egress_enable()
    2358  qmap |= (1 << queue);                                        in mvpp2_egress_enable()
    2362  mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);    in mvpp2_egress_enable()
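Here `qmap` is a hardware bitmask: one bit per TX queue that has descriptors, written to the scheduler's queue-command register to enable egress. A tiny sketch of that mask-building loop, with placeholder names for the per-queue test and the register write, is below.

	#include <linux/bits.h>
	#include <linux/types.h>

	/* Illustrative only: build a one-bit-per-queue enable mask the way
	 * mvpp2_egress_enable() does.  @queue_has_descs and the write callback
	 * are hypothetical stand-ins for the driver's txq state and MMIO write.
	 */
	static void example_egress_enable(const bool *queue_has_descs,
					  unsigned int ntxqs,
					  void (*write_sched_q_cmd)(u32 val))
	{
		unsigned int queue;
		u32 qmap = 0;

		for (queue = 0; queue < ntxqs; queue++)
			if (queue_has_descs[queue])
				qmap |= BIT(queue);	/* enable this TX queue */

		write_sched_q_cmd(qmap);
	}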
|
/drivers/scsi/qla2xxx/

qla_os.c
    8056  struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];   in qla2xxx_map_queues()  [local]
    8059  blk_mq_map_queues(qmap);                                                  in qla2xxx_map_queues()
    8061  blk_mq_map_hw_queues(qmap, &vha->hw->pdev->dev,                           in qla2xxx_map_queues()
|