/drivers/net/ipa/

gsi.c
    513   struct gsi *gsi = channel->gsi;  (local in gsi_channel_state())
    528   struct gsi *gsi = channel->gsi;  (local in gsi_channel_command())
    829   struct gsi *gsi = channel->gsi;  (local in gsi_channel_program())
    926   struct gsi *gsi = channel->gsi;  (local in __gsi_channel_start())
    978   struct gsi *gsi = channel->gsi;  (local in __gsi_channel_stop())
    1084  struct gsi *gsi = trans->gsi;  (local in gsi_trans_tx_queued())
    1117  struct gsi *gsi = trans->gsi;  (local in gsi_trans_tx_completed())
    1590  struct gsi *gsi = channel->gsi;  (local in gsi_channel_doorbell())
    1604  struct gsi *gsi = channel->gsi;  (local in gsi_channel_update())
    2113  struct gsi *gsi = channel->gsi;  (local in gsi_channel_evt_ring_init())
    [all …]

gsi.h
    26    struct gsi;
    100   struct gsi *gsi;  (member)
    136   struct gsi {  (struct)
    166   int gsi_setup(struct gsi *gsi);
    172   void gsi_teardown(struct gsi *gsi);
    190   int gsi_channel_start(struct gsi *gsi, u32 channel_id);
    199   int gsi_channel_stop(struct gsi *gsi, u32 channel_id);
    228   void gsi_suspend(struct gsi *gsi);
    234   void gsi_resume(struct gsi *gsi);
    252   int gsi_channel_resume(struct gsi *gsi, u32 channel_id);
    [all …]
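The gsi.h entries above are the core of the GSI channel API: setup/teardown of the GSI layer and start/stop/suspend/resume of individual channels. A minimal sketch of a caller, using only the signatures listed here; the channel ID and the surrounding control flow are illustrative assumptions, not taken from the IPA code.

```c
/*
 * Hypothetical caller of the channel API declared in gsi.h.  Only
 * gsi_channel_start() and gsi_channel_stop() come from the listing
 * above; the channel ID and error handling are made up for illustration.
 */
#include <linux/types.h>

#include "gsi.h"

static int example_run_channel(struct gsi *gsi)
{
	u32 channel_id = 0;	/* assumed: a valid, already-initialized channel */
	int ret;

	ret = gsi_channel_start(gsi, channel_id);
	if (ret)
		return ret;

	/* ... queue and complete transactions on the channel ... */

	return gsi_channel_stop(gsi, channel_id);
}
```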
gsi_reg.c
    13    static bool gsi_reg_id_valid(struct gsi *gsi, enum gsi_reg_id reg_id)  (argument of gsi_reg_id_valid())
    82    const struct reg *gsi_reg(struct gsi *gsi, enum gsi_reg_id reg_id)  (argument of gsi_reg())
    90    static const struct regs *gsi_regs(struct gsi *gsi)  (argument of gsi_regs())
    122   int gsi_reg_init(struct gsi *gsi, struct platform_device *pdev)  (argument of gsi_reg_init())
    141   gsi->regs = gsi_regs(gsi);  (in gsi_reg_init())
    142   if (!gsi->regs) {  (in gsi_reg_init())
    148   if (!gsi->virt) {  (in gsi_reg_init())
    157   void gsi_reg_exit(struct gsi *gsi)  (argument of gsi_reg_exit())
    159   iounmap(gsi->virt);  (in gsi_reg_exit())
    160   gsi->virt = NULL;  (in gsi_reg_exit())
    [all …]

ipa_gsi.c
    18    struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);  (in ipa_gsi_trans_complete())
    25    struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);  (in ipa_gsi_trans_release())
    30    void ipa_gsi_channel_tx_queued(struct gsi *gsi, u32 channel_id, u32 count,  (argument of ipa_gsi_channel_tx_queued())
    33    struct ipa *ipa = container_of(gsi, struct ipa, gsi);  (in ipa_gsi_channel_tx_queued())
    41    void ipa_gsi_channel_tx_completed(struct gsi *gsi, u32 channel_id, u32 count,  (argument of ipa_gsi_channel_tx_completed())
    44    struct ipa *ipa = container_of(gsi, struct ipa, gsi);  (in ipa_gsi_channel_tx_completed())
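Every ipa_gsi.c hit is a variation on one pattern: struct ipa embeds its struct gsi by value (the ipa.h entry further down shows the member at line 74), so callbacks that are handed a struct gsi * recover the enclosing struct ipa * with container_of(). A reduced illustration of that pattern with stand-in types:

```c
/*
 * Stand-in types illustrating the container_of() pattern used in
 * ipa_gsi.c: the outer object embeds the inner one by value, so a
 * pointer to the inner member can be converted back to the outer
 * structure.  These are not the real IPA definitions.
 */
#include <linux/container_of.h>

struct gsi_like {
	int version;
};

struct ipa_like {
	/* ... other members ... */
	struct gsi_like gsi;	/* embedded by value, not a pointer */
};

static struct ipa_like *ipa_like_from_gsi(struct gsi_like *gsi)
{
	return container_of(gsi, struct ipa_like, gsi);
}
```

The same conversion would be invalid if gsi were a pointer member rather than an embedded structure, which is why the embedding matters here.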
gsi_trans.h
    21    struct gsi;
    51    struct gsi *gsi;  (member)
    142   bool gsi_channel_trans_idle(struct gsi *gsi, u32 channel_id);
    154   struct gsi_trans *gsi_channel_trans_alloc(struct gsi *gsi, u32 channel_id,
    217   int gsi_trans_read_byte(struct gsi *gsi, u32 channel_id, dma_addr_t addr);
    227   void gsi_trans_read_byte_done(struct gsi *gsi, u32 channel_id);

gsi_trans.c
    324   bool gsi_channel_trans_idle(struct gsi *gsi, u32 channel_id)  (argument of gsi_channel_trans_idle())
    326   u32 tre_max = gsi_channel_tre_max(gsi, channel_id);  (in gsi_channel_trans_idle())
    329   trans_info = &gsi->channel[channel_id].trans_info;  (in gsi_channel_trans_idle())
    335   struct gsi_trans *gsi_channel_trans_alloc(struct gsi *gsi, u32 channel_id,  (argument of gsi_channel_trans_alloc())
    339   struct gsi_channel *channel = &gsi->channel[channel_id];  (in gsi_channel_trans_alloc())
    358   trans->gsi = gsi;  (in gsi_channel_trans_alloc())
    671   int gsi_trans_read_byte(struct gsi *gsi, u32 channel_id, dma_addr_t addr)  (argument of gsi_trans_read_byte())
    673   struct gsi_channel *channel = &gsi->channel[channel_id];  (in gsi_trans_read_byte())
    696   void gsi_trans_read_byte_done(struct gsi *gsi, u32 channel_id)  (argument of gsi_trans_read_byte_done())
    698   struct gsi_channel *channel = &gsi->channel[channel_id];  (in gsi_trans_read_byte_done())
    [all …]
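Judging by the excerpt, gsi_channel_trans_idle() compares the channel's available TRE count against its maximum, so it answers whether a channel currently has no transactions outstanding. A hedged sketch of how such a predicate might gate further work; example_replenish() is a made-up placeholder, not part of the driver:

```c
/*
 * Sketch built around gsi_channel_trans_idle() from the listing above.
 * example_replenish() is a hypothetical placeholder for whatever work
 * should only restart once the channel has fully drained.
 */
#include <linux/types.h>

#include "gsi_trans.h"

static void example_replenish(struct gsi *gsi, u32 channel_id)
{
	/* hypothetical: allocate and queue fresh transactions here */
}

static void example_kick_if_idle(struct gsi *gsi, u32 channel_id)
{
	if (gsi_channel_trans_idle(gsi, channel_id))
		example_replenish(gsi, channel_id);
}
```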
ipa_cmd.c
    329   struct device *dev = channel->gsi->dev;  (in ipa_cmd_pool_init())
    343   struct device *dev = channel->gsi->dev;  (in ipa_cmd_pool_exit())
    355   trans_info = &ipa->gsi.channel[endpoint->channel_id].trans_info;  (in ipa_cmd_payload_alloc())
    366   struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);  (in ipa_cmd_table_init_add())
    404   struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);  (in ipa_cmd_hdr_init_local_add())
    433   struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);  (in ipa_cmd_register_write_add())
    490   struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);  (in ipa_cmd_ip_packet_init_add())
    515   struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);  (in ipa_cmd_dma_shared_mem_add())
    555   struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);  (in ipa_cmd_ip_tag_status_add())
    573   struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);  (in ipa_cmd_transfer_add())
    [all …]

ipa_gsi.h
    11    struct gsi;
    44    void ipa_gsi_channel_tx_queued(struct gsi *gsi, u32 channel_id, u32 count,
    57    void ipa_gsi_channel_tx_completed(struct gsi *gsi, u32 channel_id, u32 count,

gsi_reg.h
    15    struct gsi;
    365   const struct reg *gsi_reg(struct gsi *gsi, enum gsi_reg_id reg_id);
    375   int gsi_reg_init(struct gsi *gsi, struct platform_device *pdev);
    381   void gsi_reg_exit(struct gsi *gsi);

ipa_endpoint.c
    436   struct gsi *gsi = &endpoint->ipa->gsi;  (local in ipa_endpoint_trans_alloc())
    593   gsi_modem_channel_flow_control(&ipa->gsi,  (in ipa_endpoint_modem_pause_all())
    1674  struct gsi *gsi = &ipa->gsi;  (local in ipa_endpoint_reset_rx_aggr())
    1729  ret = gsi_channel_stop(gsi, endpoint->channel_id);  (in ipa_endpoint_reset_rx_aggr())
    1745  (void)gsi_channel_stop(gsi, endpoint->channel_id);  (in ipa_endpoint_reset_rx_aggr())
    1772  gsi_channel_reset(&ipa->gsi, channel_id, true);  (in ipa_endpoint_reset())
    1818  struct gsi *gsi = &ipa->gsi;  (local in ipa_endpoint_enable_one())
    1844  struct gsi *gsi = &ipa->gsi;  (local in ipa_endpoint_disable_one())
    1867  struct gsi *gsi = &endpoint->ipa->gsi;  (local in ipa_endpoint_suspend_one())
    1887  struct gsi *gsi = &endpoint->ipa->gsi;  (local in ipa_endpoint_resume_one())
    [all …]

gsi_private.h
    13    struct gsi;
    81    int gsi_channel_trans_init(struct gsi *gsi, u32 channel_id);

ipa.h
    74    struct gsi gsi;  (member)

ipa_main.c
    118   ret = gsi_setup(&ipa->gsi);  (in ipa_setup())
    168   gsi_teardown(&ipa->gsi);  (in ipa_setup())
    192   gsi_teardown(&ipa->gsi);  (in ipa_teardown())
    859   ret = gsi_init(&ipa->gsi, pdev, ipa->version, data->endpoint_count,  (in ipa_probe())
    921   gsi_exit(&ipa->gsi);  (in ipa_probe())
    987   gsi_exit(&ipa->gsi);  (in ipa_remove())
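The ipa_main.c hits show the pairing discipline: gsi_setup() in ipa_setup() is undone by gsi_teardown() both on the error path and in ipa_teardown(), and gsi_init() at probe time is matched by gsi_exit() on the probe error path and in remove. A schematic of the setup-side pairing; ipa_other_setup() is a made-up stand-in for the IPA-specific steps that sit between the two calls in the real function:

```c
/*
 * Schematic of the pairing visible in ipa_main.c.  gsi_setup() and
 * gsi_teardown() are the declarations listed in gsi.h above;
 * ipa_other_setup() is a hypothetical placeholder.
 */
#include "gsi.h"
#include "ipa.h"

static int ipa_other_setup(struct ipa *ipa)
{
	return 0;	/* hypothetical: endpoint, table and command setup */
}

static int example_ipa_setup(struct ipa *ipa)
{
	int ret;

	ret = gsi_setup(&ipa->gsi);
	if (ret)
		return ret;

	ret = ipa_other_setup(ipa);
	if (ret)
		goto err_gsi_teardown;

	return 0;

err_gsi_teardown:
	gsi_teardown(&ipa->gsi);
	return ret;
}
```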
/drivers/infiniband/hw/mlx5/

gsi.c
    49    struct mlx5_ib_gsi_qp *gsi = &mqp->gsi;  (local in generate_completions())
    106   gsi = &mqp->gsi;  (in mlx5_ib_create_gsi())
    156   dev->devr.ports[attr->port_num - 1].gsi = gsi;  (in mlx5_ib_create_gsi())
    171   struct mlx5_ib_gsi_qp *gsi = &mqp->gsi;  (local in mlx5_ib_destroy_gsi())
    325   struct mlx5_ib_gsi_qp *gsi = &mqp->gsi;  (local in mlx5_ib_gsi_modify_qp())
    350   struct mlx5_ib_gsi_qp *gsi = &mqp->gsi;  (local in mlx5_ib_gsi_query_qp())
    362   struct mlx5_ib_gsi_qp *gsi = &mqp->gsi;  (local in mlx5_ib_add_outstanding_wr())
    366   if (gsi->outstanding_pi == gsi->outstanding_ci + gsi->cap.max_send_wr) {  (in mlx5_ib_add_outstanding_wr())
    371   gsi_wr = &gsi->outstanding_wrs[gsi->outstanding_pi %  (in mlx5_ib_add_outstanding_wr())
    433   struct mlx5_ib_gsi_qp *gsi = &mqp->gsi;  (local in mlx5_ib_gsi_post_send())
    [all …]
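In the mlx5 driver, gsi refers to the GSI (QP1) emulation rather than an ACPI interrupt. The mlx5_ib_add_outstanding_wr() lines show a classic free-running producer/consumer ring: the ring is full when outstanding_pi equals outstanding_ci plus max_send_wr, and slots are indexed with the producer index modulo the ring size. A generic sketch of that accounting scheme, with names that are illustrative rather than the mlx5 ones:

```c
/*
 * Generic free-running producer/consumer ring accounting in the style
 * of the mlx5_ib_add_outstanding_wr() excerpt: full when pi == ci + size,
 * slot chosen with pi % size.  Unsigned wraparound keeps the comparison
 * correct as long as size is much smaller than U32_MAX.
 */
#include <linux/errno.h>
#include <linux/types.h>

struct example_ring {
	u32 pi;		/* free-running producer index */
	u32 ci;		/* free-running consumer index, advanced on completion */
	u32 size;	/* number of slots */
	void **slots;	/* slot storage, allocated elsewhere */
};

static int example_ring_add(struct example_ring *ring, void *entry)
{
	if (ring->pi == ring->ci + ring->size)
		return -ENOMEM;		/* ring full */

	ring->slots[ring->pi % ring->size] = entry;
	ring->pi++;

	return 0;
}
```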
/drivers/perf/

arm_pmu_acpi.c
    25    int gsi, trigger;  (local in arm_pmu_acpi_register_irq())
    29    gsi = gicc->performance_interrupt;  (in arm_pmu_acpi_register_irq())
    38    if (!gsi)  (in arm_pmu_acpi_register_irq())
    63    int gsi;  (local in arm_pmu_acpi_unregister_irq())
    68    if (gsi)  (in arm_pmu_acpi_unregister_irq())
    69    acpi_unregister_gsi(gsi);  (in arm_pmu_acpi_unregister_irq())
    77    u16 this_gsi = 0, gsi = 0;  (local in arm_acpi_register_pmu_device())
    98    return gsi ? -ENXIO : 0;  (in arm_acpi_register_pmu_device())
    102   if (!gsi) {  (in arm_acpi_register_pmu_device())
    104   gsi = this_gsi;  (in arm_acpi_register_pmu_device())
    [all …]

/drivers/acpi/

irq.c
    16    static u32 (*acpi_gsi_to_irq_fallback)(u32 gsi);
    28    int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)  (argument of acpi_gsi_to_irq())
    32    d = irq_find_matching_fwnode(acpi_get_gsi_domain_id(gsi),  (in acpi_gsi_to_irq())
    34    *irq = irq_find_mapping(d, gsi);  (in acpi_gsi_to_irq())
    40    *irq = acpi_gsi_to_irq_fallback(gsi);  (in acpi_gsi_to_irq())
    62    fwspec.fwnode = acpi_get_gsi_domain_id(gsi);  (in acpi_register_gsi())
    68    fwspec.param[0] = gsi;  (in acpi_register_gsi())
    84    void acpi_unregister_gsi(u32 gsi)  (argument of acpi_unregister_gsi())
    94    irq = irq_find_mapping(d, gsi);  (in acpi_unregister_gsi())
    113   u32 gsi)  (argument of acpi_get_irq_source_fwhandle())
    [all …]
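drivers/acpi/irq.c is where a GSI number becomes a Linux IRQ: acpi_gsi_to_irq() looks up an existing mapping in the GSI's IRQ domain (with an optional architecture fallback), and acpi_register_gsi() creates a mapping from an irq_fwspec. A hedged sketch of typical driver-side use, assuming the usual contract that acpi_gsi_to_irq() returns 0 on success and acpi_register_gsi() returns a Linux IRQ number or a negative errno; the trigger and polarity values are illustrative:

```c
/*
 * Sketch of driver-side use of the GSI helpers listed above.
 * Assumptions (hedged): acpi_gsi_to_irq() returns 0 on success and
 * fills *irq; acpi_register_gsi() returns a Linux IRQ number or a
 * negative errno.
 */
#include <linux/acpi.h>

static int example_gsi_to_linux_irq(u32 gsi)
{
	unsigned int irq;

	/* Reuse an existing mapping if the GSI is already wired up. */
	if (!acpi_gsi_to_irq(gsi, &irq))
		return irq;

	/* Otherwise create a mapping in the GSI's IRQ domain. */
	return acpi_register_gsi(NULL, gsi, ACPI_LEVEL_SENSITIVE,
				 ACPI_ACTIVE_HIGH);
}
```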
evged.c
    51    unsigned int gsi;  (member)
    61    acpi_ret = acpi_execute_simple_method(event->handle, NULL, event->gsi);  (in acpi_ged_irq_handler())
    73    unsigned int gsi;  (local in acpi_ged_request_interrupt())
    93    gsi = p->interrupts[0];  (in acpi_ged_request_interrupt())
    96    gsi = pext->interrupts[0];  (in acpi_ged_request_interrupt())
    102   switch (gsi) {  (in acpi_ged_request_interrupt())
    105   trigger == ACPI_EDGE_SENSITIVE ? 'E' : 'L', gsi);  (in acpi_ged_request_interrupt())
    122   event->gsi = gsi;  (in acpi_ged_request_interrupt())
    136   dev_dbg(dev, "GED listening GSI %u @ IRQ %u\n", gsi, irq);  (in acpi_ged_request_interrupt())
    172   event->gsi, event->irq);  (in ged_shutdown())
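The GED (Generic Event Device) driver records each event's GSI, Linux IRQ and ACPI handle, and its interrupt handler evaluates the associated ACPI method with the GSI as the sole argument via acpi_execute_simple_method(). A reduced sketch of such a handler; struct example_ged_event is an assumption that mirrors only the fields visible in the excerpt:

```c
/*
 * Reduced sketch of the GED pattern above: on interrupt, invoke the
 * ACPI event method with the GSI number as its single argument.
 * struct example_ged_event is hypothetical; it models the handle, gsi
 * and irq fields that appear in the excerpt.
 */
#include <linux/acpi.h>
#include <linux/interrupt.h>

struct example_ged_event {
	unsigned int gsi;	/* GSI the event was registered for */
	unsigned int irq;	/* Linux IRQ mapped from that GSI */
	acpi_handle handle;	/* handle of the method to evaluate */
};

static irqreturn_t example_ged_irq_handler(int irq, void *data)
{
	struct example_ged_event *event = data;
	acpi_status status;

	status = acpi_execute_simple_method(event->handle, NULL, event->gsi);
	if (ACPI_FAILURE(status))
		return IRQ_NONE;

	return IRQ_HANDLED;
}
```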
pci_irq.c
    386   int gsi;  (local in acpi_pci_irq_enable())
    426   gsi = acpi_pci_link_allocate_irq(entry->link,  (in acpi_pci_irq_enable())
    431   gsi = entry->index;  (in acpi_pci_irq_enable())
    433   gsi = -1;  (in acpi_pci_irq_enable())
    435   if (gsi < 0) {  (in acpi_pci_irq_enable())
    469   pin_name(pin), link_desc, gsi,  (in acpi_pci_irq_enable())
    480   int gsi;  (local in acpi_pci_irq_disable())
    500   gsi = acpi_pci_link_free_irq(entry->link);  (in acpi_pci_irq_disable())
    502   gsi = entry->index;  (in acpi_pci_irq_disable())
    512   if (gsi >= 0) {  (in acpi_pci_irq_disable())
    [all …]

resource.c
    712   if (entry->irq == gsi &&  (in acpi_dev_irq_override())
    726   if (gsi != 1 && gsi != 12)  (in acpi_dev_irq_override())
    730   if (acpi_int_src_ovr[gsi])  (in acpi_dev_irq_override())
    745   static void acpi_dev_get_irqresource(struct resource *res, u32 gsi,  (argument of acpi_dev_get_irqresource())
    751   if (!valid_IRQ(gsi)) {  (in acpi_dev_get_irqresource())
    752   irqresource_disabled(res, gsi);  (in acpi_dev_get_irqresource())
    767   acpi_dev_irq_override(gsi, triggering, polarity, shareable) &&  (in acpi_dev_get_irqresource())
    768   !acpi_get_override_irq(gsi, &t, &p)) {  (in acpi_dev_get_irqresource())
    773   pr_warn("ACPI: IRQ %d override to %s%s, %s%s\n", gsi,  (in acpi_dev_get_irqresource())
    784   irq = acpi_register_gsi(NULL, gsi, triggering, polarity);  (in acpi_dev_get_irqresource())
    [all …]

/drivers/irqchip/

irq-loongarch-cpu.c
    21    static u32 lpic_gsi_to_irq(u32 gsi)  (argument of lpic_gsi_to_irq())
    26    if (gsi >= GSI_MIN_PCH_IRQ && gsi <= GSI_MAX_PCH_IRQ)  (in lpic_gsi_to_irq())
    27    irq = acpi_register_gsi(NULL, gsi, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_HIGH);  (in lpic_gsi_to_irq())
    32    static struct fwnode_handle *lpic_get_gsi_domain_id(u32 gsi)  (argument of lpic_get_gsi_domain_id())
    37    switch (gsi) {  (in lpic_get_gsi_domain_id())
    49    id = find_pch_pic(gsi);  (in lpic_get_gsi_domain_id())

/drivers/xen/

acpi.c
    92    int gsi;  (local in xen_acpi_get_gsi_info())
    109   gsi = acpi_pci_link_allocate_irq(entry->link,  (in xen_acpi_get_gsi_info())
    114   gsi = entry->index;  (in xen_acpi_get_gsi_info())
    116   gsi = -1;  (in xen_acpi_get_gsi_info())
    118   if (gsi < 0)  (in xen_acpi_get_gsi_info())
    121   *gsi_out = gsi;  (in xen_acpi_get_gsi_info())

/drivers/hv/

mshv_irq.c
    30    if (ue[i].gsi >= MSHV_MAX_GUEST_IRQS)  (in mshv_update_routing_table())
    36    nr_rt_entries = max(nr_rt_entries, ue[i].gsi);  (in mshv_update_routing_table())
    49    girq = &new->mshv_girq_info_tbl[ue[i].gsi];  (in mshv_update_routing_table())
    59    girq->guest_irq_num = ue[i].gsi;  (in mshv_update_routing_table())

/drivers/platform/x86/

intel_scu_wdt.c
    31    int gsi = TANGIER_EXT_TIMER0_MSI;  (local in tangier_probe())
    39    irq = mp_map_gsi_to_irq(gsi, IOAPIC_MAP_ALLOC, &info);  (in tangier_probe())
    41    dev_warn(&pdev->dev, "cannot find interrupt %d in ioapic\n", gsi);  (in tangier_probe())

/drivers/acpi/riscv/

irq.c
    94    struct fwnode_handle *riscv_acpi_get_gsi_domain_id(u32 gsi)  (argument of riscv_acpi_get_gsi_domain_id())
    102   if (gsi >= ext_intc_element->gsi_base &&  (in riscv_acpi_get_gsi_domain_id())
    103   gsi < (ext_intc_element->gsi_base + ext_intc_element->nr_irqs)) {  (in riscv_acpi_get_gsi_domain_id())
    188   static acpi_handle riscv_acpi_get_gsi_handle(u32 gsi)  (argument of riscv_acpi_get_gsi_handle())
    195   if (gsi >= ext_intc_element->gsi_base &&  (in riscv_acpi_get_gsi_handle())
    196   gsi < (ext_intc_element->gsi_base + ext_intc_element->nr_irqs))  (in riscv_acpi_get_gsi_handle())
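Both RISC-V helpers above resolve a GSI by walking the external interrupt controller entries and testing whether it falls in the half-open window [gsi_base, gsi_base + nr_irqs). The range test itself, isolated into a small helper over an assumed two-field entry type:

```c
/*
 * Isolated version of the range test used above: a GSI belongs to an
 * interrupt controller when it falls in [gsi_base, gsi_base + nr_irqs).
 * struct example_ext_intc is a stand-in modelling only the two fields
 * the test needs.
 */
#include <linux/types.h>

struct example_ext_intc {
	u32 gsi_base;	/* first GSI handled by this controller */
	u32 nr_irqs;	/* number of consecutive GSIs it handles */
};

static bool example_gsi_in_range(const struct example_ext_intc *intc, u32 gsi)
{
	return gsi >= intc->gsi_base &&
	       gsi < intc->gsi_base + intc->nr_irqs;
}
```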
/drivers/xen/events/

events_base.c
    124   unsigned short gsi;  (member)
    392   info->u.pirq.gsi = gsi;  (in xen_irq_info_pirq_setup())
    761   if (gsi < nr_legacy_irqs())  (in xen_allocate_irq_gsi())
    762   irq = gsi;  (in xen_allocate_irq_gsi())
    942   if (info->u.pirq.gsi == gsi)  (in xen_irq_from_gsi())
    1023  __func__, ret, gsi);  (in xen_bind_pirq_gsi_to_irq())
    1918  int pirq, rc, irq, gsi;  (local in restore_pirqs())
    1927  gsi = info->u.pirq.gsi;  (in restore_pirqs())
    1932  if (!gsi)  (in restore_pirqs())
    1937  map_irq.index = gsi;  (in restore_pirqs())
    [all …]