#!/usr/bin/env python3
#
# Copyright (C) 2021-2022 Intel Corporation.
#
# SPDX-License-Identifier: BSD-3-Clause
#

import sys, os
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'library'))
import acrn_config_utilities, board_cfg_lib, lib.error, lib.lib
import ast
import re
from collections import defaultdict
from itertools import combinations
from acrn_config_utilities import get_node

# Number of legacy (8259-style) IRQ lines; legacy IRQs are 0 .. LEGACY_IRQ_MAX - 1.
LEGACY_IRQ_MAX = 16

def _parse_pt_intx(pt_intx_text):
    """Parse a scenario ``pt_intx`` string such as ``(1, 2)(3, 4)`` into a
    ``{physical_irq: virtual_irq}`` dict.

    ``ast.literal_eval`` is used instead of ``eval`` so that the configuration
    text can only contain literals, never executable expressions.
    """
    return dict(ast.literal_eval(f"[{pt_intx_text.replace(')(', '), (')}]"))

def get_native_valid_irq():
    """
    This is get available irq from board info file
    :return: native available irq list (list of int)

    NOTE(review): if the <AVAILABLE_IRQ_INFO> section contains multiple lines,
    each iteration overwrites the previous result and only the last line is
    kept — this preserves the historical behavior of this tool.
    """
    val_irq = []
    irq_info_lines = board_cfg_lib.get_info(acrn_config_utilities.BOARD_INFO_FILE, "<AVAILABLE_IRQ_INFO>", "</AVAILABLE_IRQ_INFO>")
    for irq_string in irq_info_lines:
        val_irq = [int(x.strip()) for x in irq_string.split(',')]
    return val_irq

def alloc_standard_irq(io_port):
    """Return the standard ISA IRQ (as a string) for a legacy COM port I/O base.

    COM1 (0x3F8) and COM3 (0x3E8) conventionally use IRQ 4; COM2 (0x2F8) and
    COM4 (0x2E8) use IRQ 3.  Any other port yields "0", meaning "no standard
    IRQ assigned".
    """
    if io_port in ("0x3F8", "0x3E8"):
        return "4"
    if io_port in ("0x2F8", "0x2E8"):
        return "3"
    return "0"

def alloc_irq(irq_list):
    """Pop and return the first IRQ from *irq_list* (mutates the list).

    :raises lib.error.ResourceError: when the list is exhausted.
    """
    try:
        irq = irq_list[0]
        remove_irq(irq_list, irq)
        return irq
    except IndexError as e:
        raise lib.error.ResourceError("Cannot allocate legacy irq, the available irq list: {}, {}".format(e, irq_list)) from e

def remove_irq(irq_list, irq):
    """Remove *irq* from *irq_list*, raising ValueError when it is absent."""
    try:
        irq_list.remove(irq)
    except ValueError as e:
        raise ValueError("Cannot remove irq:{} from available irq list:{}, {}".
            format(irq, e, irq_list)) from e

def create_vuart_irq_node(etree, vm_id, load_order, vuart_id, irq):
    """Record the IRQ of one legacy vUART in the allocation tree.

    Creates the ``/acrn-config/vm[@id]`` node, its ``load_order`` and the
    ``legacy_vuart[@id]`` node on demand, then appends the ``irq`` leaf.
    """
    allocation_vm_node = get_node(f"/acrn-config/vm[@id = '{vm_id}']", etree)
    if allocation_vm_node is None:
        allocation_vm_node = acrn_config_utilities.append_node("/acrn-config/vm", None, etree, id = vm_id)
    if get_node("./load_order", allocation_vm_node) is None:
        acrn_config_utilities.append_node("./load_order", load_order, allocation_vm_node)
    if get_node(f"./legacy_vuart[@id = '{vuart_id}']", allocation_vm_node) is None:
        acrn_config_utilities.append_node("./legacy_vuart", None, allocation_vm_node, id = vuart_id)

    acrn_config_utilities.append_node(f"./legacy_vuart[@id = '{vuart_id}']/irq", irq, allocation_vm_node)

def alloc_vuart_connection_irqs(board_etree, scenario_etree, allocation_etree):
    """Allocate legacy IRQs for every vUART connection of every VM.

    Service VMs draw from the board's native free-IRQ pool (ints, with the
    COM1/COM2 IRQs 3 and 4 reserved); user VMs draw from a synthetic pool of
    string IRQs "5" .. "14" (with "14" reserved).  Results are written into
    *allocation_etree*.
    """
    # NOTE(review): these two values are currently unused here; the calls are
    # kept in case the library functions perform validation side effects —
    # confirm before removing.
    native_ttys = lib.lib.get_native_ttys()
    hv_debug_console = lib.lib.parse_hv_console(scenario_etree)

    vm_node_list = scenario_etree.xpath("//vm")

    for vm_node in vm_node_list:
        load_order = get_node("./load_order/text()", vm_node)
        # Service VM: native free IRQs (ints); user VM: strings "5".."14".
        irq_list = get_native_valid_irq() if load_order == "SERVICE_VM" else [f"{d}" for d in range(5, 15)]

        if load_order == "SERVICE_VM":
            # Reserve the standard COM2/COM1 IRQs for the physical UARTs.
            if 3 in irq_list:
                remove_irq(irq_list, 3)
            if 4 in irq_list:
                remove_irq(irq_list, 4)
        else:
            # BUGFIX: the pool holds strings, so the membership test must use
            # "14" — the original `14 in irq_list` never matched.
            if "14" in irq_list:
                remove_irq(irq_list, "14")
        vuart_id = 1
        legacy_vuart_irq = "0"
        vmname = get_node("./name/text()", vm_node)

        vuart_connections = scenario_etree.xpath("//vuart_connection")
        for connection in vuart_connections:
            endpoint_list = connection.xpath(".//endpoint")
            for endpoint in endpoint_list:
                vm_name = get_node("./vm_name/text()", endpoint)
                if vm_name == vmname:
                    vuart_type = get_node("./type/text()", connection)
                    if vuart_type == "legacy":
                        # Prefer the standard IRQ of the configured COM port;
                        # fall back to the pool for user VMs.
                        io_port = get_node("./io_port/text()", endpoint)
                        legacy_vuart_irq = alloc_standard_irq(io_port)
                        if legacy_vuart_irq == "0" and load_order != "SERVICE_VM":
                            legacy_vuart_irq = str(alloc_irq(irq_list))
                    else:
                        # Always record the IRQ as a string in the XML.
                        legacy_vuart_irq = str(alloc_irq(irq_list))

                    create_vuart_irq_node(allocation_etree, get_node("./@id", vm_node), load_order, str(vuart_id), legacy_vuart_irq)
                    vuart_id = vuart_id + 1
        # Allocate irq for S5 vuart, we have to use the irq of COM2
        if load_order != "SERVICE_VM":
            legacy_vuart_irq = str(alloc_irq(irq_list))
            create_vuart_irq_node(allocation_etree, get_node("./@id", vm_node), load_order, str(vuart_id), legacy_vuart_irq)
            vuart_id = vuart_id + 1

    # The service VM additionally gets one placeholder vUART (irq "0") per
    # user VM, numbered after its own endpoint vUARTs.
    user_vm_list = scenario_etree.xpath("//vm[load_order != 'SERVICE_VM']/name/text()")
    service_vm_id = get_node("//vm[load_order = 'SERVICE_VM']/@id", scenario_etree)
    service_vm_name = get_node("//vm[load_order = 'SERVICE_VM']/name/text()", scenario_etree)
    service_vuart_list = scenario_etree.xpath(f"//endpoint[vm_name = '{service_vm_name}']")
    if service_vm_id is not None:
        for index in range(len(user_vm_list)):
            # xpath() always returns a list (possibly empty), so the old
            # `is not None` branch was always taken; len() handles empty fine.
            vuart_id = index + len(service_vuart_list) + 1

            create_vuart_irq_node(allocation_etree, service_vm_id, "SERVICE_VM", str(vuart_id), "0")

def get_irqs_of_device(device_node):
    """Return the set of physical interrupt lines (ints) a board device uses.

    Combines the IRQs listed in its ACPI resources with the IRQ reachable
    through its PCI interrupt pin (either a global interrupt number or an
    indexed resource of another named ACPI device).
    """
    irqs = set()

    # IRQs in ACPI
    for res in device_node.xpath("resource[@type='irq']"):
        irqs.update(set(map(int, res.get("int").split(", "))))

    # PCI interrupt pin
    for res in device_node.xpath("resource[@type='interrupt_pin']"):
        source = res.get("source", None)
        if source is not None:
            if source.isdigit():
                # Interrupts from the global interrupt pool
                irqs.add(int(source))
            else:
                # Interrupts from another device
                index = res.get("index", "0")
                irq = get_node(f"//device[acpi_object='{source}']/resource[@id='res{index}' and @type='irq']/@int", device_node.getroottree())
                if irq is not None:
                    irqs.add(int(irq))

    return irqs

def alloc_device_irqs(board_etree, scenario_etree, allocation_etree):
    """Assign physical interrupt lines of passthrough devices to VMs, detect
    cross-VM conflicts, and dump the per-device INTx mappings to
    *allocation_etree*.

    :raises lib.error.ResourceError: when a LAPIC-passthrough pre-launched VM
        needs any interrupt line, or when two VMs share one.
    """
    service_vm_id = -1
    irq_allocation = defaultdict(lambda: defaultdict(lambda: []))  # vm_id -> irq -> [device]

    # Collect the list of devices that have to use INTx, excluding legacy UART which is to be emulated.
    device_nodes = set(board_etree.xpath("//device[count(resource[@type='irq' or @type='interrupt_pin']) > 0 and count(capability[@id='MSI' or @id='MSI-X']) = 0]"))
    uart_nodes = set(board_etree.xpath("//device[@id='PNP0501']"))
    device_nodes -= uart_nodes

    #
    # Identify the interrupt lines each pre-launched VM uses
    #
    for vm in scenario_etree.xpath("//vm"):
        load_order = vm.find("load_order").text
        vm_id = int(vm.get("id"))
        if lib.lib.is_pre_launched_vm(load_order):
            pt_intx_text = get_node("pt_intx/text()", vm)
            if pt_intx_text is not None:
                # literal_eval (not eval) — scenario text must be pure literals.
                pt_intx_mapping = _parse_pt_intx(pt_intx_text)
                for irq in pt_intx_mapping.keys():
                    irq_allocation[vm_id][irq].append("(Explicitly assigned in scenario configuration)")
            for pci_dev in vm.xpath("pci_devs/pci_dev/text()"):
                bdf = lib.lib.BusDevFunc.from_str(pci_dev.split(" ")[0])
                address = hex((bdf.dev << 16) | (bdf.func))
                device_node = get_node(f"//bus[@address='{hex(bdf.bus)}']/device[@address='{address}']", board_etree)
                if device_node in device_nodes:
                    irqs = get_irqs_of_device(device_node)
                    for irq in irqs:
                        irq_allocation[vm_id][irq].append(pci_dev)
                    # Claimed by this VM — no longer available to the service VM.
                    device_nodes.discard(device_node)

            # Raise error when any pre-launched VM with LAPIC passthrough requires any interrupt line.
            lapic_passthru_flag = get_node("lapic_passthrough[text() = 'y']", vm)
            if lapic_passthru_flag is not None and irq_allocation[vm_id]:
                for irq, devices in irq_allocation[vm_id].items():
                    print(f"Interrupt line {irq} is used by the following device(s).")
                    for device in devices:
                        print(f"\t{device}")
                raise lib.error.ResourceError(f"Pre-launched VM {vm_id} with lapic_passthrough flag cannot use interrupt lines.")
        elif lib.lib.is_service_vm(load_order):
            service_vm_id = vm_id

    #
    # Detect interrupt line conflicts
    #
    conflicts = defaultdict(lambda: defaultdict(lambda: set()))  # irq -> vm_id -> devices

    # If a service VM exists, collect its interrupt lines as well
    if service_vm_id >= 0:
        # Collect the interrupt lines that may be used by the service VM
        for device_node in device_nodes:
            acpi_object = device_node.find("acpi_object")
            description = ""
            if acpi_object is not None:
                description = acpi_object.text
            description = device_node.get("description", description)

            # Guess BDF of the device
            bus = device_node.getparent()
            if bus.tag == "bus" and bus.get("type") == "pci" and device_node.get("address") is not None:
                bus_number = int(bus.get("address"), 16)
                address = int(device_node.get("address"), 16)
                device_number = address >> 16
                function_number = address & 0xffff
                description = f"{bus_number:02x}:{device_number:02x}.{function_number} {description}"

            for irq in get_irqs_of_device(device_node):
                irq_allocation[service_vm_id][irq].append(description)

    # Identify and report conflicts among interrupt lines of the VMs
    for vm1, vm2 in combinations(irq_allocation.keys(), 2):
        common_irqs = set(irq_allocation[vm1].keys()) & set(irq_allocation[vm2].keys())
        for irq in common_irqs:
            conflicts[irq][vm1].update(set(irq_allocation[vm1][irq]))
            conflicts[irq][vm2].update(set(irq_allocation[vm2][irq]))

    if conflicts:
        print("Interrupt line conflicts detected!")
        for irq, vm_devices in sorted(conflicts.items()):
            print(f"Interrupt line {irq} is shared by the following devices.")
            for vm_id, devices in vm_devices.items():
                for device in sorted(devices):
                    print(f"\tVM {vm_id}: {device}")
        raise lib.error.ResourceError("VMs have conflicting interrupt lines.")

    #
    # Dump allocations to allocation_etree. The virtual interrupt line is the same as the physical one unless otherwise
    # stated in the scenario configuration.
    #
    for vm_id, alloc in irq_allocation.items():
        vm_node = get_node(f"/acrn-config/vm[@id = '{vm_id}']", allocation_etree)
        if vm_node is None:
            vm_node = acrn_config_utilities.append_node("/acrn-config/vm", None, allocation_etree, id = str(vm_id))
        pt_intx_text = get_node(f"//vm[@id='{vm_id}']/pt_intx/text()", scenario_etree)
        pt_intx_mapping = _parse_pt_intx(pt_intx_text) if pt_intx_text is not None else {}
        for irq, devs in alloc.items():
            for dev in devs:
                if dev.startswith("("):  # Allocation in the scenario configuration need not go to allocation.xml
                    continue
                bdf = dev.split(" ")[0]
                dev_name = f"PTDEV_{bdf}"
                dev_node = get_node(f"device[@name = '{dev_name}']", vm_node)
                if dev_node is None:
                    dev_node = acrn_config_utilities.append_node("./device", None, vm_node, name = dev_name)
                pt_intx_node = get_node(f"pt_intx", dev_node)
                # Virtual IRQ defaults to the physical one unless remapped.
                virq = pt_intx_mapping.get(irq, irq)
                if pt_intx_node is None:
                    acrn_config_utilities.append_node(f"./pt_intx", f"({irq}, {virq})", dev_node)
                else:
                    pt_intx_node.text += f" ({irq}, {virq})"

def fn(board_etree, scenario_etree, allocation_etree):
    """Static-allocator entry point: allocate vUART IRQs, then device INTx."""
    alloc_vuart_connection_irqs(board_etree, scenario_etree, allocation_etree)
    alloc_device_irqs(board_etree, scenario_etree, allocation_etree)