Lines matching refs: iommu

in add_ivrs_mapping_entry():
    56   u16 bdf, u16 alias_id, u8 flags, struct amd_iommu *iommu)   [argument]
    58   struct ivrs_mappings *ivrs_mappings = get_ivrs_mappings(iommu->seg);
    85   ivrs_mappings[bdf].iommu = iommu;
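Lines 56, 58 and 85 above are where each device BDF named by the IVRS table gets tied to the IOMMU unit that serves it. Below is a minimal, self-contained sketch of that bookkeeping; the struct layouts, the single-segment get_ivrs_mappings() and the field set are simplified stand-ins for illustration, not Xen's actual definitions.

/* Sketch only: simplified stand-in types, not Xen's real definitions. */
#include <stdint.h>
#include <stdio.h>

struct amd_iommu { uint16_t seg, bdf; };

struct ivrs_mappings {
    uint16_t dte_requestor_id;   /* alias (requestor) BDF used in the DTE */
    uint8_t  device_flags;
    struct amd_iommu *iommu;     /* which IOMMU unit serves this BDF */
};

#define MAX_BDF 0x10000
static struct ivrs_mappings seg0_mappings[MAX_BDF];

/* Per-segment lookup; this sketch only models PCI segment 0. */
static struct ivrs_mappings *get_ivrs_mappings(uint16_t seg)
{
    return seg == 0 ? seg0_mappings : NULL;
}

/* Record that 'iommu' handles 'bdf', translated via 'alias_id'. */
static void add_ivrs_mapping_entry(uint16_t bdf, uint16_t alias_id,
                                   uint8_t flags, struct amd_iommu *iommu)
{
    struct ivrs_mappings *m = get_ivrs_mappings(iommu->seg);

    if ( !m )
        return;
    m[bdf].dte_requestor_id = alias_id;
    m[bdf].device_flags = flags;
    m[bdf].iommu = iommu;
}

int main(void)
{
    struct amd_iommu iommu = { .seg = 0, .bdf = 0x0002 };

    add_ivrs_mapping_entry(0x0300, 0x0300, 0, &iommu);
    printf("BDF 0300 -> IOMMU %04x\n", seg0_mappings[0x0300].iommu->bdf);
    return 0;
}

The per-entry parsers further down (select, alias, extended, special) all funnel into this helper, differing mainly in whether alias_id equals bdf.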
in find_iommu_from_bdf_cap():
    91   struct amd_iommu *iommu;   [local]
    93   for_each_amd_iommu ( iommu )
    94   if ( (iommu->seg == seg) && (iommu->bdf == bdf) &&
    95   (iommu->cap_offset == cap_offset) )
    96   return iommu;
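Lines 91-96 show the lookup used to tie an IVHD/IVMD block to a discovered IOMMU: scan the registered units for a matching segment, BDF and capability offset. A hedged reconstruction, with a plain array standing in for Xen's IOMMU list and for_each_amd_iommu() iterator:

/* Sketch only: a fixed array replaces Xen's IOMMU list and iterator. */
#include <stddef.h>
#include <stdint.h>

struct amd_iommu {
    uint16_t seg;          /* PCI segment group */
    uint16_t bdf;          /* bus/device/function of the IOMMU itself */
    uint16_t cap_offset;   /* offset of its PCI capability block */
};

static struct amd_iommu iommus[4];
static unsigned int nr_iommus;

#define for_each_amd_iommu(i) \
    for ( (i) = iommus; (i) < iommus + nr_iommus; ++(i) )

static struct amd_iommu *find_iommu_from_bdf_cap(uint16_t seg, uint16_t bdf,
                                                 uint16_t cap_offset)
{
    struct amd_iommu *iommu;

    for_each_amd_iommu ( iommu )
        if ( iommu->seg == seg && iommu->bdf == bdf &&
             iommu->cap_offset == cap_offset )
            return iommu;

    return NULL;   /* the table named an IOMMU we did not discover */
}

int main(void)
{
    iommus[0] = (struct amd_iommu){ .seg = 0, .bdf = 0x0002, .cap_offset = 0x40 };
    nr_iommus = 1;
    return find_iommu_from_bdf_cap(0, 0x0002, 0x40) ? 0 : 1;
}

parse_ivhd_block() (line 868) and parse_ivmd_device_iommu() (line 328) rely on exactly this match failing when firmware names an IOMMU that was never discovered.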
in reserve_iommu_exclusion_range():
    102  struct amd_iommu *iommu, uint64_t base, uint64_t limit)   [argument]
    105  if ( iommu->exclusion_enable )
    107  if ( iommu->exclusion_base < base )
    108  base = iommu->exclusion_base;
    109  if ( iommu->exclusion_limit > limit )
    110  limit = iommu->exclusion_limit;
    113  iommu->exclusion_enable = IOMMU_CONTROL_ENABLED;
    114  iommu->exclusion_base = base;
    115  iommu->exclusion_limit = limit;

in reserve_iommu_exclusion_range_all():
    119  struct amd_iommu *iommu,   [argument]
    122  reserve_iommu_exclusion_range(iommu, base, limit);
    123  iommu->exclusion_allow_all = IOMMU_CONTROL_ENABLED;
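Lines 102-123 contain the exclusion-range merge: if the IOMMU already has an exclusion window, a new request can only widen it (lower base, higher limit), and the _all variant additionally flags the window as applying to every device behind the unit. A self-contained sketch, using plain bools where Xen uses IOMMU_CONTROL_ENABLED:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the relevant struct amd_iommu fields. */
struct amd_iommu {
    bool exclusion_enable;
    bool exclusion_allow_all;
    uint64_t exclusion_base;
    uint64_t exclusion_limit;
};

/* Widen the IOMMU's single exclusion window to cover [base, limit]. */
static void reserve_iommu_exclusion_range(struct amd_iommu *iommu,
                                          uint64_t base, uint64_t limit)
{
    if ( iommu->exclusion_enable )
    {
        /* Merge with the existing window: union of the two ranges. */
        if ( iommu->exclusion_base < base )
            base = iommu->exclusion_base;
        if ( iommu->exclusion_limit > limit )
            limit = iommu->exclusion_limit;
    }

    iommu->exclusion_enable = true;
    iommu->exclusion_base = base;
    iommu->exclusion_limit = limit;
}

/* Same, but mark the window as applying to every device behind the IOMMU. */
static void reserve_iommu_exclusion_range_all(struct amd_iommu *iommu,
                                              uint64_t base, uint64_t limit)
{
    reserve_iommu_exclusion_range(iommu, base, limit);
    iommu->exclusion_allow_all = true;
}

int main(void)
{
    struct amd_iommu iommu = { 0 };

    reserve_iommu_exclusion_range(&iommu, 0x100000, 0x1fffff);
    reserve_iommu_exclusion_range_all(&iommu, 0x080000, 0x17ffff);
    /* prints: window 0x80000-0x1fffff allow_all=1 */
    printf("window %#lx-%#lx allow_all=%d\n",
           (unsigned long)iommu.exclusion_base,
           (unsigned long)iommu.exclusion_limit, iommu.exclusion_allow_all);
    return 0;
}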
in register_exclusion_range_for_all_devices():
    159  struct amd_iommu *iommu;   [local]
    181  for_each_amd_iommu( iommu )
    182  reserve_iommu_exclusion_range_all(iommu, base, limit);

in register_exclusion_range_for_device():
    194  struct amd_iommu *iommu;   [local]
    197  iommu = find_iommu_for_device(seg, bdf);
    198  if ( !iommu )
    225  reserve_iommu_exclusion_range(iommu, base, limit);
in register_exclusion_range_for_iommu_devices():
    234  struct amd_iommu *iommu,   [argument]
    254  if ( iommu == find_iommu_for_device(iommu->seg, bdf) )
    256  reserve_unity_map_for_device(iommu->seg, bdf, base, length,
    258  req = get_ivrs_mappings(iommu->seg)[bdf].dte_requestor_id;
    259  reserve_unity_map_for_device(iommu->seg, req, base, length,
    270  reserve_iommu_exclusion_range_all(iommu, base, limit);
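Lines 234-270 show the per-IOMMU registration path: for every BDF served by this unit, a unity mapping is reserved for the device and again for its DTE requestor ID, and finally the exclusion window is opened for all devices behind the unit. The sketch below assumes an inclusive limit (length = limit + 1 - base) and uses stub types plus a printing reserve_unity_map_for_device(); only the control flow mirrors the lines above.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins; Xen's real types and helpers differ. */
struct amd_iommu { uint16_t seg; };

struct ivrs_mappings {
    uint16_t dte_requestor_id;   /* alias BDF used in the device table */
    struct amd_iommu *iommu;     /* unit serving this BDF */
};

#define MAX_BDF 0x100            /* tiny BDF space for the example */
static struct ivrs_mappings mappings[MAX_BDF];

static struct ivrs_mappings *get_ivrs_mappings(uint16_t seg)
{
    (void)seg;                   /* single-segment example */
    return mappings;
}

static struct amd_iommu *find_iommu_for_device(uint16_t seg, uint16_t bdf)
{
    return get_ivrs_mappings(seg)[bdf].iommu;
}

/* Placeholder: Xen records a per-device unity-mapped region here. */
static int reserve_unity_map_for_device(uint16_t seg, uint16_t bdf,
                                        uint64_t base, uint64_t length,
                                        bool iw, bool ir)
{
    printf("unity map %04x:%04x %#lx+%#lx %c%c\n", seg, bdf,
           (unsigned long)base, (unsigned long)length,
           ir ? 'r' : '-', iw ? 'w' : '-');
    return 0;
}

/* For every device behind 'iommu', reserve the unity range for the device
 * and for its requestor-ID alias, then open the exclusion window for all. */
static int register_exclusion_range_for_iommu_devices(
    struct amd_iommu *iommu, uint64_t base, uint64_t limit, bool iw, bool ir)
{
    uint64_t length = limit + 1 - base;
    uint16_t bdf, req;

    for ( bdf = 0; bdf < MAX_BDF; bdf++ )
        if ( iommu == find_iommu_for_device(iommu->seg, bdf) )
        {
            reserve_unity_map_for_device(iommu->seg, bdf, base, length, iw, ir);
            req = get_ivrs_mappings(iommu->seg)[bdf].dte_requestor_id;
            reserve_unity_map_for_device(iommu->seg, req, base, length, iw, ir);
        }

    /* reserve_iommu_exclusion_range_all(iommu, base, limit) would follow
     * here, as on line 270; see the exclusion-range sketch above. */
    return 0;
}

int main(void)
{
    struct amd_iommu iommu = { .seg = 0 };

    mappings[0x08].iommu = &iommu;
    mappings[0x08].dte_requestor_id = 0x08;
    return register_exclusion_range_for_iommu_devices(&iommu, 0x100000,
                                                      0x1fffff, true, true);
}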
in parse_ivmd_device_iommu():
    325  struct amd_iommu *iommu;   [local]
    328  iommu = find_iommu_from_bdf_cap(seg, ivmd_block->header.device_id,
    330  if ( !iommu )
    338  iommu, base, limit, iw, ir);

in parse_ivhd_device_select():
    412  const struct acpi_ivrs_device4 *select, struct amd_iommu *iommu)   [argument]
    423  add_ivrs_mapping_entry(bdf, bdf, select->header.data_setting, iommu);

in parse_ivhd_device_range():
    430  u16 header_length, u16 block_length, struct amd_iommu *iommu)   [argument]
    469  iommu);

in parse_ivhd_device_alias():
    476  u16 header_length, u16 block_length, struct amd_iommu *iommu)   [argument]
    503  add_ivrs_mapping_entry(bdf, alias_id, alias->header.data_setting, iommu);

in parse_ivhd_device_alias_range():
    510  u16 header_length, u16 block_length, struct amd_iommu *iommu)   [argument]
    558  iommu);

in parse_ivhd_device_extended():
    565  u16 header_length, u16 block_length, struct amd_iommu *iommu)   [argument]
    583  add_ivrs_mapping_entry(bdf, bdf, ext->header.data_setting, iommu);

in parse_ivhd_device_extended_range():
    590  u16 header_length, u16 block_length, struct amd_iommu *iommu)   [argument]
    630  iommu);

in parse_ivhd_device_special():
    701  u16 header_length, u16 block_length, struct amd_iommu *iommu)   [argument]
    723  add_ivrs_mapping_entry(bdf, bdf, special->header.data_setting, iommu);
in parse_ivhd_block():
    860  struct amd_iommu *iommu;   [local]
    868  iommu = find_iommu_from_bdf_cap(ivhd_block->pci_segment_group,
    871  if ( !iommu )
    903  dev_length = parse_ivhd_device_select(&ivhd_device->select, iommu);
    908  ivhd_block->header.length, block_length, iommu);
    913  ivhd_block->header.length, block_length, iommu);
    918  ivhd_block->header.length, block_length, iommu);
    923  ivhd_block->header.length, block_length, iommu);
    928  ivhd_block->header.length, block_length, iommu);
    933  ivhd_block->header.length, block_length, iommu);
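Lines 860-933 are the IVHD walker: the block is first tied to a discovered unit via find_iommu_from_bdf_cap(), then each device entry is dispatched by type to one of the parse_ivhd_device_*() helpers, each returning how many bytes it consumed (zero aborts the block). The sketch below keeps only that dispatch shape; the two stub parsers, the entry layout with an explicit length byte, and the trimmed set of type codes are simplifications, not the real IVRS encoding or the signatures listed above.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified device-entry layout for this sketch only; real IVRS entries
 * encode their size by type rather than carrying a length byte. */
struct ivhd_entry {
    uint8_t type;
    uint8_t length;        /* bytes consumed by this entry */
};

enum {
    IVHD_DEV_SELECT = 2,
    IVHD_DEV_RANGE_START = 3,
    /* ... further entry types elided ... */
};

struct amd_iommu { uint16_t seg, bdf; };

/* Stubs standing in for the per-type parsers referenced above; each returns
 * the number of bytes it consumed, or 0 on a malformed entry. */
static uint16_t parse_ivhd_device_select(const struct ivhd_entry *e,
                                         struct amd_iommu *iommu)
{
    printf("select entry for IOMMU %04x\n", iommu->bdf);
    return e->length;
}

static uint16_t parse_ivhd_device_range(const struct ivhd_entry *e,
                                        struct amd_iommu *iommu)
{
    printf("range entry for IOMMU %04x\n", iommu->bdf);
    return e->length;
}

/* Walk the entries of one IVHD block, dispatching on entry type. */
static int parse_ivhd_block(const uint8_t *entries, size_t total,
                            struct amd_iommu *iommu)
{
    size_t off = 0;

    while ( off + sizeof(struct ivhd_entry) <= total )
    {
        const struct ivhd_entry *e = (const void *)(entries + off);
        uint16_t dev_length;

        switch ( e->type )
        {
        case IVHD_DEV_SELECT:
            dev_length = parse_ivhd_device_select(e, iommu);
            break;
        case IVHD_DEV_RANGE_START:
            dev_length = parse_ivhd_device_range(e, iommu);
            break;
        default:
            printf("unknown IVHD entry type %u\n", e->type);
            dev_length = e->length;
            break;
        }

        if ( dev_length == 0 )
            return -1;          /* malformed entry: abort the block */
        off += dev_length;
    }

    return 0;
}

int main(void)
{
    struct amd_iommu iommu = { .seg = 0, .bdf = 0x0002 };
    uint8_t blob[] = { IVHD_DEV_SELECT, 4, 0, 0,
                       IVHD_DEV_RANGE_START, 8, 0, 0, 0, 0, 0, 0 };

    return parse_ivhd_block(blob, sizeof(blob), &iommu);
}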