author    Felix Held <felix-coreboot@felixheld.de>  2024-01-11 22:26:18 +0100
committer Felix Held <felix-coreboot@felixheld.de>  2024-01-16 22:59:55 +0000
commit    3b5b66d82954e026a91a1eff833fa7f652fed629
tree      c7ff2cb87807e204d6f9e04e1cae14516eae0801 /src/soc/amd/common
parent    090ea7ab8fceae54488620160aa95da4292d663f
device: Add support for multiple PCI segment groups
Add initial support for multiple PCI segment groups. Instead of
modifying 'secondary' in the bus struct, introduce a new
'segment_group' struct element and keep the existing common code.
Since all platforms currently use only one segment group, this is not
a functional change. On platforms that support more than one segment
group, the segment group has to be set when creating the PCI domain.

Signed-off-by: Patrick Rudolph <patrick.rudolph@9elements.com>
Signed-off-by: Felix Held <felix-coreboot@felixheld.de>
Change-Id: Ied3313c41896362dd989ee2ab1b1bcdced840aa8
Reviewed-on: https://review.coreboot.org/c/coreboot/+/79927
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
Reviewed-by: Nico Huber <nico.h@gmx.de>
Reviewed-by: Martin Roth <martin.roth@amd.corp-partner.google.com>
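For orientation before the diff, a minimal sketch of the data structure change (the segment_group member is what this patch introduces; the surrounding fields are abridged from coreboot's struct bus, not its full definition):

	/* Abridged sketch of struct bus after this change; only the
	 * bus-number-related members are shown. */
	struct bus {
		uint16_t secondary;       /* secondary bus number */
		uint16_t subordinate;     /* subordinate bus number */
		uint16_t max_subordinate; /* maximum subordinate bus number */
		uint8_t segment_group;    /* PCI segment group of this bus */
		/* ... remaining members unchanged ... */
	};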
Diffstat (limited to 'src/soc/amd/common')
-rw-r--r--  src/soc/amd/common/block/acpi/ivrs.c           |  6 +++---
-rw-r--r--  src/soc/amd/common/block/data_fabric/domain.c  | 23 +++++++++++++----------
2 files changed, 16 insertions(+), 13 deletions(-)
diff --git a/src/soc/amd/common/block/acpi/ivrs.c b/src/soc/amd/common/block/acpi/ivrs.c
index 140968c673d3..605c4f56b3b2 100644
--- a/src/soc/amd/common/block/acpi/ivrs.c
+++ b/src/soc/amd/common/block/acpi/ivrs.c
@@ -218,7 +218,7 @@ static unsigned long acpi_fill_ivrs40(unsigned long current, acpi_ivrs_ivhd_t *i
ivhd_40->capability_offset = pci_find_capability(iommu_dev, IOMMU_CAP_ID);
ivhd_40->iommu_base_low = ivhd->iommu_base_low;
ivhd_40->iommu_base_high = ivhd->iommu_base_high;
- ivhd_40->pci_segment_group = 0x0000;
+ ivhd_40->pci_segment_group = nb_dev->bus->segment_group;
ivhd_40->iommu_info = ivhd->iommu_info;
/* For type 40h bits 31:28 and 12:0 are reserved */
ivhd_40->iommu_attributes = ivhd->iommu_feature_info & 0xfffe000;
@@ -275,7 +275,7 @@ static unsigned long acpi_fill_ivrs11(unsigned long current, acpi_ivrs_ivhd_t *i
ivhd_11->capability_offset = pci_find_capability(iommu_dev, IOMMU_CAP_ID);
ivhd_11->iommu_base_low = ivhd->iommu_base_low;
ivhd_11->iommu_base_high = ivhd->iommu_base_high;
- ivhd_11->pci_segment_group = 0x0000;
+ ivhd_11->pci_segment_group = nb_dev->bus->segment_group;
ivhd_11->iommu_info = ivhd->iommu_info;
ivhd11_attr_ptr = (ivhd11_iommu_attr_t *)&ivhd->iommu_feature_info;
ivhd_11->iommu_attributes.perf_counters = ivhd11_attr_ptr->perf_counters;
@@ -365,7 +365,7 @@ unsigned long acpi_fill_ivrs(acpi_ivrs_t *ivrs, unsigned long current)
ivhd->flags |= ((mmio_x18_value & MMIO_CTRL_HT_TUN_EN) ?
IVHD_FLAG_HT_TUN_EN : 0);
- ivhd->pci_segment_group = 0x0000;
+ ivhd->pci_segment_group = nb_dev->bus->segment_group;
ivhd->iommu_info = pci_read_config16(iommu_dev,
ivhd->capability_offset + 0x10) & 0x1F;
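The three IVHD entries above now report the northbridge bus's segment group instead of a hard-coded 0. As an illustration only (not code from this patch), with ECAM the segment group effectively selects a window of PCI_BUSES_PER_SEGMENT_GROUP buses inside the MMCONF region, which is why the IOMMU tables must carry the correct value:

	/* Illustrative sketch, assuming the conventional ECAM layout: how a
	 * (segment group, bus, devfn, register) tuple maps to a config-space
	 * address when one MMCONF region spans several segment groups. */
	static inline uintptr_t ecam_config_addr(unsigned int segment_group,
						 unsigned int bus,
						 unsigned int devfn,
						 unsigned int reg)
	{
		return (uintptr_t)CONFIG_ECAM_MMCONF_BASE_ADDRESS +
		       ((segment_group * PCI_BUSES_PER_SEGMENT_GROUP + bus) << 20) +
		       (devfn << 12) + reg;
	}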
diff --git a/src/soc/amd/common/block/data_fabric/domain.c b/src/soc/amd/common/block/data_fabric/domain.c
index c2f1406e54ce..b827dd3fad8b 100644
--- a/src/soc/amd/common/block/data_fabric/domain.c
+++ b/src/soc/amd/common/block/data_fabric/domain.c
@@ -9,6 +9,7 @@
#include <cpu/amd/mtrr.h>
#include <cpu/cpu.h>
#include <device/device.h>
+#include <device/pci.h>
#include <device/pci_ops.h>
#include <types.h>
@@ -21,16 +22,16 @@ void amd_pci_domain_scan_bus(struct device *domain)
return;
}
- /* TODO: Implement support for more than one PCI segment group in coreboot */
- if (segment_group) {
- printk(BIOS_ERR, "coreboot currently only supports one PCI segment group.\n");
+ if (segment_group >= PCI_SEGMENT_GROUP_COUNT) {
+ printk(BIOS_ERR, "Skipping domain %u due to too large segment group %u.\n",
+ domain->path.domain.domain, segment_group);
return;
}
- /* TODO: Check if bus >= CONFIG_ECAM_MMCONF_BUS_NUMBER and return in that case */
+ /* TODO: Check if bus >= PCI_BUSES_PER_SEGMENT_GROUP and return in that case */
- /* Make sure to not report more than CONFIG_ECAM_MMCONF_BUS_NUMBER PCI buses */
- limit = MIN(limit, CONFIG_ECAM_MMCONF_BUS_NUMBER - 1);
+ /* Make sure to not report more than PCI_BUSES_PER_SEGMENT_GROUP PCI buses */
+ limit = MIN(limit, PCI_BUSES_PER_SEGMENT_GROUP - 1);
/* Set bus first number of PCI root */
domain->link_list->secondary = bus;
@@ -38,6 +39,7 @@ void amd_pci_domain_scan_bus(struct device *domain)
domain->link_list->subordinate = bus;
/* Tell allocator about maximum PCI bus number in domain */
domain->link_list->max_subordinate = limit;
+ domain->link_list->segment_group = segment_group;
pci_host_bridge_scan_bus(domain);
}
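The bounds check above replaces the earlier single-segment bail-out. A hypothetical sketch of how the two constants it relies on could relate (the real definitions live in coreboot's headers, not in this hunk):

	/* Hypothetical sketch: each segment group covers 256 buses, so the
	 * number of usable segment groups would follow from the size of the
	 * configured ECAM window. */
	#define PCI_BUSES_PER_SEGMENT_GROUP	256
	#define PCI_SEGMENT_GROUP_COUNT \
		(CONFIG_ECAM_MMCONF_BUS_NUMBER / PCI_BUSES_PER_SEGMENT_GROUP)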
@@ -246,12 +248,13 @@ void amd_pci_domain_fill_ssdt(const struct device *domain)
acpigen_write_resourcetemplate_header();
/* PCI bus number range in domain */
- printk(BIOS_DEBUG, "%s _CRS: adding busses [%x-%x]\n", acpi_device_name(domain),
- domain->link_list->secondary, domain->link_list->max_subordinate);
+ printk(BIOS_DEBUG, "%s _CRS: adding busses [%x-%x] in segment group %x\n",
+ acpi_device_name(domain), domain->link_list->secondary,
+ domain->link_list->max_subordinate, domain->link_list->segment_group);
acpigen_resource_producer_bus_number(domain->link_list->secondary,
domain->link_list->max_subordinate);
- if (domain->link_list->secondary == 0) {
+ if (domain->link_list->secondary == 0 && domain->link_list->segment_group == 0) {
/* ACPI 6.4.2.5 I/O Port Descriptor */
acpigen_write_io16(PCI_IO_CONFIG_INDEX, PCI_IO_CONFIG_LAST_PORT, 1,
PCI_IO_CONFIG_PORT_COUNT, 1);
@@ -287,7 +290,7 @@ void amd_pci_domain_fill_ssdt(const struct device *domain)
acpigen_write_resourcetemplate_footer();
- acpigen_write_SEG(0);
+ acpigen_write_SEG(domain->link_list->segment_group);
acpigen_write_BBN(domain->link_list->secondary);
/* Scope */
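With _SEG now emitted per domain, a hypothetical second domain in segment group 1 would in effect generate:

	/* Usage sketch, not from this patch: the values a two-segment platform
	 * would feed into the calls above for its second domain. Bus numbering
	 * restarts at 0 within each segment group. */
	acpigen_write_SEG(1);	/* second PCI segment group */
	acpigen_write_BBN(0);	/* base bus number within that segment group */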