author     Jonathan Zhang <jonzhang@meta.com>          2023-01-25 11:37:27 -0800
committer  Lean Sheng Tan <sheng.tan@9elements.com>    2023-03-19 09:53:02 +0000
commit     3ed903fda9cb9b7237067f301d1efdb297a05a24
tree       d5dd7beda731aea7ddbd6a80c7c151b5d1d38107 /src/soc/intel/xeon_sp/spr/numa.c
parent     15fc45982b9b8303978ab87ea6c93d423834e6e8
soc/intel/xeon_sp/spr: Add Sapphire Rapids ramstage code
It implements the SPR ramstage, including silicon initialization, MSR programming, MP init and locking of certain registers before booting to the payload.

Change-Id: I128fdc6e58c49fb5abf911d6ffa91e7411f6d1e2
Signed-off-by: Jonathan Zhang <jonzhang@meta.com>
Signed-off-by: Johnny Lin <johnny_lin@wiwynn.com>
Reviewed-on: https://review.coreboot.org/c/coreboot/+/72443
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
Reviewed-by: Lean Sheng Tan <sheng.tan@9elements.com>
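For context, here is a minimal sketch of how the helpers added in numa.c might be invoked from ramstage code. The caller setup_proximity_domains() is hypothetical and not part of this change; only fill_pds(), dump_pds(), printk() and CB_SUCCESS come from the tree, and the sketch assumes the prototypes are exposed via <soc/numa.h>, which numa.c itself includes:

    #include <console/console.h>
    #include <soc/numa.h>
    #include <types.h>

    /* Hypothetical ramstage caller (illustrative only): build the proximity
     * domain table once, then dump it for debugging. */
    static void setup_proximity_domains(void)
    {
            if (fill_pds() != CB_SUCCESS) {
                    printk(BIOS_ERR, "Failed to fill proximity domains\n");
                    return;
            }
            dump_pds();
    }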
Diffstat (limited to 'src/soc/intel/xeon_sp/spr/numa.c')
-rw-r--r--  src/soc/intel/xeon_sp/spr/numa.c  | 117
1 file changed, 117 insertions, 0 deletions
diff --git a/src/soc/intel/xeon_sp/spr/numa.c b/src/soc/intel/xeon_sp/spr/numa.c
new file mode 100644
index 000000000000..6aba7ccad584
--- /dev/null
+++ b/src/soc/intel/xeon_sp/spr/numa.c
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#include <console/console.h>
+#include <commonlib/stdlib.h>
+#include <device/device.h>
+#include <device/pci_ops.h>
+#include <device/pci.h>
+#include <device/pciexp.h>
+#include <soc/numa.h>
+#include <soc/soc_util.h>
+#include <soc/util.h>
+#include <types.h>
+
+void dump_pds(void)
+{
+ printk(BIOS_DEBUG, "====== Proximity Domain Dump ======\n");
+ printk(BIOS_DEBUG, "number of proximity domains: %d\n", pds.num_pds);
+ for (uint8_t i = 0; i < pds.num_pds; i++) {
+ printk(BIOS_DEBUG, "\tproximity domain %d:\n", i);
+ printk(BIOS_DEBUG, "\t\ttype:%d\n", pds.pds[i].pd_type);
+ printk(BIOS_DEBUG, "\t\tsocket_bitmap:0x%x\n", pds.pds[i].socket_bitmap);
+ printk(BIOS_DEBUG, "\t\tdevice_handle:0x%x\n", pds.pds[i].device_handle);
+ printk(BIOS_DEBUG, "\t\tbase(64MB):0x%x\n", pds.pds[i].base);
+ printk(BIOS_DEBUG, "\t\tsize(64MB):0x%x\n", pds.pds[i].size);
+ }
+}
+
+enum cb_err fill_pds(void)
+{
+ uint8_t num_sockets = soc_get_num_cpus();
+ uint8_t num_cxlnodes = get_cxl_node_count();
+
+ /*
+ * Rules/assumptions:
+	 * 1. Each processor has a processor proximity domain, regardless of whether
+	 *    it has any DIMMs attached.
+ * 2. All system memory map elements are either from processor attached memory,
+ * or from CXL memory. Each CXL node info entry has a corresponding entry
+ * in system memory map elements.
+ * 3. Each CXL device may have multiple HDMs (Host-managed Device Memory). Each
+ * HDM has one and only one CXL node info entry. Each CXL node info entry
+ * represents a generic initiator proximity domain.
+ */
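+	/*
+	 * Illustrative layout (hypothetical example, not taken from the code):
+	 * on a 2-socket system with one CXL node on socket 1, pds.pds[0..1] are
+	 * PD_TYPE_PROCESSOR domains and pds.pds[2] is the PD_TYPE_GENERIC_INITIATOR
+	 * domain, i.e. processor domains are filled first, CXL domains after them.
+	 */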
+ pds.num_pds = num_cxlnodes + num_sockets;
+ pds.pds = xmalloc(sizeof(struct proximity_domain) * pds.num_pds);
+ if (!pds.pds)
+		die("%s %d out of memory.", __FILE__, __LINE__);
+
+ memset(pds.pds, 0, sizeof(struct proximity_domain) * pds.num_pds);
+
+ /* Fill in processor domains */
+ uint8_t i, j;
+ struct device *dev;
+ for (i = 0; i < num_sockets; i++) {
+ pds.pds[i].pd_type = PD_TYPE_PROCESSOR;
+ pds.pds[i].socket_bitmap = 1 << i;
+ pds.pds[i].distances = malloc(sizeof(uint8_t) * pds.num_pds);
+ if (!pds.pds[i].distances)
+			die("%s %d out of memory.", __FILE__, __LINE__);
+ /* hard code the distances for now, till we know how to calculate them. */
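+		/*
+		 * Note: 0x0a (decimal 10) is the ACPI SLIT value for a domain's
+		 * distance to itself; 0x0e (14) is assumed here to be a generic
+		 * placeholder for all remote domains.
+		 */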
+ for (j = 0; j < pds.num_pds; j++) {
+ if (j == i)
+ pds.pds[i].distances[j] = 0x0a;
+ else
+ pds.pds[i].distances[j] = 0x0e;
+ }
+ }
+
+ /* If there are no CXL nodes, we are done */
+ if (num_cxlnodes == 0)
+ return CB_SUCCESS;
+
+ /* There are CXL nodes, fill in generic initiator domain after the processors pds */
+ uint8_t skt_id, cxl_id;
+ const CXL_NODE_SOCKET *cxl_hob = get_cxl_node();
+	/* i indexes the generic initiator pds; advance it once per CXL node entry. */
+	for (skt_id = 0, i = num_sockets; skt_id < MAX_SOCKET; skt_id++) {
+		for (cxl_id = 0; cxl_id < cxl_hob[skt_id].CxlNodeCount; ++cxl_id, ++i) {
+ const CXL_NODE_INFO node = cxl_hob[skt_id].CxlNodeInfo[cxl_id];
+ pds.pds[i].pd_type = PD_TYPE_GENERIC_INITIATOR;
+ pds.pds[i].socket_bitmap = node.SocketBitmap;
+ pds.pds[i].base = node.Address;
+ pds.pds[i].size = node.Size;
+			dev = pcie_find_dsn(node.SerialNumber, node.VendorId, 0);
+			/* Leave device_handle as 0 if no matching PCI device is found. */
+			if (dev)
+				pds.pds[i].device_handle = PCI_BDF(dev);
+ pds.pds[i].distances = malloc(sizeof(uint8_t) * pds.num_pds);
+ if (!pds.pds[i].distances)
+				die("%s %d out of memory.", __FILE__, __LINE__);
+ /* hard code the distances until we know how to calculate them */
+ for (j = 0; j < pds.num_pds; j++) {
+ if (j == i)
+ pds.pds[i].distances[j] = 0x0a;
+ else
+ pds.pds[i].distances[j] = 0x0e;
+ }
+ }
+ }
+
+ return CB_SUCCESS;
+}
+
+/*
+ * Return the total size of memory regions in generic initiator affinity domains.
+ * The size is in units of 64MB.
+ */
+uint32_t get_generic_initiator_mem_size(void)
+{
+ uint8_t i;
+ uint32_t size = 0;
+
+ for (i = 0; i < pds.num_pds; i++) {
+ if (pds.pds[i].pd_type == PD_TYPE_PROCESSOR)
+ continue;
+ size += pds.pds[i].size;
+ }
+
+ return size;
+}
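As a usage note, get_generic_initiator_mem_size() returns a count of 64MB units, so a caller that needs bytes has to scale the result. The helper below is a hypothetical illustration of that conversion, not code from the tree:

    #include <stdint.h>

    /* Hypothetical helper (illustrative only): convert a size expressed in
     * 64MB units, as returned by get_generic_initiator_mem_size(), to bytes.
     * 64MB = 64 << 20 bytes. */
    static uint64_t gi_mem_size_bytes(uint32_t size_in_64mb_units)
    {
            return (uint64_t)size_in_64mb_units * (64ULL << 20);
    }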