-rw-r--r--  hw/ppc/spapr_numa.c        | 28
-rw-r--r--  hw/ppc/spapr_pci_nvlink2.c | 20
2 files changed, 30 insertions, 18 deletions
diff --git a/hw/ppc/spapr_numa.c b/hw/ppc/spapr_numa.c
index 42394f36e9..9073dbbceb 100644
--- a/hw/ppc/spapr_numa.c
+++ b/hw/ppc/spapr_numa.c
@@ -13,14 +13,18 @@
 #include "qemu/osdep.h"
 #include "qemu-common.h"
 #include "hw/ppc/spapr_numa.h"
+#include "hw/pci-host/spapr.h"
 #include "hw/ppc/fdt.h"
 
+/* Moved from hw/ppc/spapr_pci_nvlink2.c */
+#define SPAPR_GPU_NUMA_ID (cpu_to_be32(1))
 
 void spapr_numa_associativity_init(SpaprMachineState *spapr,
                                    MachineState *machine)
 {
+    SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);
     int nb_numa_nodes = machine->numa_state->num_nodes;
-    int i;
+    int i, j, max_nodes_with_gpus;
 
     /*
      * For all associativity arrays: first position is the size,
@@ -35,6 +39,28 @@ void spapr_numa_associativity_init(SpaprMachineState *spapr,
         spapr->numa_assoc_array[i][0] = cpu_to_be32(MAX_DISTANCE_REF_POINTS);
         spapr->numa_assoc_array[i][MAX_DISTANCE_REF_POINTS] = cpu_to_be32(i);
     }
+
+    /*
+     * Initialize NVLink GPU associativity arrays. We know that
+     * the first GPU will take the first available NUMA id, and
+     * we'll have a maximum of NVGPU_MAX_NUM GPUs in the machine.
+     * At this point we're not sure if there are GPUs or not, but
+     * let's initialize the associativity arrays and allow NVLink
+     * GPUs to be handled like regular NUMA nodes later on.
+     */
+    max_nodes_with_gpus = nb_numa_nodes + NVGPU_MAX_NUM;
+
+    for (i = nb_numa_nodes; i < max_nodes_with_gpus; i++) {
+        spapr->numa_assoc_array[i][0] = cpu_to_be32(MAX_DISTANCE_REF_POINTS);
+
+        for (j = 1; j < MAX_DISTANCE_REF_POINTS; j++) {
+            uint32_t gpu_assoc = smc->pre_5_1_assoc_refpoints ?
+                                 SPAPR_GPU_NUMA_ID : cpu_to_be32(i);
+            spapr->numa_assoc_array[i][j] = gpu_assoc;
+        }
+
+        spapr->numa_assoc_array[i][MAX_DISTANCE_REF_POINTS] = cpu_to_be32(i);
+    }
 }
 
 void spapr_numa_write_associativity_dt(SpaprMachineState *spapr, void *fdt,
diff --git a/hw/ppc/spapr_pci_nvlink2.c b/hw/ppc/spapr_pci_nvlink2.c
index 76ae77ebc8..8ef9b40a18 100644
--- a/hw/ppc/spapr_pci_nvlink2.c
+++ b/hw/ppc/spapr_pci_nvlink2.c
@@ -26,6 +26,7 @@
 #include "qemu-common.h"
 #include "hw/pci/pci.h"
 #include "hw/pci-host/spapr.h"
+#include "hw/ppc/spapr_numa.h"
 #include "qemu/error-report.h"
 #include "hw/ppc/fdt.h"
 #include "hw/pci/pci_bridge.h"
@@ -37,8 +38,6 @@
 #define PHANDLE_NVLINK(phb, gn, nn) (0x00130000 | (((phb)->index) << 8) | \
                                      ((gn) << 4) | (nn))
 
-#define SPAPR_GPU_NUMA_ID (cpu_to_be32(1))
-
 typedef struct SpaprPhbPciNvGpuSlot {
     uint64_t tgt;
     uint64_t gpa;
@@ -360,13 +359,6 @@ void spapr_phb_nvgpu_ram_populate_dt(SpaprPhbState *sphb, void *fdt)
         Object *nv_mrobj = object_property_get_link(OBJECT(nvslot->gpdev),
                                                     "nvlink2-mr[0]",
                                                     &error_abort);
-        uint32_t associativity[] = {
-            cpu_to_be32(0x4),
-            cpu_to_be32(nvslot->numa_id),
-            cpu_to_be32(nvslot->numa_id),
-            cpu_to_be32(nvslot->numa_id),
-            cpu_to_be32(nvslot->numa_id)
-        };
         uint64_t size = object_property_get_uint(nv_mrobj, "size", NULL);
         uint64_t mem_reg[2] = { cpu_to_be64(nvslot->gpa), cpu_to_be64(size) };
         char *mem_name = g_strdup_printf("memory@%"PRIx64, nvslot->gpa);
@@ -376,14 +368,8 @@ void spapr_phb_nvgpu_ram_populate_dt(SpaprPhbState *sphb, void *fdt)
         _FDT((fdt_setprop_string(fdt, off, "device_type", "memory")));
         _FDT((fdt_setprop(fdt, off, "reg", mem_reg, sizeof(mem_reg))));
 
-        if (sphb->pre_5_1_assoc) {
-            associativity[1] = SPAPR_GPU_NUMA_ID;
-            associativity[2] = SPAPR_GPU_NUMA_ID;
-            associativity[3] = SPAPR_GPU_NUMA_ID;
-        }
-
-        _FDT((fdt_setprop(fdt, off, "ibm,associativity", associativity,
-                          sizeof(associativity))));
+        spapr_numa_write_associativity_dt(SPAPR_MACHINE(qdev_get_machine()),
+                                          fdt, off, nvslot->numa_id);
 
         _FDT((fdt_setprop_string(fdt, off, "compatible",
                                  "ibm,coherent-device-memory")));
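
For reference, the helper that replaces the open-coded "ibm,associativity" property write in spapr_pci_nvlink2.c is not shown in this diff. A minimal sketch of what such a helper would look like, assuming it simply emits the row of numa_assoc_array that spapr_numa_associativity_init() pre-built for the given NUMA node id (names taken from the code above):

/*
 * Sketch only: assumed shape of the helper in hw/ppc/spapr_numa.c,
 * not part of this diff. It writes the pre-initialized associativity
 * array for nodeid into the device tree node at 'offset'.
 */
void spapr_numa_write_associativity_dt(SpaprMachineState *spapr, void *fdt,
                                       int offset, int nodeid)
{
    _FDT((fdt_setprop(fdt, offset, "ibm,associativity",
                      spapr->numa_assoc_array[nodeid],
                      sizeof(spapr->numa_assoc_array[nodeid]))));
}

With a helper of this shape, regular NUMA nodes and NVLink2 GPU memory nodes get their device tree associativity from the same array, which is why the GPU slots (including the pre-5.1 compatibility values) are pre-initialized in spapr_numa_associativity_init() above instead of being special-cased in spapr_phb_nvgpu_ram_populate_dt().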