Merge tag 'pull-loongarch-20240523' of https://gitlab.com/gaosong/qemu into staging

pull-loongarch-20240523

# -----BEGIN PGP SIGNATURE-----
#
# iLMEAAEKAB0WIQS4/x2g0v3LLaCcbCxAov/yOSY+3wUCZk6fPgAKCRBAov/yOSY+
# 35rwA/98G/tODhR2PAl7qZr6+6z8vazkiT4iNNHgxnw/T2TKsh2YONe+2gtKhTa1
# HKYANMykWTxOtBZeCYY9Z5QNj8DuC3xKc1zY1pC1AwRcflsMlGz0WoAC78Gbl9TC
# PBCwyu01hsFoYpIstH/dOGbNsR2OFRLnnGUVFUKtPuS3O+59hg==
# =OzUv
# -----END PGP SIGNATURE-----
# gpg: Signature made Wed 22 May 2024 06:43:26 PM PDT
# gpg: using RSA key B8FF1DA0D2FDCB2DA09C6C2C40A2FFF239263EDF
# gpg: Good signature from "Song Gao <m17746591750@163.com>" [unknown]
# gpg: WARNING: This key is not certified with a trusted signature!
# gpg: There is no indication that the signature belongs to the owner.
# Primary key fingerprint: B8FF 1DA0 D2FD CB2D A09C 6C2C 40A2 FFF2 3926 3EDF

* tag 'pull-loongarch-20240523' of https://gitlab.com/gaosong/qemu:
  hw/loongarch/virt: Fix FDT memory node address width
  target/loongarch: Add loongarch vector property unconditionally
  hw/loongarch: Remove minimum and default memory size
  hw/loongarch: Refine system dram memory region
  hw/loongarch: Refine fwcfg memory map
  hw/loongarch: Refine fadt memory table for numa memory
  hw/loongarch: Refine acpi srat table for numa memory
  hw/loongarch: Add VM mode in IOCSR feature register in kvm mode
  target/loongarch/kvm: fpu save the vreg registers high 192bit
  target/loongarch/kvm: Fix VM recovery from disk failures

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
commit 50c3fc72b5
@@ -166,8 +166,9 @@ static void
 build_srat(GArray *table_data, BIOSLinker *linker, MachineState *machine)
 {
     int i, arch_id, node_id;
-    uint64_t mem_len, mem_base;
-    int nb_numa_nodes = machine->numa_state->num_nodes;
+    hwaddr len, base, gap;
+    NodeInfo *numa_info;
+    int nodes, nb_numa_nodes = machine->numa_state->num_nodes;
     LoongArchVirtMachineState *lvms = LOONGARCH_VIRT_MACHINE(machine);
     MachineClass *mc = MACHINE_GET_CLASS(lvms);
     const CPUArchIdList *arch_ids = mc->possible_cpu_arch_ids(machine);
@@ -196,35 +197,44 @@ build_srat(GArray *table_data, BIOSLinker *linker, MachineState *machine)
         build_append_int_noprefix(table_data, 0, 4); /* Reserved */
     }
 
-    /* Node0 */
-    build_srat_memory(table_data, VIRT_LOWMEM_BASE, VIRT_LOWMEM_SIZE,
-                      0, MEM_AFFINITY_ENABLED);
-    mem_base = VIRT_HIGHMEM_BASE;
-    if (!nb_numa_nodes) {
-        mem_len = machine->ram_size - VIRT_LOWMEM_SIZE;
-    } else {
-        mem_len = machine->numa_state->nodes[0].node_mem - VIRT_LOWMEM_SIZE;
+    base = VIRT_LOWMEM_BASE;
+    gap = VIRT_LOWMEM_SIZE;
+    numa_info = machine->numa_state->nodes;
+    nodes = nb_numa_nodes;
+    if (!nodes) {
+        nodes = 1;
     }
-    if (mem_len)
-        build_srat_memory(table_data, mem_base, mem_len, 0, MEM_AFFINITY_ENABLED);
 
-    /* Node1 - Nodemax */
-    if (nb_numa_nodes) {
-        mem_base += mem_len;
-        for (i = 1; i < nb_numa_nodes; ++i) {
-            if (machine->numa_state->nodes[i].node_mem > 0) {
-                build_srat_memory(table_data, mem_base,
-                                  machine->numa_state->nodes[i].node_mem, i,
-                                  MEM_AFFINITY_ENABLED);
-                mem_base += machine->numa_state->nodes[i].node_mem;
-            }
+    for (i = 0; i < nodes; i++) {
+        if (nb_numa_nodes) {
+            len = numa_info[i].node_mem;
+        } else {
+            len = machine->ram_size;
+        }
+
+        /*
+         * memory for the node splited into two part
+         *   lowram:  [base, +gap)
+         *   highram: [VIRT_HIGHMEM_BASE, +(len - gap))
+         */
+        if (len >= gap) {
+            build_srat_memory(table_data, base, gap, i, MEM_AFFINITY_ENABLED);
+            len -= gap;
+            base = VIRT_HIGHMEM_BASE;
+            gap = machine->ram_size - VIRT_LOWMEM_SIZE;
+        }
+
+        if (len) {
+            build_srat_memory(table_data, base, len, i, MEM_AFFINITY_ENABLED);
+            base += len;
+            gap -= len;
         }
     }
 
     if (machine->device_memory) {
         build_srat_memory(table_data, machine->device_memory->base,
                           memory_region_size(&machine->device_memory->mr),
-                          nb_numa_nodes - 1,
+                          nodes - 1,
                           MEM_AFFINITY_HOTPLUGGABLE | MEM_AFFINITY_ENABLED);
     }
 
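Editor's note: the SRAT, FDT and fw_cfg hunks in this pull all share the same low/high address walk, so a minimal standalone sketch of it may help. This is not QEMU code; the helper name and the two-node example are made up, and the VIRT_LOWMEM_*/VIRT_HIGHMEM_BASE values are assumed (a 256 MiB low window at 0 and a high window at 0x90000000, see hw/loongarch/virt.h for the real definitions).

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define VIRT_LOWMEM_BASE   0x00000000ULL
#define VIRT_LOWMEM_SIZE   0x10000000ULL   /* 256 MiB, assumed */
#define VIRT_HIGHMEM_BASE  0x90000000ULL   /* assumed */

/* Walk each node's RAM across the low window, then the high window. */
static void split_ram_into_ranges(const uint64_t *node_mem, int nodes,
                                  uint64_t ram_size)
{
    uint64_t base = VIRT_LOWMEM_BASE;
    uint64_t gap = VIRT_LOWMEM_SIZE;    /* room left in the current window */

    for (int i = 0; i < nodes; i++) {
        uint64_t len = node_mem[i];

        if (len >= gap) {
            /* node fills the rest of this window, continue in the high one */
            printf("node %d: [0x%" PRIx64 ", +0x%" PRIx64 ")\n", i, base, gap);
            len -= gap;
            base = VIRT_HIGHMEM_BASE;
            gap = ram_size - VIRT_LOWMEM_SIZE;
        }
        if (len) {
            printf("node %d: [0x%" PRIx64 ", +0x%" PRIx64 ")\n", i, base, len);
            base += len;
            gap -= len;
        }
    }
}

int main(void)
{
    /* hypothetical example: 4 GiB guest split across two 2 GiB NUMA nodes */
    uint64_t node_mem[] = { 2ULL << 30, 2ULL << 30 };

    split_ram_into_ranges(node_mem, 2, 4ULL << 30);
    return 0;
}

For this example it prints a low and a high range for node 0 and a single high range for node 1, which matches the set of SRAT memory affinity entries the new build_srat() loop emits.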
@@ -10,6 +10,7 @@
 #include "qapi/error.h"
 #include "hw/boards.h"
 #include "hw/char/serial.h"
+#include "sysemu/kvm.h"
 #include "sysemu/sysemu.h"
 #include "sysemu/qtest.h"
 #include "sysemu/runstate.h"
@@ -463,7 +464,8 @@ static void fdt_add_memory_node(MachineState *ms,
     char *nodename = g_strdup_printf("/memory@%" PRIx64, base);
 
     qemu_fdt_add_subnode(ms->fdt, nodename);
-    qemu_fdt_setprop_cells(ms->fdt, nodename, "reg", 0, base, 0, size);
+    qemu_fdt_setprop_cells(ms->fdt, nodename, "reg", base >> 32, base,
+                           size >> 32, size);
     qemu_fdt_setprop_string(ms->fdt, nodename, "device_type", "memory");
 
     if (ms->numa_state && ms->numa_state->num_nodes) {
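For context on the hunk above: the virt machine's FDT uses two address cells and two size cells, so each 64-bit base and size is written as a high and a low 32-bit cell. The old call passed literal zeros for the high cells, truncating any memory node at or above 4 GiB. A small sketch of the cell encoding (not QEMU code, the helper is hypothetical):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Emit one 64-bit address and size as two 32-bit FDT cells each, high word first. */
static void print_reg_cells(uint64_t base, uint64_t size)
{
    printf("reg = <0x%" PRIx32 " 0x%" PRIx32 " 0x%" PRIx32 " 0x%" PRIx32 ">;\n",
           (uint32_t)(base >> 32), (uint32_t)base,
           (uint32_t)(size >> 32), (uint32_t)size);
}

int main(void)
{
    /* a memory node placed above 4 GiB, e.g. a second NUMA node */
    print_reg_cells(0x100000000ULL, 0x80000000ULL);
    /* the old call effectively emitted <0x0 0x0 0x0 0x80000000> here */
    return 0;
}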
@@ -473,6 +475,48 @@
     g_free(nodename);
 }
 
+static void fdt_add_memory_nodes(MachineState *ms)
+{
+    hwaddr base, size, ram_size, gap;
+    int i, nb_numa_nodes, nodes;
+    NodeInfo *numa_info;
+
+    ram_size = ms->ram_size;
+    base = VIRT_LOWMEM_BASE;
+    gap = VIRT_LOWMEM_SIZE;
+    nodes = nb_numa_nodes = ms->numa_state->num_nodes;
+    numa_info = ms->numa_state->nodes;
+    if (!nodes) {
+        nodes = 1;
+    }
+
+    for (i = 0; i < nodes; i++) {
+        if (nb_numa_nodes) {
+            size = numa_info[i].node_mem;
+        } else {
+            size = ram_size;
+        }
+
+        /*
+         * memory for the node splited into two part
+         *   lowram:  [base, +gap)
+         *   highram: [VIRT_HIGHMEM_BASE, +(len - gap))
+         */
+        if (size >= gap) {
+            fdt_add_memory_node(ms, base, gap, i);
+            size -= gap;
+            base = VIRT_HIGHMEM_BASE;
+            gap = ram_size - VIRT_LOWMEM_SIZE;
+        }
+
+        if (size) {
+            fdt_add_memory_node(ms, base, size, i);
+            base += size;
+            gap -= size;
+        }
+    }
+}
+
 static void virt_build_smbios(LoongArchVirtMachineState *lvms)
 {
     MachineState *ms = MACHINE(lvms);
@@ -840,18 +884,23 @@ static void virt_iocsr_misc_write(void *opaque, hwaddr addr,
 
 static uint64_t virt_iocsr_misc_read(void *opaque, hwaddr addr, unsigned size)
 {
+    uint64_t ret;
+
     switch (addr) {
     case VERSION_REG:
         return 0x11ULL;
     case FEATURE_REG:
-        return 1ULL << IOCSRF_MSI | 1ULL << IOCSRF_EXTIOI |
-               1ULL << IOCSRF_CSRIPI;
+        ret = BIT(IOCSRF_MSI) | BIT(IOCSRF_EXTIOI) | BIT(IOCSRF_CSRIPI);
+        if (kvm_enabled()) {
+            ret |= BIT(IOCSRF_VM);
+        }
+        return ret;
     case VENDOR_REG:
         return 0x6e6f73676e6f6f4cULL; /* "Loongson" */
     case CPUNAME_REG:
         return 0x303030354133ULL; /* "3A5000" */
     case MISC_FUNC_REG:
-        return 1ULL << IOCSRM_EXTIOI_EN;
+        return BIT_ULL(IOCSRM_EXTIOI_EN);
     }
     return 0ULL;
 }
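The FEATURE_REG change above ('Add VM mode in IOCSR feature register in kvm mode') builds the value in a local so that IOCSRF_VM can be ORed in only when running under KVM, letting the guest kernel detect that it is virtualized. A hedged sketch of the same construction follows; the IOCSRF_* bit numbers are placeholders, the authoritative definitions live in target/loongarch/cpu.h.

#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BIT_ULL(n)  (1ULL << (n))

/* placeholder bit numbers, for illustration only */
enum {
    IOCSRF_MSI    = 2,
    IOCSRF_EXTIOI = 3,
    IOCSRF_CSRIPI = 4,
    IOCSRF_VM     = 11,
};

static uint64_t feature_reg(bool kvm_enabled)
{
    uint64_t ret = BIT_ULL(IOCSRF_MSI) | BIT_ULL(IOCSRF_EXTIOI) |
                   BIT_ULL(IOCSRF_CSRIPI);

    if (kvm_enabled) {
        ret |= BIT_ULL(IOCSRF_VM);  /* tell the guest it runs virtualized */
    }
    return ret;
}

int main(void)
{
    printf("tcg: 0x%" PRIx64 "\n", feature_reg(false));
    printf("kvm: 0x%" PRIx64 "\n", feature_reg(true));
    return 0;
}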
@@ -870,18 +919,70 @@ static const MemoryRegionOps virt_iocsr_misc_ops = {
     },
 };
 
+static void fw_cfg_add_memory(MachineState *ms)
+{
+    hwaddr base, size, ram_size, gap;
+    int nb_numa_nodes, nodes;
+    NodeInfo *numa_info;
+
+    ram_size = ms->ram_size;
+    base = VIRT_LOWMEM_BASE;
+    gap = VIRT_LOWMEM_SIZE;
+    nodes = nb_numa_nodes = ms->numa_state->num_nodes;
+    numa_info = ms->numa_state->nodes;
+    if (!nodes) {
+        nodes = 1;
+    }
+
+    /* add fw_cfg memory map of node0 */
+    if (nb_numa_nodes) {
+        size = numa_info[0].node_mem;
+    } else {
+        size = ram_size;
+    }
+
+    if (size >= gap) {
+        memmap_add_entry(base, gap, 1);
+        size -= gap;
+        base = VIRT_HIGHMEM_BASE;
+        gap = ram_size - VIRT_LOWMEM_SIZE;
+    }
+
+    if (size) {
+        memmap_add_entry(base, size, 1);
+        base += size;
+    }
+
+    if (nodes < 2) {
+        return;
+    }
+
+    /* add fw_cfg memory map of other nodes */
+    size = ram_size - numa_info[0].node_mem;
+    gap = VIRT_LOWMEM_BASE + VIRT_LOWMEM_SIZE;
+    if (base < gap && (base + size) > gap) {
+        /*
+         * memory map for the maining nodes splited into two part
+         *   lowram:  [base, +(gap - base))
+         *   highram: [VIRT_HIGHMEM_BASE, +(size - (gap - base)))
+         */
+        memmap_add_entry(base, gap - base, 1);
+        size -= gap - base;
+        base = VIRT_HIGHMEM_BASE;
+    }
+
+    if (size)
+        memmap_add_entry(base, size, 1);
+}
+
 static void virt_init(MachineState *machine)
 {
     LoongArchCPU *lacpu;
     const char *cpu_model = machine->cpu_type;
-    ram_addr_t offset = 0;
-    ram_addr_t ram_size = machine->ram_size;
-    uint64_t highram_size = 0, phyAddr = 0;
     MemoryRegion *address_space_mem = get_system_memory();
     LoongArchVirtMachineState *lvms = LOONGARCH_VIRT_MACHINE(machine);
-    int nb_numa_nodes = machine->numa_state->num_nodes;
-    NodeInfo *numa_info = machine->numa_state->nodes;
     int i;
+    hwaddr base, size, ram_size = machine->ram_size;
     const CPUArchIdList *possible_cpus;
     MachineClass *mc = MACHINE_GET_CLASS(machine);
     CPUState *cpu;
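The new fw_cfg_add_memory() above reports node 0 with the usual low/high split, then folds nodes 1..N-1, which are contiguous in guest-physical space, into at most two memmap entries, splitting only when the run straddles the end of the low window. A standalone sketch of that second branch (assumed VIRT_* values, hypothetical node sizes):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define VIRT_LOWMEM_BASE   0x00000000ULL
#define VIRT_LOWMEM_SIZE   0x10000000ULL   /* assumed */
#define VIRT_HIGHMEM_BASE  0x90000000ULL   /* assumed */

/* stand-in for QEMU's memmap_add_entry(): just print the entry */
static void memmap_add_entry(uint64_t base, uint64_t size, int type)
{
    printf("memmap: base 0x%" PRIx64 " size 0x%" PRIx64 " type %d\n",
           base, size, type);
}

int main(void)
{
    /* hypothetical example: 1 GiB guest, node0 = 128 MiB, node1 = 896 MiB */
    uint64_t ram_size  = 1ULL << 30;
    uint64_t node0_mem = 128ULL << 20;
    uint64_t base = VIRT_LOWMEM_BASE + node0_mem;  /* node0 fits in low RAM */
    uint64_t size = ram_size - node0_mem;          /* everything after node0 */
    uint64_t gap  = VIRT_LOWMEM_BASE + VIRT_LOWMEM_SIZE;

    if (base < gap && (base + size) > gap) {
        /* part below the end of the low window, the rest in the high window */
        memmap_add_entry(base, gap - base, 1);
        size -= gap - base;
        base = VIRT_HIGHMEM_BASE;
    }
    if (size) {
        memmap_add_entry(base, size, 1);
    }
    return 0;
}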
@@ -890,10 +991,6 @@ static void virt_init(MachineState *machine)
         cpu_model = LOONGARCH_CPU_TYPE_NAME("la464");
     }
 
-    if (ram_size < 1 * GiB) {
-        error_report("ram_size must be greater than 1G.");
-        exit(1);
-    }
     create_fdt(lvms);
 
     /* Create IOCSR space */
@@ -915,48 +1012,31 @@ static void virt_init(MachineState *machine)
         lacpu->phy_id = machine->possible_cpus->cpus[i].arch_id;
     }
     fdt_add_cpu_nodes(lvms);
+    fdt_add_memory_nodes(machine);
+    fw_cfg_add_memory(machine);
 
-    /* Node0 memory */
-    memmap_add_entry(VIRT_LOWMEM_BASE, VIRT_LOWMEM_SIZE, 1);
-    fdt_add_memory_node(machine, VIRT_LOWMEM_BASE, VIRT_LOWMEM_SIZE, 0);
-    memory_region_init_alias(&lvms->lowmem, NULL, "loongarch.node0.lowram",
-                             machine->ram, offset, VIRT_LOWMEM_SIZE);
-    memory_region_add_subregion(address_space_mem, phyAddr, &lvms->lowmem);
-
-    offset += VIRT_LOWMEM_SIZE;
-    if (nb_numa_nodes > 0) {
-        assert(numa_info[0].node_mem > VIRT_LOWMEM_SIZE);
-        highram_size = numa_info[0].node_mem - VIRT_LOWMEM_SIZE;
-    } else {
-        highram_size = ram_size - VIRT_LOWMEM_SIZE;
+    size = ram_size;
+    base = VIRT_LOWMEM_BASE;
+    if (size > VIRT_LOWMEM_SIZE) {
+        size = VIRT_LOWMEM_SIZE;
     }
-    phyAddr = VIRT_HIGHMEM_BASE;
-    memmap_add_entry(phyAddr, highram_size, 1);
-    fdt_add_memory_node(machine, phyAddr, highram_size, 0);
-    memory_region_init_alias(&lvms->highmem, NULL, "loongarch.node0.highram",
-                             machine->ram, offset, highram_size);
-    memory_region_add_subregion(address_space_mem, phyAddr, &lvms->highmem);
-
-    /* Node1 - Nodemax memory */
-    offset += highram_size;
-    phyAddr += highram_size;
-
-    for (i = 1; i < nb_numa_nodes; i++) {
-        MemoryRegion *nodemem = g_new(MemoryRegion, 1);
-        g_autofree char *ramName = g_strdup_printf("loongarch.node%d.ram", i);
-        memory_region_init_alias(nodemem, NULL, ramName, machine->ram,
-                                 offset, numa_info[i].node_mem);
-        memory_region_add_subregion(address_space_mem, phyAddr, nodemem);
-        memmap_add_entry(phyAddr, numa_info[i].node_mem, 1);
-        fdt_add_memory_node(machine, phyAddr, numa_info[i].node_mem, i);
-        offset += numa_info[i].node_mem;
-        phyAddr += numa_info[i].node_mem;
+
+    memory_region_init_alias(&lvms->lowmem, NULL, "loongarch.lowram",
+                             machine->ram, base, size);
+    memory_region_add_subregion(address_space_mem, base, &lvms->lowmem);
+    base += size;
+    if (ram_size - size) {
+        base = VIRT_HIGHMEM_BASE;
+        memory_region_init_alias(&lvms->highmem, NULL, "loongarch.highram",
+                                 machine->ram, VIRT_LOWMEM_BASE + size, ram_size - size);
+        memory_region_add_subregion(address_space_mem, base, &lvms->highmem);
+        base += ram_size - size;
     }
 
     /* initialize device memory address space */
     if (machine->ram_size < machine->maxram_size) {
         ram_addr_t device_mem_size = machine->maxram_size - machine->ram_size;
         hwaddr device_mem_base;
 
         if (machine->ram_slots > ACPI_MAX_RAM_SLOTS) {
             error_report("unsupported amount of memory slots: %"PRIu64,
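With the 'Refine system dram memory region' hunk above, virt_init() maps the RAM backend through at most two aliases, a low alias capped at VIRT_LOWMEM_SIZE and a high alias at VIRT_HIGHMEM_BASE for the remainder, and hotpluggable device memory then starts at the running base. A sketch of the resulting layout for a non-NUMA 4 GiB guest (not QEMU code, VIRT_* values assumed):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define VIRT_LOWMEM_BASE   0x00000000ULL
#define VIRT_LOWMEM_SIZE   0x10000000ULL   /* assumed */
#define VIRT_HIGHMEM_BASE  0x90000000ULL   /* assumed */

int main(void)
{
    uint64_t ram_size = 4ULL << 30;        /* e.g. -m 4G, no NUMA */
    uint64_t size = ram_size;
    uint64_t base = VIRT_LOWMEM_BASE;

    if (size > VIRT_LOWMEM_SIZE) {
        size = VIRT_LOWMEM_SIZE;
    }
    /* low alias: backend offset 0, mapped at VIRT_LOWMEM_BASE */
    printf("lowram : gpa 0x%" PRIx64 " backend off 0x0 size 0x%" PRIx64 "\n",
           base, size);
    base += size;

    if (ram_size - size) {
        base = VIRT_HIGHMEM_BASE;
        /* high alias: the rest of the backend, mapped at VIRT_HIGHMEM_BASE */
        printf("highram: gpa 0x%" PRIx64 " backend off 0x%" PRIx64
               " size 0x%" PRIx64 "\n",
               base, (uint64_t)(VIRT_LOWMEM_BASE + size), ram_size - size);
        base += ram_size - size;
    }
    /* hotpluggable device memory starts right after the high alias */
    printf("device memory base: 0x%" PRIx64 "\n", base);
    return 0;
}

For 4 GiB of RAM this places the high alias at 0x90000000 with a size of 0xf0000000, so device memory starts at 0x180000000.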
@@ -970,9 +1050,7 @@ static void virt_init(MachineState *machine)
                          "%d bytes", TARGET_PAGE_SIZE);
             exit(EXIT_FAILURE);
         }
-        /* device memory base is the top of high memory address. */
-        device_mem_base = ROUND_UP(VIRT_HIGHMEM_BASE + highram_size, 1 * GiB);
-        machine_memory_devices_init(machine, device_mem_base, device_mem_size);
+        machine_memory_devices_init(machine, base, device_mem_size);
     }
 
     /* load the BIOS image. */
@@ -1198,7 +1276,6 @@ static void virt_class_init(ObjectClass *oc, void *data)
     HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(oc);
 
     mc->init = virt_init;
-    mc->default_ram_size = 1 * GiB;
     mc->default_cpu_type = LOONGARCH_CPU_TYPE_NAME("la464");
     mc->default_ram_id = "loongarch.ram";
     mc->max_cpus = LOONGARCH_MAX_CPUS;
@@ -645,16 +645,10 @@ static void loongarch_set_lasx(Object *obj, bool value, Error **errp)
 
 void loongarch_cpu_post_init(Object *obj)
 {
-    LoongArchCPU *cpu = LOONGARCH_CPU(obj);
-
-    if (FIELD_EX32(cpu->env.cpucfg[2], CPUCFG2, LSX)) {
-        object_property_add_bool(obj, "lsx", loongarch_get_lsx,
-                                 loongarch_set_lsx);
-    }
-    if (FIELD_EX32(cpu->env.cpucfg[2], CPUCFG2, LASX)) {
-        object_property_add_bool(obj, "lasx", loongarch_get_lasx,
-                                 loongarch_set_lasx);
-    }
+    object_property_add_bool(obj, "lsx", loongarch_get_lsx,
+                             loongarch_set_lsx);
+    object_property_add_bool(obj, "lasx", loongarch_get_lasx,
+                             loongarch_set_lasx);
 }
 
 static void loongarch_cpu_init(Object *obj)
@@ -436,6 +436,9 @@ static int kvm_loongarch_get_regs_fp(CPUState *cs)
     env->fcsr0 = fpu.fcsr;
     for (i = 0; i < 32; i++) {
         env->fpr[i].vreg.UD[0] = fpu.fpr[i].val64[0];
+        env->fpr[i].vreg.UD[1] = fpu.fpr[i].val64[1];
+        env->fpr[i].vreg.UD[2] = fpu.fpr[i].val64[2];
+        env->fpr[i].vreg.UD[3] = fpu.fpr[i].val64[3];
     }
     for (i = 0; i < 8; i++) {
         env->cf[i] = fpu.fcc & 0xFF;
@@ -455,6 +458,9 @@ static int kvm_loongarch_put_regs_fp(CPUState *cs)
     fpu.fcc = 0;
     for (i = 0; i < 32; i++) {
         fpu.fpr[i].val64[0] = env->fpr[i].vreg.UD[0];
+        fpu.fpr[i].val64[1] = env->fpr[i].vreg.UD[1];
+        fpu.fpr[i].val64[2] = env->fpr[i].vreg.UD[2];
+        fpu.fpr[i].val64[3] = env->fpr[i].vreg.UD[3];
     }
 
     for (i = 0; i < 8; i++) {
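The two kvm.c hunks above implement 'fpu save the vreg registers high 192bit': each LoongArch FP/vector register is backed by a 256-bit vreg (LSX uses the low 128 bits, LASX all 256), and only lane 0 was previously copied between QEMU and KVM. A standalone illustration of what was being dropped (types and names here are simplified stand-ins):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef struct {
    uint64_t UD[4];   /* four 64-bit lanes = 256 bits */
} VReg;

static void copy_lane0_only(VReg *dst, const VReg *src)
{
    dst->UD[0] = src->UD[0];            /* old behaviour: upper 192 bits lost */
}

static void copy_all_lanes(VReg *dst, const VReg *src)
{
    for (int i = 0; i < 4; i++) {
        dst->UD[i] = src->UD[i];        /* new behaviour */
    }
}

int main(void)
{
    VReg src = { { 0x1111, 0x2222, 0x3333, 0x4444 } };
    VReg a, b;

    memset(&a, 0, sizeof(a));
    memset(&b, 0, sizeof(b));
    copy_lane0_only(&a, &src);
    copy_all_lanes(&b, &src);

    printf("lane 0 only: %" PRIx64 " %" PRIx64 " %" PRIx64 " %" PRIx64 "\n",
           a.UD[0], a.UD[1], a.UD[2], a.UD[3]);
    printf("all lanes  : %" PRIx64 " %" PRIx64 " %" PRIx64 " %" PRIx64 "\n",
           b.UD[0], b.UD[1], b.UD[2], b.UD[3]);
    return 0;
}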
@@ -145,8 +145,8 @@ static const VMStateDescription vmstate_tlb = {
 /* LoongArch CPU state */
 const VMStateDescription vmstate_loongarch_cpu = {
     .name = "cpu",
-    .version_id = 1,
-    .minimum_version_id = 1,
+    .version_id = 2,
+    .minimum_version_id = 2,
     .fields = (const VMStateField[]) {
         VMSTATE_UINTTL_ARRAY(env.gpr, LoongArchCPU, 32),
         VMSTATE_UINTTL(env.pc, LoongArchCPU),
@@ -208,6 +208,8 @@ const VMStateDescription vmstate_loongarch_cpu = {
         VMSTATE_UINT64(env.CSR_DERA, LoongArchCPU),
         VMSTATE_UINT64(env.CSR_DSAVE, LoongArchCPU),
 
+        VMSTATE_UINT64(kvm_state_counter, LoongArchCPU),
+
         VMSTATE_END_OF_LIST()
     },
     .subsections = (const VMStateDescription * const []) {
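The machine.c hunks go with 'Fix VM recovery from disk failures': a kvm_state_counter field joins the migration stream and version_id/minimum_version_id move to 2, so streams that lack the field are rejected instead of being misparsed. A minimal sketch of that versioning rule (the struct, field layout and loader here are hypothetical, not QEMU's VMState machinery):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define CPU_STATE_VERSION      2
#define CPU_STATE_MIN_VERSION  2

typedef struct {
    uint64_t pc;
    uint64_t kvm_state_counter;   /* new field carried only by version 2 */
} CpuState;

/* Refuse streams older (or newer) than what this loader understands. */
static int cpu_state_load(CpuState *s, unsigned stream_version,
                          const uint64_t *stream, size_t nwords)
{
    if (stream_version < CPU_STATE_MIN_VERSION ||
        stream_version > CPU_STATE_VERSION || nwords < 2) {
        return -1;   /* incompatible migration stream */
    }
    s->pc = stream[0];
    s->kvm_state_counter = stream[1];
    return 0;
}

int main(void)
{
    CpuState s;
    uint64_t v2_stream[] = { 0x9000000000000000ULL, 42 };

    printf("load as v1: %d\n", cpu_state_load(&s, 1, v2_stream, 1));  /* rejected */
    printf("load as v2: %d\n", cpu_state_load(&s, 2, v2_stream, 2));  /* ok */
    return 0;
}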