时间轴

2025-11-21

  1. init

环境

1
2
3
4
5
6
wget https://download.qemu.org/qemu-10.1.2.tar.xz
tar xvJf qemu-10.1.2.tar.xz
cd qemu-10.1.2
mkdir -p output
./configure --prefix=$PWD/output --target-list=aarch64-softmmu,riscv64-softmmu --enable-debug
bear -- make -j$(nproc)

创建.clangd

1
2
3
CompileFlags:
Add: -Wno-unknown-warning-option
Remove: [-m*, -f*]

virt Machine初始化

最方便的办法,是通过 gdb 来反向定位源码。

按照前面 QOM 的讲解,virt Machine 必定属于一个 QObject,我们可以在它的 class 初始化或者 object 实例化的源码位置,打一个断点,来观察调用栈。这里我们先搜索一下 virt Machine 源码里关于 typeinfo 相关的代码:

virt_machine_typeinfo

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
/*
 * QOM type registration for the RISC-V "virt" machine.
 * MACHINE_TYPE_NAME("virt") expands to the machine's QOM type name,
 * and TYPE_MACHINE makes it a subclass of the generic machine type.
 */
static const TypeInfo virt_machine_typeinfo = {
.name = MACHINE_TYPE_NAME("virt"),
.parent = TYPE_MACHINE,
.class_init = virt_machine_class_init,      /* runs once, when the class is created */
.instance_init = virt_machine_instance_init, /* runs per object instantiation */
.instance_size = sizeof(RISCVVirtState),
/* The machine implements the hotplug-handler interface (see hc->plug below). */
.interfaces = (const InterfaceInfo[]) {
{ TYPE_HOTPLUG_HANDLER },
{ }
},
};

/* Callback invoked by the type_init machinery to register the type above. */
static void virt_machine_init_register_types(void)
{
type_register_static(&virt_machine_typeinfo);
}

/* Schedules registration at QEMU startup (constructor-like hook). */
type_init(virt_machine_init_register_types)

所以,我们可以给 virt_machine_class_init() 和 virt_machine_instance_init() 分别打一个断点。

使用下面的命令用 gdb 调试 QEMU :

1
2
3
4
5
$ gdb ./build/qemu-system-riscv64 -ex "set args -M virt -nographic"
(gdb) b virt_machine_class_init
(gdb) b virt_machine_instance_init
(gdb) r
...

virt_machine_class_init

首先我们跟踪到的,是 virt_machine_class_init 函数

hw/riscv/virt.c

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
/*
 * Class initializer for the "virt" machine type.
 * Fills in the MachineClass callbacks/defaults (most importantly
 * mc->init = virt_machine_init, called later to build the board) and
 * registers user-settable class properties (aclint, aia, acpi, ...).
 */
static void virt_machine_class_init(ObjectClass *oc, const void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(oc);

/* Board description and the board-construction entry point. */
mc->desc = "RISC-V VirtIO board";
mc->init = virt_machine_init;
mc->max_cpus = VIRT_CPUS_MAX;
mc->default_cpu_type = TYPE_RISCV_CPU_BASE;
mc->block_default_type = IF_VIRTIO;
mc->no_cdrom = 1;
mc->pci_allow_0_address = true;
/* NUMA topology helpers shared by the RISC-V boards. */
mc->possible_cpu_arch_ids = riscv_numa_possible_cpu_arch_ids;
mc->cpu_index_to_instance_props = riscv_numa_cpu_index_to_props;
mc->get_default_cpu_node_id = riscv_numa_get_default_cpu_node_id;
mc->numa_mem_supported = true;
/* platform instead of architectural choice */
mc->cpu_cluster_has_numa_boundary = true;
mc->default_ram_id = "riscv_virt_board.ram";
/* Guard against a parent class having already installed a handler. */
assert(!mc->get_hotplug_handler);
mc->get_hotplug_handler = virt_machine_get_hotplug_handler;

hc->plug = virt_machine_device_plug_cb;

/* Devices users may attach dynamically via -device on the sysbus. */
machine_class_allow_dynamic_sysbus_dev(mc, TYPE_RAMFB_DEVICE);
machine_class_allow_dynamic_sysbus_dev(mc, TYPE_UEFI_VARS_SYSBUS);
#ifdef CONFIG_TPM
machine_class_allow_dynamic_sysbus_dev(mc, TYPE_TPM_TIS_SYSBUS);
#endif

/* -M virt,aclint=on|off : emulate ACLINT devices (TCG only). */
object_class_property_add_bool(oc, "aclint", virt_get_aclint,
virt_set_aclint);
object_class_property_set_description(oc, "aclint",
"(TCG only) Set on/off to "
"enable/disable emulating "
"ACLINT devices");

/* -M virt,aia=none|aplic|aplic-imsic : interrupt controller type. */
object_class_property_add_str(oc, "aia", virt_get_aia,
virt_set_aia);
object_class_property_set_description(oc, "aia",
"Set type of AIA interrupt "
"controller. Valid values are "
"none, aplic, and aplic-imsic.");

/* -M virt,aia-guests=N : guest MMIO pages per IMSIC. */
object_class_property_add_str(oc, "aia-guests",
virt_get_aia_guests,
virt_set_aia_guests);
{
/* Description is built at runtime to embed VIRT_IRQCHIP_MAX_GUESTS. */
g_autofree char *str =
g_strdup_printf("Set number of guest MMIO pages for AIA IMSIC. "
"Valid value should be between 0 and %d.",
VIRT_IRQCHIP_MAX_GUESTS);
object_class_property_set_description(oc, "aia-guests", str);
}

/* -M virt,acpi=on|off|auto */
object_class_property_add(oc, "acpi", "OnOffAuto",
virt_get_acpi, virt_set_acpi,
NULL, NULL);
object_class_property_set_description(oc, "acpi",
"Enable ACPI");

/* -M virt,iommu-sys=on|off|auto : platform RISC-V IOMMU device. */
object_class_property_add(oc, "iommu-sys", "OnOffAuto",
virt_get_iommu_sys, virt_set_iommu_sys,
NULL, NULL);
object_class_property_set_description(oc, "iommu-sys",
"Enable IOMMU platform device");
}

调用栈:

virt_machine_init调用栈

virt_machine_instance_init

hw/riscv/virt.c

1
2
3
4
5
6
7
8
9
10
11
/*
 * Instance initializer: runs when a "virt" machine object is created,
 * before the board is built. Creates the flash devices and seeds the
 * ACPI-related defaults on the machine state.
 */
static void virt_machine_instance_init(Object *obj)
{
RISCVVirtState *s = RISCV_VIRT_MACHINE(obj);

virt_flash_create(s);

/* ACPI OEM identifiers (fixed-length, hence g_strndup with 6/8). */
s->oem_id = g_strndup(ACPI_BUILD_APPNAME6, 6);
s->oem_table_id = g_strndup(ACPI_BUILD_APPNAME8, 8);
/* "auto" lets the board decide later whether ACPI / IOMMU are enabled. */
s->acpi = ON_OFF_AUTO_AUTO;
s->iommu_sys = ON_OFF_AUTO_AUTO;
}

调用栈:

virt_machine_instance_init

这里可以看到,main 函数首先调用了 qemu_init 函数,qemu_init 再调用 qemu_create_machine 函数来创建 machine:

1
main() → qemu_init() → qemu_create_machine()

即先创建 class,然后再实例化 QObject,这和前面讲解 QOM 给出的流程一致。

virt_machine_init

在 virt_machine_class_init 中,MachineClass 的 init 回调被赋值为 virt_machine_init:

1
2
3
4
MachineClass *mc = MACHINE_CLASS(oc);
HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(oc);
mc->desc = "RISC-V VirtIO board";
mc->init = virt_machine_init;

hw/riscv/virt.c

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
/*
 * Board construction for the "virt" machine (mc->init callback).
 * Builds everything: per-socket hart arrays, timers (ACLINT/CLINT),
 * interrupt controllers (PLIC or AIA), RAM/ROM regions, fw_cfg,
 * MMIO peripherals (UART, RTC, VirtIO, PCIe, flash), the device tree,
 * and finally registers the machine_done notifier that loads firmware.
 */
static void virt_machine_init(MachineState *machine)
{
RISCVVirtState *s = RISCV_VIRT_MACHINE(machine);
MemoryRegion *system_memory = get_system_memory();
MemoryRegion *mask_rom = g_new(MemoryRegion, 1);
/* One irqchip pointer per device class; may alias (see selection below). */
DeviceState *mmio_irqchip, *virtio_irqchip, *pcie_irqchip;
int i, base_hartid, hart_count;
int socket_count = riscv_socket_count(machine);

/* Static physical-address map of the board (MROM, DRAM, MMIO, ...). */
s->memmap = virt_memmap;

/* Check socket count limit */
if (VIRT_SOCKETS_MAX < socket_count) {
error_report("number of sockets/nodes should be less than %d",
VIRT_SOCKETS_MAX);
exit(1);
}

/* 'aclint' property is only meaningful under TCG. */
if (!virt_aclint_allowed() && s->have_aclint) {
error_report("'aclint' is only available with TCG acceleration");
exit(1);
}

/* Initialize sockets */
mmio_irqchip = virtio_irqchip = pcie_irqchip = NULL;
for (i = 0; i < socket_count; i++) {
g_autofree char *soc_name = g_strdup_printf("soc%d", i);

/* Hart IDs within a socket must be contiguous. */
if (!riscv_socket_check_hartids(machine, i)) {
error_report("discontinuous hartids in socket%d", i);
exit(1);
}

base_hartid = riscv_socket_first_hartid(machine, i);
if (base_hartid < 0) {
error_report("can't find hartid base for socket%d", i);
exit(1);
}

hart_count = riscv_socket_hart_count(machine, i);
if (hart_count < 0) {
error_report("can't find hart count for socket%d", i);
exit(1);
}

/* Create and realize the hart array (the CPUs) for this socket. */
object_initialize_child(OBJECT(machine), soc_name, &s->soc[i],
TYPE_RISCV_HART_ARRAY);
object_property_set_str(OBJECT(&s->soc[i]), "cpu-type",
machine->cpu_type, &error_abort);
object_property_set_int(OBJECT(&s->soc[i]), "hartid-base",
base_hartid, &error_abort);
object_property_set_int(OBJECT(&s->soc[i]), "num-harts",
hart_count, &error_abort);
sysbus_realize(SYS_BUS_DEVICE(&s->soc[i]), &error_fatal);

/* Per-socket timer/IPI devices: ACLINT variants or SiFive CLINT. */
if (virt_aclint_allowed() && s->have_aclint) {
if (s->aia_type == VIRT_AIA_TYPE_APLIC_IMSIC) {
/* Per-socket ACLINT MTIMER */
riscv_aclint_mtimer_create(s->memmap[VIRT_CLINT].base +
i * RISCV_ACLINT_DEFAULT_MTIMER_SIZE,
RISCV_ACLINT_DEFAULT_MTIMER_SIZE,
base_hartid, hart_count,
RISCV_ACLINT_DEFAULT_MTIMECMP,
RISCV_ACLINT_DEFAULT_MTIME,
RISCV_ACLINT_DEFAULT_TIMEBASE_FREQ, true);
} else {
/* Per-socket ACLINT MSWI, MTIMER, and SSWI */
riscv_aclint_swi_create(s->memmap[VIRT_CLINT].base +
i * s->memmap[VIRT_CLINT].size,
base_hartid, hart_count, false);
riscv_aclint_mtimer_create(s->memmap[VIRT_CLINT].base +
i * s->memmap[VIRT_CLINT].size +
RISCV_ACLINT_SWI_SIZE,
RISCV_ACLINT_DEFAULT_MTIMER_SIZE,
base_hartid, hart_count,
RISCV_ACLINT_DEFAULT_MTIMECMP,
RISCV_ACLINT_DEFAULT_MTIME,
RISCV_ACLINT_DEFAULT_TIMEBASE_FREQ, true);
riscv_aclint_swi_create(s->memmap[VIRT_ACLINT_SSWI].base +
i * s->memmap[VIRT_ACLINT_SSWI].size,
base_hartid, hart_count, true);
}
} else if (tcg_enabled()) {
/* Per-socket SiFive CLINT */
riscv_aclint_swi_create(
s->memmap[VIRT_CLINT].base + i * s->memmap[VIRT_CLINT].size,
base_hartid, hart_count, false);
riscv_aclint_mtimer_create(s->memmap[VIRT_CLINT].base +
i * s->memmap[VIRT_CLINT].size + RISCV_ACLINT_SWI_SIZE,
RISCV_ACLINT_DEFAULT_MTIMER_SIZE, base_hartid, hart_count,
RISCV_ACLINT_DEFAULT_MTIMECMP, RISCV_ACLINT_DEFAULT_MTIME,
RISCV_ACLINT_DEFAULT_TIMEBASE_FREQ, true);
}

/* Per-socket interrupt controller */
if (s->aia_type == VIRT_AIA_TYPE_NONE) {
s->irqchip[i] = virt_create_plic(s->memmap, i,
base_hartid, hart_count);
} else {
s->irqchip[i] = virt_create_aia(s->aia_type, s->aia_guests,
s->memmap, i, base_hartid,
hart_count);
}

/* Try to use different IRQCHIP instance based device type */
/*
 * Socket 0 serves MMIO+VirtIO+PCIe; with more sockets, VirtIO moves
 * to socket 1 and PCIe to socket 2, spreading IRQ load.
 */
if (i == 0) {
mmio_irqchip = s->irqchip[i];
virtio_irqchip = s->irqchip[i];
pcie_irqchip = s->irqchip[i];
}
if (i == 1) {
virtio_irqchip = s->irqchip[i];
pcie_irqchip = s->irqchip[i];
}
if (i == 2) {
pcie_irqchip = s->irqchip[i];
}
}

/* Under KVM with APLIC+IMSIC, let the in-kernel AIA handle interrupts. */
if (kvm_enabled() && virt_use_kvm_aia_aplic_imsic(s->aia_type)) {
kvm_riscv_aia_create(machine, IMSIC_MMIO_GROUP_MIN_SHIFT,
VIRT_IRQCHIP_NUM_SOURCES, VIRT_IRQCHIP_NUM_MSIS,
s->memmap[VIRT_APLIC_S].base,
s->memmap[VIRT_IMSIC_S].base,
s->aia_guests);
}

/* Place the high PCIe MMIO window depending on 32/64-bit target. */
if (riscv_is_32bit(&s->soc[0])) {
#if HOST_LONG_BITS == 64
/* limit RAM size in a 32-bit system */
if (machine->ram_size > 10 * GiB) {
machine->ram_size = 10 * GiB;
error_report("Limiting RAM size to 10 GiB");
}
#endif
virt_high_pcie_memmap.base = VIRT32_HIGH_PCIE_MMIO_BASE;
virt_high_pcie_memmap.size = VIRT32_HIGH_PCIE_MMIO_SIZE;
} else {
/* 64-bit: window starts just above the end of DRAM, size-aligned. */
virt_high_pcie_memmap.size = VIRT64_HIGH_PCIE_MMIO_SIZE;
virt_high_pcie_memmap.base = s->memmap[VIRT_DRAM].base +
machine->ram_size;
virt_high_pcie_memmap.base =
ROUND_UP(virt_high_pcie_memmap.base, virt_high_pcie_memmap.size);
}

/* register system main memory (actual RAM) */
memory_region_add_subregion(system_memory, s->memmap[VIRT_DRAM].base,
machine->ram);

/* boot rom */
memory_region_init_rom(mask_rom, NULL, "riscv_virt_board.mrom",
s->memmap[VIRT_MROM].size, &error_fatal);
memory_region_add_subregion(system_memory, s->memmap[VIRT_MROM].base,
mask_rom);

/*
 * Init fw_cfg. Must be done before riscv_load_fdt, otherwise the
 * device tree cannot be altered and we get FDT_ERR_NOSPACE.
 */
s->fw_cfg = create_fw_cfg(machine, s->memmap[VIRT_FW_CFG].base);
rom_set_fw(s->fw_cfg);

/* SiFive Test MMIO device */
sifive_test_create(s->memmap[VIRT_TEST].base);

/* VirtIO MMIO devices */
for (i = 0; i < VIRTIO_COUNT; i++) {
sysbus_create_simple("virtio-mmio",
s->memmap[VIRT_VIRTIO].base + i * s->memmap[VIRT_VIRTIO].size,
qdev_get_gpio_in(virtio_irqchip, VIRTIO_IRQ + i));
}

/* PCIe root complex and the platform bus for dynamic sysbus devices. */
gpex_pcie_init(system_memory, pcie_irqchip, s);

create_platform_bus(s, mmio_irqchip);

/* 16550-compatible UART (399193 is the baudbase used by this board). */
serial_mm_init(system_memory, s->memmap[VIRT_UART0].base,
0, qdev_get_gpio_in(mmio_irqchip, UART0_IRQ), 399193,
serial_hd(0), DEVICE_LITTLE_ENDIAN);

sysbus_create_simple("goldfish_rtc", s->memmap[VIRT_RTC].base,
qdev_get_gpio_in(mmio_irqchip, RTC_IRQ));

for (i = 0; i < ARRAY_SIZE(s->flash); i++) {
/* Map legacy -drive if=pflash to machine properties */
pflash_cfi01_legacy_drive(s->flash[i],
drive_get(IF_PFLASH, 0, i));
}
virt_flash_map(s, system_memory);

/* load/create device tree */
if (machine->dtb) {
/* User supplied -dtb: load it instead of generating one. */
machine->fdt = load_device_tree(machine->dtb, &s->fdt_size);
if (!machine->fdt) {
error_report("load_device_tree() failed");
exit(1);
}
} else {
create_fdt(s);
}

/* Optional platform RISC-V IOMMU device (iommu-sys property). */
if (virt_is_iommu_sys_enabled(s)) {
DeviceState *iommu_sys = qdev_new(TYPE_RISCV_IOMMU_SYS);

object_property_set_uint(OBJECT(iommu_sys), "addr",
s->memmap[VIRT_IOMMU_SYS].base,
&error_fatal);
object_property_set_uint(OBJECT(iommu_sys), "base-irq",
IOMMU_SYS_IRQ,
&error_fatal);
object_property_set_link(OBJECT(iommu_sys), "irqchip",
OBJECT(mmio_irqchip),
&error_fatal);

sysbus_realize_and_unref(SYS_BUS_DEVICE(iommu_sys), &error_fatal);
}

/* Firmware/kernel loading happens later, in virt_machine_done. */
s->machine_done.notify = virt_machine_done;
qemu_add_machine_init_done_notifier(&s->machine_done);
}

别看这个函数很长,实际上它做的事情很简单:

1
2
3
4
virt_machine_init()
riscv_socket_count() // 创建 CPU Socket
memory_region_init() // 初始化内存区域
create_fdt() // 生成设备树(DTB)

CPU socket 主要是可以按照簇(cluster)来创建多组 CPU 核心,然后按照地址空间初始化各种设备, 最后为 virt 在内存中生成一个设备树,方便运行 linux kernel。

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
┌───────────────────────────────────────────────────────────────┐
RISC-V VIRT Machine
(created by virt_machine_init)
└───────────────────────────────────────────────────────────────┘

CPU & Interrupt Subsystem
────────────────────────────────────────────────────────────────
┌───────────────┐ ┌───────────────┐ ┌───────────────┐
Socket 0 │ │ Socket 1 │ │ Socket 2...
(Hart Array) │ │ (Hart Array) │ │ (Hart Array)
harts 0..X │ │ harts N..M │ │ ...
└──────┬────────┘ └──────┬────────┘ └───────┬────────┘
│ │ │
│ │ │
┌────▼─────┐ ┌────▼─────┐ ┌────▼─────┐
ACLINT / │ │ ACLINT / │ │ ACLINT /
CLINT │ │ CLINT │ │ CLINT
└────┬─────┘ └────┬─────┘ └─────┬────┘
│ │ │
▼ ▼ ▼
┌───────────┐ ┌───────────┐ ┌───────────┐
IRQCHIP │ │ IRQCHIP │ │ IRQCHIP
PLIC/APLIC│ │ PLIC/APLIC│ │ PLIC/APLIC
+ IMSIC │ │ + IMSIC │ │ + IMSIC
└───────────┘ └───────────┘ └───────────┘

(其中:)
Socket0 IRQCHIP → 负责 MMIO + VirtIO + PCIe
Socket1 IRQCHIP → 负责 VirtIO + PCIe
Socket2 IRQCHIP → 负责 PCIe


Memory Subsystem
────────────────────────────────────────────────────────────────
Physical Address Space (system_memory)
┌───────────────────────────────────────────────────────────────┐
0x0000_0000 ───────────────────────────────────────────────┐ │
MROM (boot ROM) │ │
├────────────────────────────────────────────────────────────┤ │
DRAM (RAM) │ │
├────────────────────────────────────────────────────────────┤ │
Device MMIO │ │
│ • CLINT / ACLINT │ │
│ • PLIC / APLIC + IMSIC │ │
│ • UART0 │ │
│ • RTC │ │
│ • fw_cfg │ │
│ • flash │ │
│ • VirtIO-mmio buses │ │
│ • PCIE ECAM + MMIO window │ │
└───────────────────────────────────────────────────────────────┘


Platform & IO Devices
────────────────────────────────────────────────────────────────
┌───────────────────────┐
UART0 (serial) │ → IRQ via mmio_irqchip
├───────────────────────┤
RTC (goldfish) │ → IRQ via mmio_irqchip
├───────────────────────┤
Test Device(sifive_test)
├───────────────────────┤
Flash (pflash)
├───────────────────────┤
VirtIO-MMIO(0..7) │ → IRQ via virtio_irqchip
├───────────────────────┤
PCIE Root Complex │ → IRQ via pcie_irqchip
└───────────────────────┘

Firmware and Configuration
────────────────────────────────────────────────────────────────
fw_cfg — 用于传递 kernel / initrd / cmdline
FDT (DTB) — 机器设备树,描述上述资源
machine_done — 初始化结束回调


那么,是在什么位置加载的 OpenSBI 二进制程序呢?

加载客户机程序

有一点可以明确:肯定是存储 OpenSBI 的设备模型先被创建并初始化好,才能加载客户机程序二进制数据进去。按照这个思路,我们可以找到如下代码(实际上 QEMU 是在整个 virt Machine 就绪以后,才开始加载客户机程序,它被安排在 machine_done 回调中实现):

virt_machine_done

1
2
3
4
5
6
7
8
9
10
11
12
13
14
/*
 * machine_done notifier: runs after the whole machine is built.
 * Locates and loads the firmware (OpenSBI by default) into DRAM.
 * (Excerpt — elided parts marked with "..." in the original article.)
 */
static void virt_machine_done(Notifier *notifier, void *data)
{
/* Recover the machine state from the embedded notifier field. */
RISCVVirtState *s = container_of(notifier, RISCVVirtState,
machine_done);
MachineState *machine = MACHINE(s);
/* Firmware is loaded at the base of DRAM. */
hwaddr start_addr = s->memmap[VIRT_DRAM].base;
target_ulong firmware_end_addr, kernel_start_addr;
const char *firmware_name = riscv_default_firmware_name(&s->soc[0]);
...

firmware_end_addr = riscv_find_and_load_firmware(machine, firmware_name,
&start_addr, NULL);
....
}
  • firmware_name 被赋值为 riscv_default_firmware_name(&s->soc[0]) 的返回值
  • start_addr 被赋值为 s->memmap[VIRT_DRAM].base

riscv_default_firmware_name

hw/riscv/boot.c

1
2
3
4
5
6
7
8
9
/*
 * Return the default firmware (BIOS) filename for the given hart array:
 * the bundled 32-bit or 64-bit OpenSBI fw_dynamic binary.
 */
const char *riscv_default_firmware_name(RISCVHartArrayState *harts)
{
if (riscv_is_32bit(harts)) {
return RISCV32_BIOS_BIN;
}

return RISCV64_BIOS_BIN;
}

include/hw/riscv/boot.h 中定义了

1
2
#define RISCV32_BIOS_BIN    "opensbi-riscv32-generic-fw_dynamic.bin"
#define RISCV64_BIOS_BIN "opensbi-riscv64-generic-fw_dynamic.bin"

因此firmware_name被赋值为opensbi-riscv64-generic-fw_dynamic.bin

riscv_find_and_load_firmware

riscv_find_and_load_firmware的第二个参数是default_machine_firmware,字符串类型,而我们传入的就是opensbi-riscv64-generic-fw_dynamic.bin,这个函数调用riscv_find_firmware获取firmware字符串

hw/riscv/boot.c

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
/*
 * Resolve which firmware to use (honouring -bios), load it at
 * *firmware_load_addr, and return the address just past its end.
 * If no firmware is to be loaded (-bios none), returns the load
 * address unchanged.
 */
target_ulong riscv_find_and_load_firmware(MachineState *machine,
const char *default_machine_firmware,
hwaddr *firmware_load_addr,
symbol_fn_t sym_cb)
{
char *firmware_filename;
target_ulong firmware_end_addr = *firmware_load_addr;

/* machine->firmware holds the -bios option; may be NULL/"default"/"none". */
firmware_filename = riscv_find_firmware(machine->firmware,
default_machine_firmware);

if (firmware_filename) {
/* If not "none" load the firmware */
firmware_end_addr = riscv_load_firmware(firmware_filename,
firmware_load_addr, sym_cb);
g_free(firmware_filename);
}

return firmware_end_addr;
}

riscv_find_firmware

hw/riscv/boot.c

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
/*
 * Map the -bios option to a concrete firmware path:
 *   NULL / "default" -> the machine's bundled default (OpenSBI),
 *   "none"           -> NULL (load no firmware),
 *   anything else    -> resolve the user-supplied name.
 * Returns a newly allocated path (caller frees) or NULL.
 */
char *riscv_find_firmware(const char *firmware_filename,
const char *default_machine_firmware)
{
char *filename = NULL;

if ((!firmware_filename) || (!strcmp(firmware_filename, "default"))) {
/*
 * The user didn't specify -bios, or has specified "-bios default".
 * That means we are going to load the OpenSBI binary included in
 * the QEMU source.
 */
filename = riscv_find_bios(default_machine_firmware);
} else if (strcmp(firmware_filename, "none")) {
filename = riscv_find_bios(firmware_filename);
}

return filename;
}

这里看到,如果我们没有通过 -bios 指定固件(或者指定了 -bios default),就会加载 default_machine_firmware,也就是 OpenSBI;如果指定了 -bios none,则不会加载任何固件。

riscv_load_firmware

riscv_load_firmware会加载firmware

hw/riscv/boot.c

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
/*
 * Load the firmware image into guest memory.
 * First tries to load it as an ELF (which also updates
 * *firmware_load_addr to the ELF entry point); if that fails,
 * falls back to loading it as a flat binary at *firmware_load_addr.
 * Returns the guest address just past the loaded image, or exits
 * with an error if neither method succeeds.
 */
target_ulong riscv_load_firmware(const char *firmware_filename,
hwaddr *firmware_load_addr,
symbol_fn_t sym_cb)
{
uint64_t firmware_entry, firmware_end;
ssize_t firmware_size;

g_assert(firmware_filename != NULL);

/* Try ELF first: entry/end come from the ELF headers. */
if (load_elf_ram_sym(firmware_filename, NULL, NULL, NULL,
&firmware_entry, NULL, &firmware_end, NULL,
0, EM_RISCV, 1, 0, NULL, true, sym_cb) > 0) {
*firmware_load_addr = firmware_entry;
return firmware_end;
}

/* Fall back to a raw binary, bounded by the machine's RAM size. */
firmware_size = load_image_targphys_as(firmware_filename,
*firmware_load_addr,
current_machine->ram_size, NULL);

if (firmware_size > 0) {
return *firmware_load_addr + firmware_size;
}

error_report("could not load firmware '%s'", firmware_filename);
exit(1);
}

vCPU 执行的第一条指令

运行qemu-system-riscv64

1
$ ./build/qemu-system-riscv64 -M virt -s -S -nographic

这个命令让 QEMU 创建了一个 virt Machine,以 nographic 模式运行,串口输出到终端,默认 bios 使用 OpenSBI,并且开启了 gdbstub 远程调试功能,允许 riscv64-gdb 来调试客户机程序,默认端口号是 1234,同时停在第一条指令,等待 gdb 的连接。

然后

1
$ gdb -ex "set architecture riscv64" -ex "target remote localhost:1234"

vCPU的第一条指令

此处对应源码初始化的位置在:

target/riscv/cpu.c

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
/*
 * Reset "hold" phase for a RISC-V vCPU: puts the CPU architectural
 * state into its post-reset configuration. Most importantly it sets
 * env->pc = env->resetvec, which is where the vCPU fetches its very
 * first instruction.
 */
static void riscv_cpu_reset_hold(Object *obj, ResetType type)
{
#ifndef CONFIG_USER_ONLY
uint8_t iprio;
int i, irq, rdzero;
#endif
CPUState *cs = CPU(obj);
RISCVCPU *cpu = RISCV_CPU(cs);
RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(obj);
CPURISCVState *env = &cpu->env;

/* Chain to the parent class's hold phase first. */
if (mcc->parent_phases.hold) {
mcc->parent_phases.hold(obj, type);
}
#ifndef CONFIG_USER_ONLY
/* System emulation: reset into M-mode with interrupts disabled. */
env->misa_mxl = mcc->def->misa_mxl_max;
env->priv = PRV_M;
env->mstatus &= ~(MSTATUS_MIE | MSTATUS_MPRV);
if (env->misa_mxl > MXL_RV32) {
/*
 * The reset status of SXL/UXL is undefined, but mstatus is WARL
 * and we must ensure that the value after init is valid for read.
 */
env->mstatus = set_field(env->mstatus, MSTATUS64_SXL, env->misa_mxl);
env->mstatus = set_field(env->mstatus, MSTATUS64_UXL, env->misa_mxl);
if (riscv_has_ext(env, RVH)) {
/* Mirror SXL/UXL into the hypervisor shadow status registers. */
env->vsstatus = set_field(env->vsstatus,
MSTATUS64_SXL, env->misa_mxl);
env->vsstatus = set_field(env->vsstatus,
MSTATUS64_UXL, env->misa_mxl);
env->mstatus_hs = set_field(env->mstatus_hs,
MSTATUS64_SXL, env->misa_mxl);
env->mstatus_hs = set_field(env->mstatus_hs,
MSTATUS64_UXL, env->misa_mxl);
}
if (riscv_cpu_cfg(env)->ext_smdbltrp) {
env->mstatus = set_field(env->mstatus, MSTATUS_MDT, 1);
}
}
env->mcause = 0;
env->miclaim = MIP_SGEIP;
env->pc = env->resetvec; /* set PC: the address of the first instruction */
env->bins = 0;
env->two_stage_lookup = false;

env->menvcfg = (cpu->cfg.ext_svpbmt ? MENVCFG_PBMTE : 0) |
(!cpu->cfg.ext_svade && cpu->cfg.ext_svadu ?
MENVCFG_ADUE : 0);
env->henvcfg = 0;

/* Initialized default priorities of local interrupts. */
for (i = 0; i < ARRAY_SIZE(env->miprio); i++) {
iprio = riscv_cpu_default_priority(i);
env->miprio[i] = (i == IRQ_M_EXT) ? 0 : iprio;
env->siprio[i] = (i == IRQ_S_EXT) ? 0 : iprio;
env->hviprio[i] = 0;
}
i = 0;
while (!riscv_cpu_hviprio_index2irq(i, &irq, &rdzero)) {
if (!rdzero) {
env->hviprio[irq] = env->miprio[irq];
}
i++;
}

/*
 * Bits 10, 6, 2 and 12 of mideleg are read only 1 when the Hypervisor
 * extension is enabled.
 */
if (riscv_has_ext(env, RVH)) {
env->mideleg |= HS_MODE_INTERRUPTS;
}

/*
 * Clear mseccfg and unlock all the PMP entries upon reset.
 * This is allowed as per the priv and smepmp specifications
 * and is needed to clear stale entries across reboots.
 */
if (riscv_cpu_cfg(env)->ext_smepmp) {
env->mseccfg = 0;
}

pmp_unlock_entries(env);
#else
/* User-mode emulation: start in U-mode. */
env->priv = PRV_U;
env->senvcfg = 0;
env->menvcfg = 0;
#endif

/* on reset elp is clear */
env->elp = false;
/* on reset ssp is set to 0 */
env->ssp = 0;

env->xl = riscv_cpu_mxl(env);
cs->exception_index = RISCV_EXCP_NONE;
env->load_res = -1;
set_default_nan_mode(1, &env->fp_status);
/* Default NaN value: sign bit clear, frac msb set */
set_float_default_nan_pattern(0b01000000, &env->fp_status);
env->vill = true;

#ifndef CONFIG_USER_ONLY
if (cpu->cfg.debug) {
riscv_trigger_reset_hold(env);
}

/* Smrnmi: clear pending RNMIs and disable NMIs until re-enabled. */
if (cpu->cfg.ext_smrnmi) {
env->rnmip = 0;
env->mnstatus = set_field(env->mnstatus, MNSTATUS_NMIE, false);
}

/* Under KVM the in-kernel vCPU state must be reset as well. */
if (kvm_enabled()) {
kvm_riscv_reset_vcpu(cpu);
}
#endif
}

可以看到 env->pc 被赋值为 env->resetvec,而 resetvec 的默认值是:

1
2
3
//cpu_bits.h
/* Default Reset Vector address */
#define DEFAULT_RSTVEC 0x1000

参考: