if (machine_type) {
    machine_class = find_machine(machine_type, machines);
    if (!machine_class) {
        error_setg(errp, "unsupported machine type: \"%s\"", machine_type);
    }
    qdict_del(qdict, "type");
} else {
    machine_class = find_default_machine(machines);
    if (!machine_class) {
        error_setg(errp, "No machine specified, and there is no default");
    }
}
if (!machine_class) {
    error_append_hint(errp,
                      "Use -machine help to list supported machines\n");
}
return machine_class;
}
if (machine_class->minimum_page_bits) {
    if (!set_preferred_target_page_bits(machine_class->minimum_page_bits)) {
        /* This would be a board error: specifying a minimum smaller than
         * a target's compile-time fixed setting.
         */
        g_assert_not_reached();
    }
}
cpu_exec_init_all();
if (machine_class->hw_version) {
    qemu_set_hw_version(machine_class->hw_version);
}
/*
 * Get the default machine options from the machine if it is not already
 * specified either by the configuration file or by the command line.
 */
if (machine_class->default_machine_opts) {
    QDict *default_opts =
        keyval_parse(machine_class->default_machine_opts,
                     NULL, NULL, &error_abort);
    qemu_apply_legacy_machine_options(default_opts);
    object_set_properties_from_keyval(OBJECT(current_machine), default_opts,
                                      false, &error_abort);
    qobject_unref(default_opts);
}
}
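Where does default_machine_opts come from? Each board class can fill in the string in its class-init hook; below is a minimal, hypothetical sketch of that. The board name "demo-board" and the option value are invented for illustration; only the MachineClass field, the "graphics" machine property and the DEFINE_MACHINE macro are real QEMU interfaces.

#include "qemu/osdep.h"
#include "hw/boards.h"

/*
 * Hypothetical board: "demo-board" does not exist upstream. It only shows
 * where default_machine_opts is filled in before keyval_parse() above
 * turns it into machine properties.
 */
static void demo_machine_init(MachineClass *mc)
{
    mc->desc = "Demo board (illustration only)";
    /* Applied as defaults unless the user overrides them on the command line. */
    mc->default_machine_opts = "graphics=off";
}

DEFINE_MACHINE("demo-board", demo_machine_init)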
/*
 * This checkpoint is required by replay to separate prior clock reading
 * from the other reads, because timer polling functions query clock
 * values from the log.
 */
replay_checkpoint(CHECKPOINT_INIT);
if (!xen_enabled()) {
    /* On 32-bit hosts, QEMU is limited by virtual address space */
    if (machine->ram_size > (2047 << 20) && HOST_LONG_BITS == 32) {
        error_setg(errp, "at most 2047 MB RAM can be simulated");
        return;
    }
}
if (machine->memdev) {
    ram_addr_t backend_size = object_property_get_uint(OBJECT(machine->memdev),
                                                       "size", &error_abort);
    if (backend_size != machine->ram_size) {
        error_setg(errp, "Machine memory size does not match the size of the memory backend");
        return;
    }
} else if (machine_class->default_ram_id && machine->ram_size &&
           numa_uses_legacy_mem()) {
    if (object_property_find(object_get_objects_root(),
                             machine_class->default_ram_id)) {
        error_setg(errp, "object's id '%s' is reserved for the default"
                   " RAM backend, it can't be used for any other purposes",
                   machine_class->default_ram_id);
        error_append_hint(errp,
                          "Change the object's 'id' to something else or disable"
                          " automatic creation of the default RAM backend by setting"
                          " 'memory-backend=%s' with '-machine'.\n",
                          machine_class->default_ram_id);
        return;
    }
    if (!machine_class->create_default_memdev(current_machine, mem_path, errp)) {
        return;
    }
}
if (machine->numa_state) {
    numa_complete_configuration(machine);
    if (machine->numa_state->num_nodes) {
        machine_numa_finish_cpu_init(machine);
        if (machine_class->cpu_cluster_has_numa_boundary) {
            validate_cpu_cluster_to_numa_boundary(machine);
        }
    }
}
if (!machine->ram && machine->memdev) {
    machine->ram = machine_consume_memdev(machine, machine->memdev);
}
/* Check if the CPU type is supported */
if (machine->cpu_type && !is_cpu_type_supported(machine, errp)) {
    return;
}
if (machine->cgs) {
    /*
     * With confidential guests, the host can't see the real
     * contents of RAM, so there's no point in it trying to merge
     * areas.
     */
    machine_set_mem_merge(OBJECT(machine), false, &error_abort);
    /*
     * Virtio devices can't count on directly accessing guest
     * memory, so they need iommu_platform=on to use normal DMA
     * mechanisms. That requires also disabling legacy virtio
     * support for those virtio pci devices which allow it.
     */
    object_register_sugar_prop(TYPE_VIRTIO_PCI, "disable-legacy",
                               "on", true);
    object_register_sugar_prop(TYPE_VIRTIO_DEVICE, "iommu_platform",
                               "on", false);
}
if (!cpu->as) {
    /* If the target cpu hasn't set up any address spaces itself,
     * give it the default one.
     */
    cpu->num_ases = 1;
    cpu_address_space_init(cpu, 0, "cpu-memory", cpu->memory);
}
/* accelerators all implement the AccelOpsClass */
g_assert(cpus_accel != NULL && cpus_accel->create_vcpu_thread != NULL);
cpus_accel->create_vcpu_thread(cpu);
while (!cpu->created) {
    qemu_cond_wait(&qemu_cpu_cond, &bql);
}
}
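For the other side of that created/qemu_cpu_cond handshake, here is a rough sketch of what an accelerator's vCPU thread function does before it starts running guest code. It is loosely modelled on QEMU's TCG vCPU threads; the function name is made up, and the real threads do considerably more setup and error handling.

static void *demo_vcpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;

    rcu_register_thread();
    bql_lock();
    qemu_thread_get_self(cpu->thread);
    cpu->thread_id = qemu_get_thread_id();

    /* This is what releases the while (!cpu->created) wait shown above. */
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);

    while (!cpu->unplug) {
        /* ... execute guest code via the accelerator, then ... */
        qemu_wait_io_event(cpu);
    }

    bql_unlock();
    rcu_unregister_thread();
    return NULL;
}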
MemoryRegion iomem;
uint32_t flags;
uint32_t lcr;                           /* line control register */
uint32_t rsr;                           /* receive status / error clear register */
uint32_t cr;                            /* control register */
uint32_t dmacr;
uint32_t int_enabled;
uint32_t int_level;
uint32_t read_fifo[PL011_FIFO_DEPTH];   /* RX FIFO buffer */
uint32_t ilpr;
uint32_t ibrd;
uint32_t fbrd;
uint32_t ifl;
int read_pos;
int read_count;
int read_trigger;
CharBackend chr;                        /* backend connecting to a host terminal or file */
qemu_irq irq[6];                        /* interrupt output lines */
Clock *clk;
bool migrate_clk;
const unsigned char *id;
/*
 * Since some users embed this struct directly, we must
 * ensure that the C struct is at least as big as the Rust one.
 */
uint8_t padding_for_rust[16];
};
switch (offset >> 2) {
case 0: /* UARTDR (data register) */
    ch = value;
    pl011_write_txdata(s, ch);
    break;
case 1: /* UARTRSR/UARTECR */
    s->rsr = 0;
    break;
case 6: /* UARTFR */
    /* Writes to Flag register are ignored. */
    break;
case 8: /* UARTILPR */
    s->ilpr = value;
    break;
case 9: /* UARTIBRD */
    s->ibrd = value & IBRD_MASK;
    pl011_trace_baudrate_change(s);
    break;
case 10: /* UARTFBRD */
    s->fbrd = value & FBRD_MASK;
    pl011_trace_baudrate_change(s);
    break;
case 11: /* UARTLCR_H */
    /* Reset the FIFO state on FIFO enable or disable */
    if ((s->lcr ^ value) & LCR_FEN) {
        pl011_reset_rx_fifo(s);
        pl011_reset_tx_fifo(s);
    }
    if ((s->lcr ^ value) & LCR_BRK) {
        int break_enable = value & LCR_BRK;
        qemu_chr_fe_ioctl(&s->chr, CHR_IOCTL_SERIAL_SET_BREAK,
                          &break_enable);
        pl011_loopback_break(s, break_enable);
    }
    s->lcr = value;
    pl011_set_read_trigger(s);
    break;
case 12: /* UARTCR */
    /* ??? Need to implement the enable bit. */
    s->cr = value;
    pl011_loopback_mdmctrl(s);
    break;
case 13: /* UARTIFS */
    s->ifl = value;
    pl011_set_read_trigger(s);
    break;
case 14: /* UARTIMSC */
    s->int_enabled = value;
    pl011_update(s);
    break;
case 17: /* UARTICR */
    s->int_level &= ~value;
    pl011_update(s);
    break;
case 18: /* UARTDMACR */
    s->dmacr = value;
    if (value & 3) {
        qemu_log_mask(LOG_UNIMP, "pl011: DMA not implemented\n");
    }
    break;
default:
    qemu_log_mask(LOG_GUEST_ERROR,
                  "pl011_write: Bad offset 0x%x\n", (int)offset);
}
}
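To see who actually triggers this handler, here is a bare-metal guest-side sketch for the Arm virt board: every store to UARTDR below traps into pl011_write() with offset >> 2 == 0. The 0x09000000 base address and the FR/TXFF encoding are taken from the virt memory map and the PL011 specification, not from the code above, so treat them as assumptions.

#include <stdint.h>

/* PL011 on QEMU's Arm "virt" board; base address assumed to be 0x09000000. */
#define UART0_BASE   0x09000000UL
#define UARTDR       (*(volatile uint32_t *)(UART0_BASE + 0x00))  /* data register */
#define UARTFR       (*(volatile uint32_t *)(UART0_BASE + 0x18))  /* flag register */
#define UARTFR_TXFF  (1u << 5)             /* TX FIFO full, per the PL011 spec */

/* Each store to UARTDR lands in pl011_write() above with offset >> 2 == 0. */
static void uart_putc(char c)
{
    while (UARTFR & UARTFR_TXFF) {
        /* wait until the TX FIFO has room */
    }
    UARTDR = (uint32_t)c;
}

void uart_puts(const char *s)
{
    while (*s) {
        uart_putc(*s++);
    }
}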
static void pl011_receive(void *opaque, const uint8_t *buf, int size)
{
    trace_pl011_receive(size);

    /*
     * In loopback mode, the RX input signal is internally disconnected
     * from the entire receiving logics; thus, all inputs are ignored,
     * and BREAK detection on RX input signal is also not performed.
     */
    if (pl011_loopback_enabled(opaque)) {
        return;
    }
    for (int i = 0; i < size; i++) {
        pl011_fifo_rx_put(opaque, buf[i]);
    }
}
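pl011_receive() only runs after the chardev layer has asked the paired can-receive callback how much room is left in the RX FIFO. A simplified sketch of that callback, using the fields from the struct shown earlier: upstream adds tracing and derives the depth through a helper, so this is a paraphrase rather than the exact code.

static int pl011_can_receive(void *opaque)
{
    PL011State *s = (PL011State *)opaque;
    /* 16-byte FIFO when FIFOs are enabled in LCR_H, a single byte otherwise. */
    unsigned fifo_depth = (s->lcr & LCR_FEN) ? PL011_FIFO_DEPTH : 1;

    /* Report how many bytes pl011_fifo_rx_put() can still store. */
    return fifo_depth - s->read_count;
}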
Interrupt handling and state update
static void pl011_update(PL011State *s)
{
    uint32_t flags;
    int i;
    flags = s->int_level & s->int_enabled;
    trace_pl011_irq_state(flags != 0);
    for (i = 0; i < ARRAY_SIZE(s->irq); i++) {
        qemu_set_irq(s->irq[i], (flags & irqmask[i]) != 0); /* drive each interrupt line */
    }
}
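The irqmask[] table indexed here maps each of the six output lines to the interrupt sources it carries: entry 0 is the combined UARTINTR output, the rest are the individual RX, TX, receive-timeout, modem-status and error outputs. Paraphrased from the upstream model, it looks roughly like this:

static const uint32_t irqmask[] = {
    INT_E | INT_MS | INT_RT | INT_TX | INT_RX, /* combined interrupt */
    INT_RX,                                    /* receive            */
    INT_TX,                                    /* transmit           */
    INT_RT,                                    /* receive timeout    */
    INT_MS,                                    /* modem status       */
    INT_E,                                     /* errors             */
};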
/*
 * We purposely use a thread, so that users are forced to wait for the status
 * register.
 */
static void *edu_fact_thread(void *opaque)
{
    EduState *edu = opaque;
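From the guest's point of view this means polling: the driver writes the operand, then spins on the status register until the device's worker thread clears the busy bit. Below is a hedged sketch of that guest-side loop; the register offsets and the "computing" bit are taken from QEMU's EDU device spec as I recall it, so verify them against docs/specs/edu before relying on them.

#include <stdint.h>

/* Offsets and bits as described in the EDU spec; treat them as assumptions. */
#define EDU_REG_FACT         0x08   /* write n, read back n!          */
#define EDU_REG_STATUS       0x20   /* bit 0: computation in progress */
#define EDU_STATUS_COMPUTING 0x01u

/* 'bar0' is BAR0 of the EDU PCI device, already mapped by the guest driver. */
static uint32_t edu_factorial(volatile uint32_t *bar0, uint32_t n)
{
    bar0[EDU_REG_FACT / 4] = n;

    /* The device computes on its own thread (edu_fact_thread above), so the
     * guest has to wait until the busy bit clears; a real driver would sleep
     * or use the device interrupt instead of busy-waiting. */
    while (bar0[EDU_REG_STATUS / 4] & EDU_STATUS_COMPUTING) {
        /* spin */
    }
    return bar0[EDU_REG_FACT / 4];
}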
/*
 * If called from iothread context, wake the target cpu in
 * case it's halted.
 */
if (!qemu_cpu_is_self(cpu)) {
    qemu_cpu_kick(cpu);
} else {
    qatomic_set(&cpu->neg.icount_decr.u16.high, -1);
}
}
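The -1 written into icount_decr.u16.high is the same "leave the current TB soon" flag that cpu_exit() raises, and the smp_wmb() referenced by the comment in the next snippet lives there as well. Paraphrased from memory rather than quoted, cpu_exit() looks roughly like this:

/*
 * Paraphrase of QEMU's cpu_exit(): request that the vCPU leave its current
 * translation block.  The exit_request store must be visible before the
 * icount_decr flag that makes the TCG loop break out, which is what the
 * smp_wmb() enforces.
 */
void cpu_exit(CPUState *cpu)
{
    qatomic_set(&cpu->exit_request, 1);
    /* Ensure cpu_exec will see the exit request after TCG has exited. */
    smp_wmb();
    qatomic_set(&cpu->neg.icount_decr.u16.high, -1);
}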
static inline bool cpu_handle_interrupt(CPUState *cpu,
                                        TranslationBlock **last_tb)
{
    /*
     * If we have requested custom cflags with CF_NOIRQ we should
     * skip checking here. Any pending interrupts will get picked up
     * by the next TB we execute under normal cflags.
     */
    if (cpu->cflags_next_tb != -1 && cpu->cflags_next_tb & CF_NOIRQ) {
        return false;
    }
    /* Clear the interrupt flag now since we're processing
     * cpu->interrupt_request and cpu->exit_request.
     * Ensure zeroing happens before reading cpu->exit_request or
     * cpu->interrupt_request (see also smp_wmb in cpu_exit())
     */
    qatomic_set_mb(&cpu->neg.icount_decr.u16.high, 0);
    if (unlikely(qatomic_read(&cpu->interrupt_request))) {
        int interrupt_request;
        bql_lock();
        interrupt_request = cpu->interrupt_request;
        if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
            /* Mask out external interrupts for this step. */
            interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
        }
        if (interrupt_request & CPU_INTERRUPT_DEBUG) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
            cpu->exception_index = EXCP_DEBUG;
            bql_unlock();
            return true;
        }
#if !defined(CONFIG_USER_ONLY)
        if (replay_mode == REPLAY_MODE_PLAY && !replay_has_interrupt()) {
            /* Do nothing */
        } else if (interrupt_request & CPU_INTERRUPT_HALT) {
            replay_interrupt();
            cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
            cpu->halted = 1;
            cpu->exception_index = EXCP_HLT;
            bql_unlock();
            return true;
        } else {
            const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;
            if (interrupt_request & CPU_INTERRUPT_RESET) {
                replay_interrupt();
                tcg_ops->cpu_exec_reset(cpu);
                bql_unlock();
                return true;
            }
            /*
             * The target hook has 3 exit conditions:
             * False when the interrupt isn't processed,
             * True when it is, and we should restart on a new TB,
             * and via longjmp via cpu_loop_exit.
             */
            if (tcg_ops->cpu_exec_interrupt(cpu, interrupt_request)) { /* architecture-specific interrupt handling */
                if (!tcg_ops->need_replay_interrupt ||
                    tcg_ops->need_replay_interrupt(interrupt_request)) {
                    replay_interrupt();
                }
                /*
                 * After processing the interrupt, ensure an EXCP_DEBUG is
                 * raised when single-stepping so that GDB doesn't miss the
                 * next instruction.
                 */
                if (unlikely(cpu->singlestep_enabled)) {
                    cpu->exception_index = EXCP_DEBUG;
                    bql_unlock();
                    return true;
                }
                cpu->exception_index = -1;
                *last_tb = NULL;
            }
            /* The target hook may have updated the 'cpu->interrupt_request';
             * reload the 'interrupt_request' value */
            interrupt_request = cpu->interrupt_request;
        }
#endif /* !CONFIG_USER_ONLY */
        if (interrupt_request & CPU_INTERRUPT_EXITTB) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
            /* ensure that no TB jump will be modified as
               the program flow was changed */
            *last_tb = NULL;
        }
        /* If we exit via cpu_loop_exit/longjmp it is reset in cpu_exec */
        bql_unlock();
    }
    /* Finally, check if we need to exit to the main loop. */
    if (unlikely(qatomic_read(&cpu->exit_request)) || icount_exit_request(cpu)) {
        qatomic_set(&cpu->exit_request, 0);
        if (cpu->exception_index == -1) {
            cpu->exception_index = EXCP_INTERRUPT;
        }
        return true;
    }