// Intentionally empty override — presumably the base device's resource
// release hook; this CPU has nothing to free here. TODO confirm against
// the base class declaration (outside this view).
40 void free()
override { }
// Deadline timer bounding how far the vCPU runs ahead of SystemC time;
// created in before_end_of_elaboration(), re-armed in rearm_deadline_timer().
44 std::shared_ptr<qemu::Timer> m_deadline_timer;
// External SystemC events the CPU loop waits on (the kick event is OR'ed
// into this list in the constructor).
50 sc_core::sc_event_or_list m_external_ev;
// Handle of the SystemC thread spawned to run mainloop_thread_coroutine().
51 sc_core::sc_process_handle m_sc_thread;
// Per-CPU "kicked" flag plus the lock/condvar used to wait for it
// (multi-TCG mode; TCG_SINGLE uses the instance-global g_signaled instead).
53 std::atomic<bool> m_signaled;
54 std::mutex m_signaled_lock;
55 std::condition_variable m_signaled_cond;
// Quantum keeper implementing the configured synchronization policy.
57 std::shared_ptr<gs::tlm_quantumkeeper_extended> m_qk;
// Lifecycle flags: m_finished is checked throughout to bail out early
// during shutdown; m_started gates end_of_loop_cb().
58 std::atomic<bool> m_finished =
false;
59 std::atomic<bool> m_started =
false;
// Reset state machine driven by reset_cb().
60 enum { none, start_reset, hold_reset, finish_reset } m_resetting = none;
// Held while the loop body runs (see end_of_loop_cb); the destructor spins
// on try_lock() until it is released, making deletion safe.
63 std::mutex m_can_delete;
// Count of tracked async jobs not yet completed, plus the lock/condvar the
// destructor uses to wait (bounded by ASYNC_WORK_TIMEOUT_MS) for them.
81 std::atomic<int> m_async_work_outstanding{ 0 };
82 std::mutex m_async_work_mutex;
83 std::condition_variable m_async_work_cv;
// Upper bound on the destructor's wait for outstanding async work.
84 static constexpr int ASYNC_WORK_TIMEOUT_MS = 500;
// Wrap an async job so outstanding work is counted; the destructor waits
// (with a timeout) for this counter to drain before tearing the CPU down.
// Returns an empty job when simulation has already finished.
92 qemu::Cpu::AsyncJobFn make_tracked_async_job(qemu::Cpu::AsyncJobFn job)
95 std::lock_guard<std::mutex>
lock(m_async_work_mutex);
// Account for the job before it is handed to QEMU.
96 m_async_work_outstanding++;
// NOTE(review): this early return happens AFTER the increment, and the
// matching decrement lives only in the lambda below, which is never
// returned in this case — unless an elided line (orig. 97) compensates,
// the counter leaks and the destructor's wait_for() will always time out.
// Verify against the full source.
98 if (m_finished)
return {};
99 return [
this, job = std::move(job)]()
mutable {
// (Job execution elided from this view.) Once the wrapped job completes,
// drop the outstanding count and wake anyone waiting for it to hit zero.
111 std::lock_guard<std::mutex>
lock(m_async_work_mutex);
112 m_async_work_outstanding--;
114 m_async_work_cv.notify_all();
// Instantiate m_qk for the configured sync policy; an unrecognized policy
// is fatal. (The policy-selection branches are elided from this view.)
121 void create_quantum_keeper()
126 SCP_FATAL(()) <<
"qbox : Sync policy unknown";
// Choose coroutine vs. OS-thread execution from the quantum keeper's
// thread type. (SYSTEMC_THREAD branch body elided from this view.)
136 void set_coroutine_mode()
138 switch (m_qk->get_thread_type()) {
139 case gs::SyncPolicy::SYSTEMC_THREAD:
143 case gs::SyncPolicy::OS_THREAD:
// OS-thread policy: QEMU runs on its own thread, not a SystemC coroutine.
144 m_coroutines =
false;
// Fragment of a wake-up/kick path (the enclosing function's signature is
// outside this view — presumably kick_cb; confirm). Multi-TCG mode: wake
// waiters on this CPU's condvar. TCG_SINGLE mode: raise the instance-wide
// g_signaled flag and wake waiters on the shared condvar.
167 if (m_inst.
get_tcg_mode() != QemuInstance::TCG_SINGLE) {
168 std::lock_guard<std::mutex>
lock(m_signaled_lock);
170 m_signaled_cond.notify_all();
172 std::lock_guard<std::mutex>
lock(m_inst.g_signaled_lock);
173 m_inst.g_signaled =
true;
174 m_inst.g_signaled_cond.notify_all();
// Watch the external SystemC event list on behalf of the CPU loop.
// (Most of the body is elided; the visible lines — orig. 196-198 — appear
// to be a deadline-kick callback defined within it; confirm.)
182 void watch_external_ev()
196 SCP_TRACE(())(
"QEMU deadline KICK callback");
// Forward the kick to QEMU unless the simulation is shutting down.
198 if (!m_finished) m_qemu_kick_ev.async_notify();
// Fired when the QEMU deadline timer expires: re-arm the timer, credit any
// virtual-time lead to the quantum keeper, then block until kicked.
// (Large parts of the body are elided from this view.)
210 void deadline_timer_cb()
212 SCP_TRACE(())(
"QEMU deadline timer callback");
217 rearm_deadline_timer();
// If QEMU virtual time ("now" — presumably ns; set on an elided line,
// confirm) is ahead of SystemC time, hand the difference to the QK.
221 sc_core::sc_time
sc_t = sc_core::sc_time_stamp();
222 if (sc_core::sc_time(
now, sc_core::SC_NS) >
sc_t) {
223 m_qk->set(sc_core::sc_time(
now, sc_core::SC_NS) -
sc_t);
241 if (m_finished)
return;
// Perform the wait on external events from the SystemC thread.
244 m_on_sysc.
run_on_sysc([
this]() { wait(m_external_ev); });
246 if (m_inst.
get_tcg_mode() != QemuInstance::TCG_SINGLE) {
// Multi-TCG: block until this CPU is kicked or simulation finishes.
247 std::unique_lock<std::mutex>
lock(m_signaled_lock);
248 m_signaled_cond.wait(
lock, [
this] {
return m_signaled || m_finished; });
// TCG_SINGLE: block on the instance-global signal, then consume it.
251 std::unique_lock<std::mutex>
lock(m_inst.g_signaled_lock);
252 m_inst.g_signaled_cond.wait(
lock, [
this] {
return m_inst.g_signaled || m_finished; });
253 m_inst.g_signaled =
false;
256 if (m_finished)
return;
// Re-arm the deadline timer one quantum (m_quantum_ns) of virtual time
// from QEMU's current virtual clock.
264 void rearm_deadline_timer()
268 m_deadline_timer->mod(m_inst.
get().get_virtual_clock() + m_quantum_ns);
// Bring the vCPU into a runnable state before handing control to QEMU.
// (Body heavily elided from this view; loop bodies are not visible.)
275 void prepare_run_cpu()
// TCG_SINGLE: the whole instance must be runnable, not just this CPU.
284 if (m_inst.
get_tcg_mode() == QemuInstance::TCG_SINGLE) {
285 while (!m_inst.can_run() && !m_finished) {
289 while (!m_cpu.can_run() && !m_finished) {
290 if (!m_coroutines && !m_inst.is_tcg_enabled()) {
294 SCP_TRACE(())(
"Stopping QK (accelerator)");
// Allow the vCPU to execute again.
303 m_cpu.set_soft_stopped(
false);
310 m_inst.
get().lock_iothread();
// Iterate over sibling devices of this instance; the condition detects
// that virtual time has not advanced ("last_vclock" is set on an elided
// line — confirm) while this CPU is still runnable.
324 for (
int i = 0;
i < m_inst.number_devices();
i++) {
325 if ((m_inst.
get().get_virtual_clock() ==
last_vclock) && (m_cpu.can_run())) {
// Re-synchronize SystemC/QK time with the QEMU kernel after a run slice.
// (Parts of the body are elided from this view.)
335 void sync_with_kernel()
// Stop the vCPU while the time sync happens.
339 m_cpu.set_soft_stopped(
true);
341 m_inst.
get().unlock_iothread();
342 if (m_finished)
return;
// If QEMU virtual time ("now" — presumably ns; set on an elided line,
// confirm) is ahead of SystemC time, hand the difference to the QK.
346 sc_core::sc_time
sc_t = sc_core::sc_time_stamp();
347 if (sc_core::sc_time(
now, sc_core::SC_NS) >
sc_t) {
348 m_qk->set(sc_core::sc_time(
now, sc_core::SC_NS) -
sc_t);
// Invoked by QEMU at the end of a CPU execution-loop iteration.
// (Parts of the body are elided from this view.)
359 void end_of_loop_cb()
362 if (m_finished)
return;
// Nothing to do until the CPU loop has actually started.
368 if (!m_started)
return;
// Coroutine mode: yield control back to the SystemC side.
371 m_inst.
get().coroutine_yield();
// Hold the deletion lock while the loop body runs so the destructor
// (which spins on try_lock) cannot tear the object down mid-iteration.
373 std::lock_guard<std::mutex>
lock(m_can_delete);
// SystemC thread body driving the vCPU (spawned in
// before_end_of_elaboration). Loops until m_finished is set.
382 void mainloop_thread_coroutine()
// Make this thread known to QEMU before entering the run loop.
384 m_cpu.register_thread();
386 for (; !m_finished;) {
// TCP port for QEMU's gdb server; 0 (the default) means disabled — only a
// non-default value starts the server (see end_of_elaboration).
394 cci::cci_param<unsigned int> p_gdb_port;
// Constructor initializer-list fragment and body (the signature and some
// initializers are elided from this view): wires up halt/reset callbacks,
// the quantum keeper, coroutine mode, and registers with the QemuInstance.
405 , m_qemu_kick_ev(
false)
407 , p_gdb_port(
"gdb_port", 0,
"Wait for gdb connection on TCP port <gdb_port>")
408 , socket(
"mem", *
this, inst)
410 using namespace std::placeholders;
// The kick event is one of the external events the CPU loop waits on.
412 m_external_ev |= m_qemu_kick_ev;
414 auto haltcb = std::bind(&QemuCpu::halt_cb,
this,
_1);
415 halt.register_value_changed_cb(
haltcb);
416 auto resetcb = std::bind(&QemuCpu::reset_cb,
this,
_1);
417 reset.register_value_changed_cb(
resetcb);
419 create_quantum_keeper();
420 set_coroutine_mode();
426 m_inst.add_dev(
this);
// Don't let a pending start-reset notification keep simulation alive.
428 m_start_reset_done_ev.async_detach_suspending();
// Destructor fragment (signature elided from this view): wait — bounded by
// ASYNC_WORK_TIMEOUT_MS — for tracked async jobs to drain, then wait for
// the CPU loop to release m_can_delete, then unregister the device.
441 std::unique_lock<std::mutex>
lock(m_async_work_mutex);
442 if (!m_async_work_cv.wait_for(
lock, std::chrono::milliseconds(ASYNC_WORK_TIMEOUT_MS),
443 [
this] { return m_async_work_outstanding == 0; })) {
// Timed out: proceed with destruction anyway, but leave a trace.
444 SCP_WARN(()) <<
"Timeout waiting for " << m_async_work_outstanding.load()
445 <<
" outstanding async work(s) to complete during destruction";
// Spin until the main loop releases the deletion lock (loop body elided).
451 while (!m_can_delete.try_lock()) {
454 m_inst.del_dev(
this);
// SystemC end-of-simulation hook: clear QEMU-side callbacks under the
// iothread lock and unplug the vCPU. (Parts of the body are elided.)
459 void end_of_simulation()
override
461 if (m_finished)
return;
// Nothing to tear down if the vCPU was never (or is no longer) valid.
464 if (!m_cpu.valid()) {
485 m_inst.
get().lock_iothread();
487 m_cpu.clear_callbacks();
491 m_inst.
get().unlock_iothread();
509 m_inst.
get().unlock_iothread();
511 m_cpu.set_unplug(
true);
// Report whether the wrapped QEMU vCPU is currently runnable.
517 bool can_run()
override {
return m_cpu.can_run(); }
// Elaboration hook: spawn the CPU main-loop thread, initialize the memory
// socket, and install the QEMU-side callbacks and the deadline timer.
519 void before_end_of_elaboration()
override
521 QemuDevice::before_end_of_elaboration();
526 m_sc_thread = sc_core::sc_spawn(std::bind(&QemuCpu::mainloop_thread_coroutine,
this));
529 socket.init(m_dev,
"memory");
// Start soft-stopped; the vCPU is released later (see start_of_simulation).
531 m_cpu.set_soft_stopped(
true);
533 m_cpu.set_end_of_loop_callback(std::bind(&QemuCpu::end_of_loop_cb,
this));
534 m_cpu.set_kick_callback(std::bind(&QemuCpu::kick_cb,
this));
536 m_deadline_timer = m_inst.
get().timer_new();
537 m_deadline_timer->set_callback(std::bind(&QemuCpu::deadline_timer_cb,
this));
// Associate the TLM payload hint extension with this vCPU (used by the
// initiator_*_tlm_payload hooks below).
539 m_cpu_hint_ext.set_cpu(m_cpu);
// halt signal callback — presumably: on halt, cancel the deadline timer;
// on resume, re-arm it (the branch on `val` is elided from this view —
// confirm). Finishes by kicking QEMU so the change takes effect.
542 void halt_cb(
const bool&
val)
547 m_deadline_timer->del();
551 rearm_deadline_timer();
553 m_inst.
get().lock_iothread();
555 m_inst.
get().unlock_iothread();
556 m_qemu_kick_ev.async_notify();
// reset signal callback: drives the m_resetting state machine. The
// start-reset phase runs as a tracked async-safe job on the vCPU; the
// SystemC side waits on m_start_reset_done_ev before continuing with the
// hold/finish phases. (Parts of the body are elided from this view.)
561 void reset_cb(
const bool&
val)
564 if (m_finished)
return;
// Ignore a new reset while one is already in flight.
567 if (m_resetting != none)
return;
569 m_resetting = start_reset;
570 m_cpu.async_safe_run(make_tracked_async_job([
this] {
// Runs on the vCPU: advance to hold_reset and wake the SystemC waiter.
572 m_resetting = hold_reset;
573 m_start_reset_done_ev.async_notify();
576 if (m_resetting == none)
return;
// Wait until the async job has moved the state past start_reset.
577 while (m_resetting == start_reset) {
579 sc_core::wait(m_start_reset_done_ev);
581 m_inst.
get().lock_iothread();
583 m_inst.
get().unlock_iothread();
587 m_qemu_kick_ev.async_notify();
591 m_qemu_kick_ev.async_notify();
// Elaboration hook: start QEMU's gdb server when the user configured a
// non-default gdb_port.
593 virtual void end_of_elaboration()
override
595 QemuDevice::end_of_elaboration();
597 if (!p_gdb_port.is_default_value()) {
598 std::stringstream
ss;
599 SCP_INFO(()) <<
"Starting gdb server on TCP port " << p_gdb_port;
// QEMU expects a "tcp::<port>" listen address.
600 ss <<
"tcp::" << p_gdb_port;
601 m_inst.
get().start_gdb_server(
ss.str());
// Start-of-simulation hook: cache the global TLM quantum in nanoseconds
// and, depending on TCG mode, release the vCPU and arm the first deadline.
// (Parts of the body are elided from this view.)
605 virtual void start_of_simulation()
override
// Convert the global quantum (seconds) to ns once, up front.
607 m_quantum_ns =
int64_t(tlm_utils::tlm_quantumkeeper::get_global_quantum().
to_seconds() * 1
e9);
609 QemuDevice::start_of_simulation();
610 if (m_inst.
get_tcg_mode() == QemuInstance::TCG_SINGLE) {
611 if (m_inst.can_run()) {
614 }
else if (!m_coroutines) {
// Non-coroutine mode: unstop the vCPU and arm the deadline timer while
// holding the QEMU iothread lock.
646 m_inst.
get().lock_iothread();
647 m_cpu.set_soft_stopped(
false);
648 rearm_deadline_timer();
650 m_inst.
get().unlock_iothread();
// Mark vCPU state dirty (semantics per QEMU's vcpu_dirty flag — confirm).
657 m_cpu.set_vcpu_dirty(
true);
// Attach the CPU hint extension (bound to m_cpu in
// before_end_of_elaboration) to every outgoing TLM payload.
661 virtual void initiator_customize_tlm_payload(TlmPayload&
payload)
override
664 payload.set_extension(&m_cpu_hint_ext);
// Detach the CPU hint extension before the payload is recycled — the
// extension is a class member, so the payload must not end up owning it.
667 virtual void initiator_tidy_tlm_payload(TlmPayload&
payload)
override {
payload.clear_extension(&m_cpu_hint_ext); }
// Report the initiator's current local time offset (from the quantum
// keeper). (Parts of the body — the branch structure between the two
// returns — are elided from this view.)
673 virtual sc_core::sc_time initiator_get_local_time()
override
675 using sc_core::sc_time;
676 using sc_core::SC_NS;
// During shutdown, report no local offset.
680 if (m_finished)
return sc_core::SC_ZERO_TIME;
683 sc_core::sc_time
sc_t = sc_core::sc_time_stamp();
686 return m_qk->get_local_time();
688 return sc_core::SC_ZERO_TIME;
// Accept a local-time update from the QEMU side; when the quantum keeper's
// policy requires it, trigger a sync. (Body after the need_sync() check is
// elided from this view.)
696 virtual void initiator_set_local_time(
const sc_core::sc_time&
t)
override
698 if (m_finished)
return;
701 if (m_qk->need_sync()) {
// Queue an async job on the vCPU, wrapped so the destructor can wait for
// its completion; jobs are dropped entirely once simulation has finished.
711 virtual void initiator_async_run(qemu::Cpu::AsyncJobFn job)
override
713 if (!m_finished) m_cpu.async_run(make_tracked_async_job(std::move(job)));