quic/qbox
Loading...
Searching...
No Matches
cpu.h
1/*
2 * This file is part of libqbox
3 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All Rights Reserved.
4 * Author: GreenSocs 2021
5 *
6 * SPDX-License-Identifier: BSD-3-Clause
7 */
8
9#ifndef _LIBQBOX_COMPONENTS_CPU_CPU_H
10#define _LIBQBOX_COMPONENTS_CPU_CPU_H
11
12#include <sstream>
13#include <mutex>
14#include <condition_variable>
15#include <atomic>
16#include <chrono>
17
18#include <tlm>
19#include <tlm_utils/simple_initiator_socket.h>
20#include <tlm_utils/simple_target_socket.h>
21#include <cci_configuration>
22
23#include <libgssync.h>
24
25#include "device.h"
26#include "ports/initiator.h"
27#include "tlm-extensions/qemu-cpu-hint.h"
28#include "ports/qemu-target-signal-socket.h"
29
31{
32protected:
33 /*
34 * We have a unique copy per CPU of this extension, which is not dynamically allocated.
35 * We really don't want the default implementation to call delete on it...
36 */
38 {
39 public:
40 void free() override { /* leave my extension alone, TLM */ }
41 };
42
43 gs::runonsysc m_on_sysc;
44 std::shared_ptr<qemu::Timer> m_deadline_timer;
45 bool m_coroutines;
46
47 qemu::Cpu m_cpu;
48
49 gs::async_event m_qemu_kick_ev;
50 sc_core::sc_event_or_list m_external_ev;
51 sc_core::sc_process_handle m_sc_thread; // used for co-routines
52
53 std::atomic<bool> m_signaled;
54 std::mutex m_signaled_lock;
55 std::condition_variable m_signaled_cond;
56
57 std::shared_ptr<gs::tlm_quantumkeeper_extended> m_qk;
58 std::atomic<bool> m_finished = false;
59 std::atomic<bool> m_started = false;
60 enum { none, start_reset, hold_reset, finish_reset } m_resetting = none;
61 gs::async_event m_start_reset_done_ev;
62
63 std::mutex m_can_delete;
64 QemuCpuHintTlmExtension m_cpu_hint_ext;
65
66 uint64_t m_quantum_ns; // For convenience
67
68 /*
69 * Outstanding async work tracking.
70 *
71 * When a job is queued via async_run or async_safe_run we increment this
72 * counter. The wrapper decrements it (and signals the condvar) once the
73 * job has finished executing. The destructor waits for the counter to
74 * reach zero before tearing down the object so that any in-flight job
75 * cannot call back into a destroyed member (e.g. m_start_reset_done_ev).
76 *
77 * A timeout is used as a safety valve: if the simulation is exiting
78 * because the async job itself faulted and will never complete, we do not
79 * want to hang forever.
80 */
81 std::atomic<int> m_async_work_outstanding{ 0 };
82 std::mutex m_async_work_mutex;
83 std::condition_variable m_async_work_cv;
84 static constexpr int ASYNC_WORK_TIMEOUT_MS = 500;
85
86 /*
87 * Wrap @job so that m_async_work_outstanding is incremented before the
88 * job is queued and decremented (with a condvar notification) after it
89 * returns. Must only be called when !m_finished so that the counter
90 * cannot be incremented after the destructor has started waiting.
91 */
92 qemu::Cpu::AsyncJobFn make_tracked_async_job(qemu::Cpu::AsyncJobFn job)
93 {
94 {
95 std::lock_guard<std::mutex> lock(m_async_work_mutex);
96 m_async_work_outstanding++;
97 }
98 if (m_finished) return {}; // already shutting down
99 return [this, job = std::move(job)]() mutable {
100 /*
101 * If the CPU is already shutting down (m_finished set by
102 * end_of_simulation()), skip the job body entirely. The job
103 * will never be able to complete safely anyway (the CPU is being
104 * halted/unplugged), and skipping lets the destructor's wait_for
105 * exit immediately rather than burning the full timeout per CPU.
106 */
107 if (!m_finished) {
108 job();
109 }
110 {
111 std::lock_guard<std::mutex> lock(m_async_work_mutex);
112 m_async_work_outstanding--;
113 }
114 m_async_work_cv.notify_all();
115 };
116 }
117
118 /*
119 * Request quantum keeper from instance
120 */
121 void create_quantum_keeper()
122 {
123 m_qk = m_inst.create_quantum_keeper();
124
125 if (!m_qk) {
126 SCP_FATAL(()) << "qbox : Sync policy unknown";
127 }
128
129 m_qk->reset();
130 }
131
132 /*
133 * Given the quantum keeper nature (synchronous or asynchronous) and the
134 * p_icount parameter, we can configure the QEMU instance accordingly.
135 */
136 void set_coroutine_mode()
137 {
138 switch (m_qk->get_thread_type()) {
139 case gs::SyncPolicy::SYSTEMC_THREAD:
140 m_coroutines = true;
141 break;
142
143 case gs::SyncPolicy::OS_THREAD:
144 m_coroutines = false;
145 break;
146 }
147 }
148
149 /*
150 * ---- CPU loop related methods ----
151 */
152
153 /*
154 * Called by watch_external_ev and kick_cb in MTTCG mode. This keeps track
155 * of an external event in case the CPU thread just released the iothread
156 * and is going to call wait_for_work. This is needed to avoid missing an
157 * event and going to sleep while we should effectively wake-up.
158 *
159 * The coroutine mode does not use this method and use the SystemC kernel
160 * as a mean of synchronization. If an asynchronous event is triggered
161 * while the CPU thread go to sleep, the fact that the CPU thread is also
162 * the SystemC thread will ensure correct ordering of the events.
163 */
164 void set_signaled()
165 {
166 assert(!m_coroutines);
167 if (m_inst.get_tcg_mode() != QemuInstance::TCG_SINGLE) {
168 std::lock_guard<std::mutex> lock(m_signaled_lock);
169 m_signaled = true;
170 m_signaled_cond.notify_all();
171 } else {
172 std::lock_guard<std::mutex> lock(m_inst.g_signaled_lock);
173 m_inst.g_signaled = true;
174 m_inst.g_signaled_cond.notify_all();
175 }
176 }
177
178 /*
179 * SystemC thread watching the m_external_ev event list. Only used in MTTCG
180 * mode.
181 */
182 void watch_external_ev()
183 {
184 for (;;) {
185 wait(m_external_ev);
186 set_signaled();
187 }
188 }
189
190 /*
191 * Called when the CPU is kicked. We notify the corresponding async event
192 * to wake the CPU up if it was sleeping waiting for work.
193 */
194 void kick_cb()
195 {
196 SCP_TRACE(())("QEMU deadline KICK callback");
197 if (m_coroutines) {
198 if (!m_finished) m_qemu_kick_ev.async_notify();
199 } else {
200 set_signaled();
201 }
202 }
203
204 /*
205 * Called by the QEMU iothread when the deadline timer expires. We kick the
206 * CPU out of its execution loop for it to call the end_of_loop_cb callback.
207 * However, we should also handle the case that qemu is currently in 'sync'
208 * - by setting the time here, we will nudge the sync thread.
209 */
210 void deadline_timer_cb()
211 {
212 SCP_TRACE(())("QEMU deadline timer callback");
213 // All syncing will be done in end_of_loop_cb
214 m_cpu.kick();
215 // Rearm timer for next time ....
216 if (!m_finished) {
217 rearm_deadline_timer();
218
219 /* Take this opportunity to set the time */
220 int64_t now = m_inst.get().get_virtual_clock();
221 sc_core::sc_time sc_t = sc_core::sc_time_stamp();
222 if (sc_core::sc_time(now, sc_core::SC_NS) > sc_t) {
223 m_qk->set(sc_core::sc_time(now, sc_core::SC_NS) - sc_t);
224 }
225 }
226 }
227
228 /*
229 * The CPU does not have work anymore. Pause the CPU thread until we have
230 * some work to do.
231 *
232 * - In coroutine mode, this method runs a wait on the SystemC kernel,
233 * waiting for the m_external_ev list.
234 * - In MTTCG mode, we wait on the m_signaled_cond condition, signaled when
235 * set_signaled is called.
236 */
    void wait_for_work()
    {
        SCP_TRACE(())("Wait for work");
        /* Stop the quantum keeper first: an idle CPU must not hold (or
         * block waiting for) run budget. */
        m_qk->stop();
        if (m_finished) return;

        if (m_coroutines) {
            /* Coroutine mode: block directly on the SystemC kernel. */
            m_on_sysc.run_on_sysc([this]() { wait(m_external_ev); });
        } else {
            if (m_inst.get_tcg_mode() != QemuInstance::TCG_SINGLE) {
                /* MTTCG: per-CPU signal flag. The predicate also checks
                 * m_finished so end_of_simulation() can unblock us. */
                std::unique_lock<std::mutex> lock(m_signaled_lock);
                m_signaled_cond.wait(lock, [this] { return m_signaled || m_finished; });
                m_signaled = false;
            } else {
                /* TCG_SINGLE: one vCPU thread for all CPUs, so the signal
                 * state is shared on the instance. */
                std::unique_lock<std::mutex> lock(m_inst.g_signaled_lock);
                m_inst.g_signaled_cond.wait(lock, [this] { return m_inst.g_signaled || m_finished; });
                m_inst.g_signaled = false;
            }
        }
        /* Re-check: the wakeup may have been the shutdown signal. */
        if (m_finished) return;
        SCP_TRACE(())("Have work, running CPU");
        m_qk->start();
    }
260
261 /*
262 * Set the deadline timer to trigger at the end of the time budget
263 */
264 void rearm_deadline_timer()
265 {
266 // This is a simple "every quantum" tick. Whether the QK makes use of it or not
267 // is down to the sync policy
268 m_deadline_timer->mod(m_inst.get().get_virtual_clock() + m_quantum_ns);
269 }
270
    /*
     * Called before running the CPU: wait until there is work to do, then
     * lock the BQL. The QEMU CPU loop expects to be entered with the
     * iothread mutex held; it is released on the way back, in
     * sync_with_kernel().
     */
    void prepare_run_cpu()
    {
        SCP_TRACE(())("Prepare run");
        if (m_inst.get_tcg_mode() == QemuInstance::TCG_SINGLE) {
            /* Single vCPU thread: runnable-ness is a property of the whole
             * instance, not of this CPU alone. */
            while (!m_inst.can_run() && !m_finished) {
                wait_for_work();
            }
        } else {
            while (!m_cpu.can_run() && !m_finished) {
                if (!m_coroutines && !m_inst.is_tcg_enabled()) {
                    // For hardware accelerators (KVM/HVF), break back into the
                    // QEMU loop so the vCPU thread can handle signals delivered
                    // via ioctl. TCG does not need this.
                    SCP_TRACE(())("Stopping QK (accelerator)");
                    m_qk->stop();
                    break;
                }
                wait_for_work();
            }
        }

        /* Only clear soft-stop once start_of_simulation() has released the CPU. */
        if (m_started) {
            m_cpu.set_soft_stopped(false);
        }
        /*
         * The QEMU CPU loop expect us to enter it with the iothread mutex locked.
         * It is then unlocked when we come back from the CPU loop, in
         * sync_with_kernel().
         */
        m_inst.get().lock_iothread();
    }
312
313 /*
314 * Run the CPU loop. Only used in coroutine mode.
315 */
316 void run_cpu_loop()
317 {
318 auto last_vclock = m_inst.get().get_virtual_clock();
319 m_cpu.loop();
320 /*
321 * Workaround in icount mode: sometimes, the CPU does not execute
322 * on the first call of run_loop(). Give it a second chance.
323 */
324 for (int i = 0; i < m_inst.number_devices(); i++) {
325 if ((m_inst.get().get_virtual_clock() == last_vclock) && (m_cpu.can_run())) {
326 m_cpu.loop();
327 } else
328 break;
329 }
330 }
331
    /*
     * Called after a CPU loop run. It synchronizes with the kernel.
     */
    void sync_with_kernel()
    {
        /* Sample the virtual clock while we still hold the BQL. */
        int64_t now = m_inst.get().get_virtual_clock();

        m_cpu.set_soft_stopped(true);

        /* Release the BQL taken in prepare_run_cpu(). */
        m_inst.get().unlock_iothread();
        if (m_finished) return;
        if (!m_coroutines) {
            m_qk->start(); // we may have switched the QK off, so switch it on before setting
        }
        /* Push any QEMU-side time advance into the quantum keeper. */
        sc_core::sc_time sc_t = sc_core::sc_time_stamp();
        if (sc_core::sc_time(now, sc_core::SC_NS) > sc_t) {
            m_qk->set(sc_core::sc_time(now, sc_core::SC_NS) - sc_t);
        }
        // Important to allow QK to notify itself if it's waiting.
        m_qk->sync();
    }
353
    /*
     * Callback called when the CPU exits its execution loop. In coroutine
     * mode, we yield here to come back to run_cpu_loop(). In TCG thread mode,
     * we use this hook to synchronize with the kernel.
     */
    void end_of_loop_cb()
    {
        SCP_TRACE(())("End of loop");
        if (m_finished) return;
        /*
         * In MTTCG mode, vCPU threads are created during elaboration and can
         * call this callback before start_of_simulation() has completed.
         * Skip sync_with_kernel/prepare_run_cpu until fully initialized.
         */
        if (!m_started) return;

        if (m_coroutines) {
            m_inst.get().coroutine_yield();
        } else {
            /* Holding m_can_delete blocks the destructor while we sync and
             * re-enter the run loop. */
            std::lock_guard<std::mutex> lock(m_can_delete);
            sync_with_kernel();
            prepare_run_cpu();
        }
    }
378
379 /*
380 * SystemC thread entry when running in coroutine mode.
381 */
382 void mainloop_thread_coroutine()
383 {
384 m_cpu.register_thread();
385
386 for (; !m_finished;) {
387 prepare_run_cpu();
388 run_cpu_loop();
389 sync_with_kernel();
390 }
391 }
392
393public:
394 cci::cci_param<unsigned int> p_gdb_port;
395
396 /* The default memory socket. Mapped to the default CPU address space in QEMU */
400
    /*
     * Construct a CPU on QEMU instance @inst.
     *
     * @name      SystemC module name.
     * @inst      QEMU instance this CPU belongs to.
     * @type_name QOM type prefix; the underlying QEMU device type is
     *            "<type_name>-cpu".
     */
    QemuCpu(const sc_core::sc_module_name& name, QemuInstance& inst, const std::string& type_name)
        : QemuDevice(name, inst, (type_name + "-cpu").c_str())
        , halt("halt")
        , reset("reset")
        , m_qemu_kick_ev(false)
        , m_signaled(false)
        , p_gdb_port("gdb_port", 0, "Wait for gdb connection on TCP port <gdb_port>")
        , socket("mem", *this, inst)
    {
        using namespace std::placeholders;

        /* A kick must be able to wake the CPU like any other external event. */
        m_external_ev |= m_qemu_kick_ev;

        auto haltcb = std::bind(&QemuCpu::halt_cb, this, _1);
        halt.register_value_changed_cb(haltcb);
        auto resetcb = std::bind(&QemuCpu::reset_cb, this, _1);
        reset.register_value_changed_cb(resetcb);

        create_quantum_keeper();
        set_coroutine_mode();

        /* MTTCG mode needs a SystemC thread to forward external events. */
        if (!m_coroutines) {
            SC_THREAD(watch_external_ev);
        }

        m_inst.add_dev(this);

        /* The reset-done event must not keep the simulation alive on its own. */
        m_start_reset_done_ev.async_detach_suspending();
    }
430
    virtual ~QemuCpu()
    {
        end_of_simulation(); // catch the case we exited abnormally

        /*
         * Wait for any jobs that were already queued via async_run /
         * async_safe_run to finish executing before we destroy the object.
         * Those jobs might hold captured references.
         */
        {
            std::unique_lock<std::mutex> lock(m_async_work_mutex);
            if (!m_async_work_cv.wait_for(lock, std::chrono::milliseconds(ASYNC_WORK_TIMEOUT_MS),
                                          [this] { return m_async_work_outstanding == 0; })) {
                SCP_WARN(()) << "Timeout waiting for " << m_async_work_outstanding.load()
                             << " outstanding async work(s) to complete during destruction";
                // We may arrive here if the QEMU thread never actually started, there was queue'd work waiting for it,
                // but the simulation has been terminated.
            }
        }

        /* end_of_loop_cb() holds m_can_delete while syncing; spin until it is
         * free, stopping the QK so the CPU loop cannot keep re-entering. */
        while (!m_can_delete.try_lock()) {
            m_qk->stop();
        }
        m_inst.del_dev(this);
    }
456
    // Process shutting down the CPU's at end of simulation, check this was done on destruction.
    // This gives time for QEMU to exit etc.
    void end_of_simulation() override
    {
        /* Idempotent: may be called from both the kernel and the destructor. */
        if (m_finished) return;
        m_finished = true; // assert before taking lock (for co-routines too)

        if (!m_cpu.valid()) {
            /* CPU hasn't been created yet */
            return;
        }

        if (!m_realized) {
            /* The QEMU device was never realized; nothing to tear down. */
            return;
        }

        /*
         * If start_of_simulation() was never called (e.g. the simulation
         * aborted during elaboration) then finish_qemu_init() was never
         * called either, and the QEMU iothread is still in its startup
         * wait. Calling lock_iothread() in that state blocks forever
         * inside wait_for_iothread_startup. Nothing useful can be done
         * without a running iothread, so bail out early.
         */
        if (!m_started) {
            return;
        }

        m_inst.get().lock_iothread();
        /* Make sure QEMU won't call us anymore */
        m_cpu.clear_callbacks();

        if (m_coroutines) {
            // can't join or wait for sc_event
            m_inst.get().unlock_iothread();
            return;
        }

        /* Unblock it if it's waiting for run budget */
        m_qk->stop();

        /* Unblock the CPU thread if it's sleeping */
        set_signaled();

        /* Wait for QEMU to terminate the CPU thread */
        /*
         * Theoretically we should m_cpu.remove_sync(); here, however if QEMU is in the process of an io operation or an
         * exclusive cpu region, it will end up waiting for the io operation to finish (effectively waiting for the
         * SystemC thread, or potentially another CPU that wont get the chance to exit)
         */
        m_cpu.halt(true);

        m_inst.get().unlock_iothread();
        m_cpu.kick(); // Just in case the CPU is currently in the big lock waiting
        m_cpu.set_unplug(true);
    }
513
    /* NB this is used to determine if this CPU can run in SINGLE mode,
     * for the m_inst.can_run() calculation
     */
    bool can_run() override { return m_cpu.can_run(); }
518
    void before_end_of_elaboration() override
    {
        /* Creates the underlying QEMU device object (sets m_dev). */
        QemuDevice::before_end_of_elaboration();

        m_cpu = qemu::Cpu(m_dev);

        if (m_coroutines) {
            /* In coroutine mode the CPU loop runs as a SystemC process. */
            m_sc_thread = sc_core::sc_spawn(std::bind(&QemuCpu::mainloop_thread_coroutine, this));
        }

        socket.init(m_dev, "memory");

        /* Keep the CPU stopped until start_of_simulation() releases it. */
        m_cpu.set_soft_stopped(true);

        m_cpu.set_end_of_loop_callback(std::bind(&QemuCpu::end_of_loop_cb, this));
        m_cpu.set_kick_callback(std::bind(&QemuCpu::kick_cb, this));

        m_deadline_timer = m_inst.get().timer_new();
        m_deadline_timer->set_callback(std::bind(&QemuCpu::deadline_timer_cb, this));

        /* Tag outgoing transactions with the CPU that issued them. */
        m_cpu_hint_ext.set_cpu(m_cpu);
    }
541
542 void halt_cb(const bool& val)
543 {
544 SCP_TRACE(())("Halt : {}", val);
545 if (!m_finished) {
546 if (val) {
547 m_deadline_timer->del();
548 m_qk->stop();
549 } else {
550 m_qk->start();
551 rearm_deadline_timer();
552 }
553 m_inst.get().lock_iothread();
554 m_cpu.halt(val);
555 m_inst.get().unlock_iothread();
556 m_qemu_kick_ev.async_notify(); // notify the other thread so that the CPU is allowed to continue
557 }
558 }
559
    /* NB this _MUST_ be called from an SC_THREAD (it may call sc_core::wait) */
    void reset_cb(const bool& val)
    {
        /* Assume this is on the SystemC thread, so no race condition issues */
        if (m_finished) return;

        if (val) {
            if (m_resetting != none) return; // dont double reset!
            SCP_WARN(())("Start reset");
            m_resetting = start_reset;
            /* The reset itself must run on the vCPU thread; the job flips
             * m_resetting to hold_reset and wakes the SystemC side. */
            m_cpu.async_safe_run(make_tracked_async_job([this] {
                m_cpu.reset(true);
                m_resetting = hold_reset;
                m_start_reset_done_ev.async_notify();
            })); // start the reset (which will pause the CPU)
        } else {
            if (m_resetting == none) return; // dont finish a finished reset!
            /* Wait until the async start-reset job above has completed. */
            while (m_resetting == start_reset) {
                SCP_WARN(())("Hold reset");
                sc_core::wait(m_start_reset_done_ev);
            }
            m_inst.get().lock_iothread();
            socket.reset(); // remove DMI's (needs BQL for memory region updates)
            m_inst.get().unlock_iothread();
            m_cpu.reset(false); // call the end-of-reset (which will unpause the CPU)
            m_qk->start(); // restart the QK if it's stopped
            m_qk->reset();
            m_qemu_kick_ev.async_notify(); // notify the other thread so that the CPU is allowed to continue
            SCP_WARN(())("Finished reset");
            m_resetting = none;
        }
        m_qemu_kick_ev.async_notify(); // notify the other thread so that the CPU is allowed to process if required
    }
593 virtual void end_of_elaboration() override
594 {
595 QemuDevice::end_of_elaboration();
596
597 if (!p_gdb_port.is_default_value()) {
598 std::stringstream ss;
599 SCP_INFO(()) << "Starting gdb server on TCP port " << p_gdb_port;
600 ss << "tcp::" << p_gdb_port;
601 m_inst.get().start_gdb_server(ss.str());
602 }
603 }
604
    virtual void start_of_simulation() override
    {
        /* Cache the global quantum in nanoseconds for rearm_deadline_timer(). */
        m_quantum_ns = int64_t(tlm_utils::tlm_quantumkeeper::get_global_quantum().to_seconds() * 1e9);

        QemuDevice::start_of_simulation();
        if (m_inst.get_tcg_mode() == QemuInstance::TCG_SINGLE) {
            /* Single shared vCPU thread: only start the QK if the instance
             * as a whole is runnable. */
            if (m_inst.can_run()) {
                m_qk->start();
            }
        } else if (!m_coroutines) {
            /*
             * In MTTCG mode, start the QK to register a suspending channel
             * with the SystemC kernel. Without this, async_suspend() returns
             * true (exit) whenever there are no pending events, which can
             * happen in the gap between MMIO transactions processed by
             * run_on_sysc(). The QK will be stopped later in wait_for_work()
             * when the CPU halts (e.g. WFI), allowing normal starvation exit.
             */
            m_qk->start();
        }

        m_started = true;
        if (!m_coroutines) {
            /*
             * Start the quantum keeper before kicking the CPU to ensure
             * its tick event is attached as suspending. Without this, a
             * fast CPU could complete and stop its QK before a slow CPU
             * ever calls sync_with_kernel() (where start() was previously
             * first called), leaving no suspending events and causing
             * premature simulation exit due to starvation.
             */
            m_qk->start();

            /* Prepare the CPU for its first run and release it
             * Hold BQL to synchronize with the vCPU thread's idle-wait loop
             * in qemu_process_cpu_events(). That loop checks cpu_thread_is_idle()
             * (which reads soft_stopped) under BQL, then enters
             * qemu_cond_wait(halt_cond, &bql) which atomically releases BQL.
             * Without BQL here, the kick (broadcast on halt_cond) can be lost
             * if the vCPU thread is between the idle check and the cond_wait.
             */
            m_inst.get().lock_iothread();
            m_cpu.set_soft_stopped(false);
            rearm_deadline_timer();
            m_cpu.kick();
            m_inst.get().unlock_iothread();
        }

        // Have not managed to figure out the root cause of the issue, but the
        // PC is not properly set before running KVM, or it is possibly reset to
        // 0 by some routine. By setting the vcpu as dirty, we trigger pushing
        // registers to KVM just before running it.
        m_cpu.set_vcpu_dirty(true);
    }
659
    /* QemuInitiatorIface */
    virtual void initiator_customize_tlm_payload(TlmPayload& payload) override
    {
        /* Signal the other end we are a CPU. The extension is a statically
         * owned member (its free() is a no-op), so TLM cannot delete it. */
        payload.set_extension(&m_cpu_hint_ext);
    }
666
667 virtual void initiator_tidy_tlm_payload(TlmPayload& payload) override { payload.clear_extension(&m_cpu_hint_ext); }
668
669 /*
670 * Called by the initiator socket just before a memory transaction.
671 * We update our current view of the local time and return it.
672 */
673 virtual sc_core::sc_time initiator_get_local_time() override
674 {
675 using sc_core::sc_time;
676 using sc_core::SC_NS;
677
679
680 if (m_finished) return sc_core::SC_ZERO_TIME;
681
682 vclock_now = m_inst.get().get_virtual_clock();
683 sc_core::sc_time sc_t = sc_core::sc_time_stamp();
684 if (sc_time(vclock_now, SC_NS) > sc_t) {
685 m_qk->set(sc_time(vclock_now, SC_NS) - sc_t);
686 return m_qk->get_local_time();
687 } else {
688 return sc_core::SC_ZERO_TIME;
689 }
690 }
691
692 /*
693 * Called after the transaction. We must update our local time view to
694 * match t.
695 */
696 virtual void initiator_set_local_time(const sc_core::sc_time& t) override
697 {
698 if (m_finished) return;
699 m_qk->set(t);
700
701 if (m_qk->need_sync()) {
702 /*
703 * Kick the CPU out of its execution loop so that we can sync with
704 * the kernel.
705 */
706 m_cpu.kick();
707 }
708 }
709
710 /* expose async run interface for DMI invalidation */
711 virtual void initiator_async_run(qemu::Cpu::AsyncJobFn job) override
712 {
713 if (!m_finished) m_cpu.async_run(make_tracked_async_job(std::move(job)));
714 }
715};
716
717#endif
Definition qemu-cpu-hint.h:17
Definition cpu.h:31
QEMU device abstraction as a SystemC module.
Definition device.h:37
Definition initiator.h:33
This class encapsulates a libqemu-cxx qemu::LibQemu instance. It handles QEMU parameters and instance...
Definition qemu-instance.h:89
qemu::LibQemu & get()
Returns the underlying qemu::LibQemu instance.
Definition qemu-instance.h:474
TcgMode get_tcg_mode()
Get the TCG mode for this instance.
Definition qemu-instance.h:375
std::shared_ptr< gs::tlm_quantumkeeper_extended > create_quantum_keeper()
Get the TCG mode for this instance.
Definition qemu-instance.h:387
Definition target.h:160
Definition async_event.h:22
Definition runonsysc.h:23
bool run_on_sysc(std::function< void()> job_entry, bool wait=true)
Run a job on the SystemC kernel thread.
Definition runonsysc.h:211
Definition libqemu-cxx.h:661