quic/qbox
Loading...
Searching...
No Matches
cpu.h
1/*
2 * This file is part of libqbox
3 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All Rights Reserved.
4 * Author: GreenSocs 2021
5 *
6 * SPDX-License-Identifier: BSD-3-Clause
7 */
8
9#ifndef _LIBQBOX_COMPONENTS_CPU_CPU_H
10#define _LIBQBOX_COMPONENTS_CPU_CPU_H
11
12#include <sstream>
13#include <mutex>
14#include <condition_variable>
15#include <atomic>
16
17#include <tlm>
18#include <tlm_utils/simple_initiator_socket.h>
19#include <tlm_utils/simple_target_socket.h>
20#include <cci_configuration>
21
22#include <libgssync.h>
23
24#include "device.h"
25#include "ports/initiator.h"
26#include "tlm-extensions/qemu-cpu-hint.h"
27#include "ports/qemu-target-signal-socket.h"
28
30{
31protected:
32 /*
33 * We have a unique copy per CPU of this extension, which is not dynamically allocated.
34 * We really don't want the default implementation to call delete on it...
35 */
37 {
38 public:
39 void free() override { /* leave my extension alone, TLM */ }
40 };
41
    gs::runonsysc m_on_sysc;                       // runs jobs on the SystemC kernel thread (see wait_for_work)
    std::shared_ptr<qemu::Timer> m_deadline_timer; // fires at the end of each time budget (rearm_deadline_timer)
    bool m_coroutines;                             // true when the sync policy runs the CPU loop as a SystemC coroutine

    qemu::Cpu m_cpu; // underlying QEMU CPU object, created in before_end_of_elaboration()

    gs::async_event m_qemu_kick_ev;         // async event used to wake/kick the CPU from other threads
    sc_core::sc_event_or_list m_external_ev; // events that should wake an idle CPU (includes m_qemu_kick_ev)
    sc_core::sc_process_handle m_sc_thread; // used for co-routines

    /* MTTCG wake-up handshake: protects/signals the "work available" flag */
    std::atomic<bool> m_signaled;
    std::mutex m_signaled_lock;
    std::condition_variable m_signaled_cond;

    std::shared_ptr<gs::tlm_quantumkeeper_extended> m_qk; // quantum keeper from the QEMU instance
    std::atomic<bool> m_finished = false; // set once at end_of_simulation; callbacks bail out when true
    std::atomic<bool> m_started = false;  // set in start_of_simulation; callbacks are ignored before that
    /* Reset state machine driven by reset_cb() */
    enum { none, start_reset, hold_reset, finish_reset } m_resetting = none;
    gs::async_event m_start_reset_done_ev; // notified when the async start-of-reset has completed

    std::mutex m_can_delete; // held by end_of_loop_cb; the destructor waits on it before tearing down
    QemuCpuHintTlmExtension m_cpu_hint_ext; // statically-allocated TLM extension identifying this CPU

    uint64_t m_quantum_ns; // For convenience
67 /*
68 * Request quantum keeper from instance
69 */
70 void create_quantum_keeper()
71 {
72 m_qk = m_inst.create_quantum_keeper();
73
74 if (!m_qk) {
75 SCP_FATAL(()) << "qbox : Sync policy unknown";
76 }
77
78 m_qk->reset();
79 }
80
81 /*
82 * Given the quantum keeper nature (synchronous or asynchronous) and the
83 * p_icount parameter, we can configure the QEMU instance accordingly.
84 */
85 void set_coroutine_mode()
86 {
87 switch (m_qk->get_thread_type()) {
88 case gs::SyncPolicy::SYSTEMC_THREAD:
89 m_coroutines = true;
90 break;
91
92 case gs::SyncPolicy::OS_THREAD:
93 m_coroutines = false;
94 break;
95 }
96 }
97
98 /*
99 * ---- CPU loop related methods ----
100 */
101
102 /*
103 * Called by watch_external_ev and kick_cb in MTTCG mode. This keeps track
104 * of an external event in case the CPU thread just released the iothread
105 * and is going to call wait_for_work. This is needed to avoid missing an
106 * event and going to sleep while we should effectively wake-up.
107 *
 * The coroutine mode does not use this method and uses the SystemC kernel
 * as a means of synchronization. If an asynchronous event is triggered
 * while the CPU thread goes to sleep, the fact that the CPU thread is also
 * the SystemC thread will ensure correct ordering of the events.
112 */
113 void set_signaled()
114 {
115 assert(!m_coroutines);
116 if (m_inst.get_tcg_mode() != QemuInstance::TCG_SINGLE) {
117 std::lock_guard<std::mutex> lock(m_signaled_lock);
118 m_signaled = true;
119 m_signaled_cond.notify_all();
120 } else {
121 std::lock_guard<std::mutex> lock(m_inst.g_signaled_lock);
122 m_inst.g_signaled = true;
123 m_inst.g_signaled_cond.notify_all();
124 }
125 }
126
127 /*
128 * SystemC thread watching the m_external_ev event list. Only used in MTTCG
129 * mode.
130 */
131 void watch_external_ev()
132 {
133 for (;;) {
134 wait(m_external_ev);
135 set_signaled();
136 }
137 }
138
139 /*
140 * Called when the CPU is kicked. We notify the corresponding async event
141 * to wake the CPU up if it was sleeping waiting for work.
142 */
143 void kick_cb()
144 {
145 SCP_TRACE(())("QEMU deadline KICK callback");
146 if (m_coroutines) {
147 if (!m_finished) m_qemu_kick_ev.async_notify();
148 } else {
149 set_signaled();
150 }
151 }
152
153 /*
154 * Called by the QEMU iothread when the deadline timer expires. We kick the
155 * CPU out of its execution loop for it to call the end_of_loop_cb callback.
156 * However, we should also handle the case that qemu is currently in 'sync'
157 * - by setting the time here, we will nudge the sync thread.
158 */
159 void deadline_timer_cb()
160 {
161 SCP_TRACE(())("QEMU deadline timer callback");
162 // All syncing will be done in end_of_loop_cb
163 m_cpu.kick();
164 // Rearm timer for next time ....
165 if (!m_finished) {
166 rearm_deadline_timer();
167
168 /* Take this opportunity to set the time */
169 int64_t now = m_inst.get().get_virtual_clock();
170 sc_core::sc_time sc_t = sc_core::sc_time_stamp();
171 if (sc_core::sc_time(now, sc_core::SC_NS) > sc_t) {
172 m_qk->set(sc_core::sc_time(now, sc_core::SC_NS) - sc_t);
173 }
174 }
175 }
176
177 /*
178 * The CPU does not have work anymore. Pause the CPU thread until we have
179 * some work to do.
180 *
181 * - In coroutine mode, this method runs a wait on the SystemC kernel,
182 * waiting for the m_external_ev list.
183 * - In MTTCG mode, we wait on the m_signaled_cond condition, signaled when
184 * set_signaled is called.
185 */
    void wait_for_work()
    {
        SCP_TRACE(())("Wait for work");
        /* Release our run budget while idle */
        m_qk->stop();
        if (m_finished) return;

        if (m_coroutines) {
            /* Coroutine mode: block on the SystemC kernel until an external event fires */
            m_on_sysc.run_on_sysc([this]() { wait(m_external_ev); });
        } else {
            if (m_inst.get_tcg_mode() != QemuInstance::TCG_SINGLE) {
                /* MTTCG: wait on this CPU's own condition, signaled by set_signaled() */
                std::unique_lock<std::mutex> lock(m_signaled_lock);
                m_signaled_cond.wait(lock, [this] { return m_signaled || m_finished; });
                m_signaled = false;
            } else {
                /* TCG_SINGLE: all CPUs share the instance-global condition */
                std::unique_lock<std::mutex> lock(m_inst.g_signaled_lock);
                m_inst.g_signaled_cond.wait(lock, [this] { return m_inst.g_signaled || m_finished; });
                m_inst.g_signaled = false;
            }
        }
        /* Don't restart the QK when we were woken for shutdown */
        if (m_finished) return;
        SCP_TRACE(())("Have work, running CPU");
        m_qk->start();
    }
209
210 /*
211 * Set the deadline timer to trigger at the end of the time budget
212 */
213 void rearm_deadline_timer()
214 {
215 // This is a simple "every quantum" tick. Whether the QK makes use of it or not
216 // is down to the sync policy
217 m_deadline_timer->mod(m_inst.get().get_virtual_clock() + m_quantum_ns);
218 }
219
220 /*
221 * Called before running the CPU. Lock the BQL and set the deadline timer
222 * to not run beyond the time budget.
223 */
    void prepare_run_cpu()
    {
        /*
         * Wait until there is work for this CPU (or, in TCG_SINGLE mode, for
         * the whole instance) before (re)entering the QEMU CPU loop.
         */

        SCP_TRACE(())("Prepare run");
        if (m_inst.get_tcg_mode() == QemuInstance::TCG_SINGLE) {
            while (!m_inst.can_run() && !m_finished) {
                wait_for_work();
            }
        } else {
            while (!m_cpu.can_run() && !m_finished) {
                if (!m_coroutines && !m_inst.is_tcg_enabled()) {
                    // For hardware accelerators (KVM/HVF), break back into the
                    // QEMU loop so the vCPU thread can handle signals delivered
                    // via ioctl. TCG does not need this.
                    SCP_TRACE(())("Stopping QK (accelerator)");
                    m_qk->stop();
                    break;
                }
                wait_for_work();
            }
        }

        /* Release the CPU only once start_of_simulation() has run */
        if (m_started) {
            m_cpu.set_soft_stopped(false);
        }
        /*
         * The QEMU CPU loop expect us to enter it with the iothread mutex locked.
         * It is then unlocked when we come back from the CPU loop, in
         * sync_with_kernel().
         */
        m_inst.get().lock_iothread();
    }
261
262 /*
263 * Run the CPU loop. Only used in coroutine mode.
264 */
265 void run_cpu_loop()
266 {
267 auto last_vclock = m_inst.get().get_virtual_clock();
268 m_cpu.loop();
269 /*
270 * Workaround in icount mode: sometimes, the CPU does not execute
271 * on the first call of run_loop(). Give it a second chance.
272 */
273 for (int i = 0; i < m_inst.number_devices(); i++) {
274 if ((m_inst.get().get_virtual_clock() == last_vclock) && (m_cpu.can_run())) {
275 m_cpu.loop();
276 } else
277 break;
278 }
279 }
280
281 /*
282 * Called after a CPU loop run. It synchronizes with the kernel.
283 */
284 void sync_with_kernel()
285 {
286 int64_t now = m_inst.get().get_virtual_clock();
287
288 m_cpu.set_soft_stopped(true);
289
290 m_inst.get().unlock_iothread();
291 if (!m_coroutines) {
292 m_qk->start(); // we may have switched the QK off, so switch it on before setting
293 }
294 sc_core::sc_time sc_t = sc_core::sc_time_stamp();
295 if (sc_core::sc_time(now, sc_core::SC_NS) > sc_t) {
296 m_qk->set(sc_core::sc_time(now, sc_core::SC_NS) - sc_t);
297 }
298 // Important to allow QK to notify itself if it's waiting.
299 m_qk->sync();
300 }
301
302 /*
303 * Callback called when the CPU exits its execution loop. In coroutine
304 * mode, we yield here to come back to run_cpu_loop(). In TCG thread mode,
305 * we use this hook to synchronize with the kernel.
306 */
307 void end_of_loop_cb()
308 {
309 SCP_TRACE(())("End of loop");
310 if (m_finished) return;
311 /*
312 * In MTTCG mode, vCPU threads are created during elaboration and can
313 * call this callback before start_of_simulation() has completed.
314 * Skip sync_with_kernel/prepare_run_cpu until fully initialized.
315 */
316 if (!m_started) return;
317
318 if (m_coroutines) {
319 m_inst.get().coroutine_yield();
320 } else {
321 std::lock_guard<std::mutex> lock(m_can_delete);
322 sync_with_kernel();
323 prepare_run_cpu();
324 }
325 }
326
327 /*
328 * SystemC thread entry when running in coroutine mode.
329 */
330 void mainloop_thread_coroutine()
331 {
332 m_cpu.register_thread();
333
334 for (; !m_finished;) {
335 prepare_run_cpu();
336 run_cpu_loop();
337 sync_with_kernel();
338 }
339 }
340
341public:
342 cci::cci_param<unsigned int> p_gdb_port;
343
344 /* The default memory socket. Mapped to the default CPU address space in QEMU */
348
349
    QemuCpu(const sc_core::sc_module_name& name, QemuInstance& inst, const std::string& type_name)
        : QemuDevice(name, inst, (type_name + "-cpu").c_str())
        , halt("halt")
        , reset("reset")
        , m_qemu_kick_ev(false)
        , m_signaled(false)
        , p_gdb_port("gdb_port", 0, "Wait for gdb connection on TCP port <gdb_port>")
        , socket("mem", *this, inst)
    {
        using namespace std::placeholders;

        /* A kick must be able to wake a CPU waiting on external events */
        m_external_ev |= m_qemu_kick_ev;

        /* Route halt/reset pin changes to our callbacks */
        auto haltcb = std::bind(&QemuCpu::halt_cb, this, _1);
        halt.register_value_changed_cb(haltcb);
        auto resetcb = std::bind(&QemuCpu::reset_cb, this, _1);
        reset.register_value_changed_cb(resetcb);

        create_quantum_keeper();
        set_coroutine_mode();

        /* MTTCG mode: a SystemC thread forwards external events to the vCPU thread */
        if (!m_coroutines) {
            SC_THREAD(watch_external_ev);
        }

        m_inst.add_dev(this);

        /* Don't let this async event alone keep the simulation alive */
        m_start_reset_done_ev.async_detach_suspending();
    }
379
380 virtual ~QemuCpu()
381 {
382 end_of_simulation(); // catch the case we exited abnormally
383 while (!m_can_delete.try_lock()) {
384 m_qk->stop();
385 }
386 m_inst.del_dev(this);
387 }
388
389 // Process shutting down the CPU's at end of simulation, check this was done on destruction.
390 // This gives time for QEMU to exit etc.
    void end_of_simulation() override
    {
        /*
         * Run the teardown exactly once; m_finished also tells every callback
         * (kick_cb, end_of_loop_cb, deadline_timer_cb, ...) to bail out.
         */
        if (m_finished) return;
        m_finished = true; // assert before taking lock (for co-routines too)

        if (!m_cpu.valid()) {
            /* CPU hasn't been created yet */
            return;
        }

        if (!m_realized) {
            /* Device was never realized by QEMU: nothing to tear down */
            return;
        }

        m_inst.get().lock_iothread();
        /* Make sure QEMU won't call us anymore */
        m_cpu.clear_callbacks();

        if (m_coroutines) {
            // can't join or wait for sc_event
            m_inst.get().unlock_iothread();
            return;
        }

        /* Unblock it if it's waiting for run budget */
        m_qk->stop();

        /* Unblock the CPU thread if it's sleeping */
        set_signaled();

        /* Unblock it if it's waiting for some I/O to complete */
        socket.cancel_all();

        /* Wait for QEMU to terminate the CPU thread */
        /*
         * Theoretically we should m_cpu.remove_sync(); here, however if QEMU is in the process of an io operation or an
         * exclusive cpu region, it will end up waiting for the io operation to finish (effectively waiting for the
         * SystemC thread, or potentially another CPU that wont get the chance to exit)
         */
        m_cpu.set_unplug(true);
        m_cpu.halt(true);

        m_inst.get().unlock_iothread();
        m_cpu.kick(); // Just in case the CPU is currently in the big lock waiting
    }
436
    /* NB this is used to determine if this CPU can run in SINGLE mode,
     * for the m_inst.can_run calculation
     */
    bool can_run() override { return m_cpu.can_run(); }
441
442 void before_end_of_elaboration() override
443 {
444 QemuDevice::before_end_of_elaboration();
445
446 m_cpu = qemu::Cpu(m_dev);
447
448 if (m_coroutines) {
449 m_sc_thread = sc_core::sc_spawn(std::bind(&QemuCpu::mainloop_thread_coroutine, this));
450 }
451
452 socket.init(m_dev, "memory");
453
454 m_cpu.set_soft_stopped(true);
455
456 m_cpu.set_end_of_loop_callback(std::bind(&QemuCpu::end_of_loop_cb, this));
457 m_cpu.set_kick_callback(std::bind(&QemuCpu::kick_cb, this));
458
459 m_deadline_timer = m_inst.get().timer_new();
460 m_deadline_timer->set_callback(std::bind(&QemuCpu::deadline_timer_cb, this));
461
462 m_cpu_hint_ext.set_cpu(m_cpu);
463 }
464
465 void halt_cb(const bool& val)
466 {
467 SCP_TRACE(())("Halt : {}", val);
468 if (!m_finished) {
469 if (val) {
470 m_deadline_timer->del();
471 m_qk->stop();
472 } else {
473 m_qk->start();
474 rearm_deadline_timer();
475 }
476 m_inst.get().lock_iothread();
477 m_cpu.halt(val);
478 m_inst.get().unlock_iothread();
479 m_qemu_kick_ev.async_notify(); // notify the other thread so that the CPU is allowed to continue
480 }
481 }
482
483 /* NB _MUST_ be called from an SC_THREAD */
    void reset_cb(const bool& val)
    {
        /* Assume this is on the SystemC thread, so no race condition issues */
        if (m_finished) return;

        if (val) {
            /* Rising edge: begin the reset sequence */
            if (m_resetting != none) return; // dont double reset!
            SCP_WARN(())("Start reset");
            m_resetting = start_reset;
            /* Perform the actual CPU reset from a safe context on the QEMU side */
            m_cpu.async_safe_run([&] {
                m_cpu.reset(true);
                m_resetting = hold_reset;
                m_start_reset_done_ev.async_notify();
            }); // start the reset (which will pause the CPU)
        } else {
            /* Falling edge: finish the reset sequence */
            if (m_resetting == none) return; // dont finish a finished reset!
            /* Wait for the async start-of-reset above to have completed */
            while (m_resetting == start_reset) {
                SCP_WARN(())("Hold reset");
                sc_core::wait(m_start_reset_done_ev);
            }
            m_inst.get().lock_iothread();
            socket.reset(); // remove DMI's (needs BQL for memory region updates)
            m_inst.get().unlock_iothread();
            m_cpu.reset(false); // call the end-of-reset (which will unpause the CPU)
            m_qk->start(); // restart the QK if it's stopped
            m_qk->reset();
            m_qemu_kick_ev.async_notify(); // notify the other thread so that the CPU is allowed to continue
            SCP_WARN(())("Finished reset");
            m_resetting = none;
        }
        m_qemu_kick_ev.async_notify(); // notify the other thread so that the CPU is allowed to process if required
    }
516 virtual void end_of_elaboration() override
517 {
518 QemuDevice::end_of_elaboration();
519
520 if (!p_gdb_port.is_default_value()) {
521 std::stringstream ss;
522 SCP_INFO(()) << "Starting gdb server on TCP port " << p_gdb_port;
523 ss << "tcp::" << p_gdb_port;
524 m_inst.get().start_gdb_server(ss.str());
525 }
526 }
527
    virtual void start_of_simulation() override
    {
        /* Cache the global quantum (in ns) for the deadline timer */
        m_quantum_ns = int64_t(tlm_utils::tlm_quantumkeeper::get_global_quantum().to_seconds() * 1e9);

        QemuDevice::start_of_simulation();
        if (m_inst.get_tcg_mode() == QemuInstance::TCG_SINGLE) {
            /* Single-threaded TCG: only start the QK if the instance can run */
            if (m_inst.can_run()) {
                m_qk->start();
            }
        } else if (!m_coroutines) {
            /*
             * In MTTCG mode, start the QK to register a suspending channel
             * with the SystemC kernel. Without this, async_suspend() returns
             * true (exit) whenever there are no pending events, which can
             * happen in the gap between MMIO transactions processed by
             * run_on_sysc(). The QK will be stopped later in wait_for_work()
             * when the CPU halts (e.g. WFI), allowing normal starvation exit.
             */
            m_qk->start();
        }

        m_started = true;
        if (!m_coroutines) {
            /*
             * Start the quantum keeper before kicking the CPU to ensure
             * its tick event is attached as suspending. Without this, a
             * fast CPU could complete and stop its QK before a slow CPU
             * ever calls sync_with_kernel() (where start() was previously
             * first called), leaving no suspending events and causing
             * premature simulation exit due to starvation.
             */
            m_qk->start();

            /* Prepare the CPU for its first run and release it
             * Hold BQL to synchronize with the vCPU thread's idle-wait loop
             * in qemu_process_cpu_events(). That loop checks cpu_thread_is_idle()
             * (which reads soft_stopped) under BQL, then enters
             * qemu_cond_wait(halt_cond, &bql) which atomically releases BQL.
             * Without BQL here, the kick (broadcast on halt_cond) can be lost
             * if the vCPU thread is between the idle check and the cond_wait.
             */
            m_inst.get().lock_iothread();
            m_cpu.set_soft_stopped(false);
            rearm_deadline_timer();
            m_cpu.kick();
            m_inst.get().unlock_iothread();
        }

        // Have not managed to figure out the root cause of the issue, but the
        // PC is not properly set before running KVM, or it is possibly reset to
        // 0 by some routine. By setting the vcpu as dirty, we trigger pushing
        // registers to KVM just before running it.
        m_cpu.set_vcpu_dirty(true);
    }
582
583 /* QemuInitiatorIface */
    virtual void initiator_customize_tlm_payload(TlmPayload& payload) override
    {
        /*
         * Signal the other end we are a CPU by attaching the CPU hint
         * extension. It is a per-CPU member (not heap-allocated) and is
         * detached again in initiator_tidy_tlm_payload().
         */
        payload.set_extension(&m_cpu_hint_ext);
    }
589
    /* Detach our statically-allocated CPU hint extension after the transaction */
    virtual void initiator_tidy_tlm_payload(TlmPayload& payload) override { payload.clear_extension(&m_cpu_hint_ext); }
591
592 /*
593 * Called by the initiator socket just before a memory transaction.
594 * We update our current view of the local time and return it.
595 */
596 virtual sc_core::sc_time initiator_get_local_time() override
597 {
598 using sc_core::sc_time;
599 using sc_core::SC_NS;
600
602
603 vclock_now = m_inst.get().get_virtual_clock();
604 sc_core::sc_time sc_t = sc_core::sc_time_stamp();
605 if (sc_time(vclock_now, SC_NS) > sc_t) {
606 m_qk->set(sc_time(vclock_now, SC_NS) - sc_t);
607 return m_qk->get_local_time();
608 } else {
609 return sc_core::SC_ZERO_TIME;
610 }
611 }
612
613 /*
614 * Called after the transaction. We must update our local time view to
615 * match t.
616 */
617 virtual void initiator_set_local_time(const sc_core::sc_time& t) override
618 {
619 m_qk->set(t);
620
621 if (m_qk->need_sync()) {
622 /*
623 * Kick the CPU out of its execution loop so that we can sync with
624 * the kernel.
625 */
626 m_cpu.kick();
627 }
628 }
629
630 /* expose async run interface for DMI invalidation */
631 virtual void initiator_async_run(qemu::Cpu::AsyncJobFn job) override
632 {
633 if (!m_finished) m_cpu.async_run(job);
634 }
635};
636
637#endif
Definition qemu-cpu-hint.h:17
Definition cpu.h:30
QEMU device abstraction as a SystemC module.
Definition device.h:37
Definition initiator.h:33
This class encapsulates a libqemu-cxx qemu::LibQemu instance. It handles QEMU parameters and instance...
Definition qemu-instance.h:89
qemu::LibQemu & get()
Returns the underlying qemu::LibQemu instance.
Definition qemu-instance.h:474
TcgMode get_tcg_mode()
Get the TCG mode for this instance.
Definition qemu-instance.h:375
std::shared_ptr< gs::tlm_quantumkeeper_extended > create_quantum_keeper()
Get the TCG mode for this instance.
Definition qemu-instance.h:387
Definition target.h:160
Definition async_event.h:22
Definition runonsysc.h:23
bool run_on_sysc(std::function< void()> job_entry, bool wait=true)
Run a job on the SystemC kernel thread.
Definition runonsysc.h:183
Definition libqemu-cxx.h:661