quic/qbox
cpu.h
/*
 * This file is part of libqbox
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All Rights Reserved.
 * Author: GreenSocs 2021
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#ifndef _LIBQBOX_COMPONENTS_CPU_CPU_H
#define _LIBQBOX_COMPONENTS_CPU_CPU_H

#include <sstream>
#include <mutex>
#include <condition_variable>
#include <atomic>

#include <tlm>
#include <tlm_utils/simple_initiator_socket.h>
#include <tlm_utils/simple_target_socket.h>
#include <cci_configuration>

#include <libgssync.h>

#include "device.h"
#include "ports/initiator.h"
#include "tlm-extensions/qemu-cpu-hint.h"
#include "ports/qemu-target-signal-socket.h"
class QemuCpu : public QemuDevice, public QemuInitiatorIface
{
protected:
    /*
     * We have a unique copy per CPU of this extension, which is not dynamically allocated.
     * We really don't want the default implementation to call delete on it...
     */
    class QemuCpuHintTlmExtension : public ::QemuCpuHintTlmExtension
    {
    public:
        void free() override { /* leave my extension alone, TLM */ }
    };

    gs::runonsysc m_on_sysc;
    std::shared_ptr<qemu::Timer> m_deadline_timer;
    bool m_coroutines;

    qemu::Cpu m_cpu;

    gs::async_event m_qemu_kick_ev;
    sc_core::sc_event_or_list m_external_ev;
    sc_core::sc_process_handle m_sc_thread; // used for co-routines

    std::atomic<bool> m_signaled;
    std::mutex m_signaled_lock;
    std::condition_variable m_signaled_cond;

    std::shared_ptr<gs::tlm_quantumkeeper_extended> m_qk;
    std::atomic<bool> m_finished = false;
    std::atomic<bool> m_started = false;
    enum { none, start_reset, hold_reset, finish_reset } m_resetting = none;
    gs::async_event m_start_reset_done_ev;

    std::mutex m_can_delete;
    QemuCpuHintTlmExtension m_cpu_hint_ext;

    uint64_t m_quantum_ns; // For convenience

    /*
     * Request quantum keeper from instance
     */
    void create_quantum_keeper()
    {
        m_qk = m_inst.create_quantum_keeper();

        if (!m_qk) {
            SCP_FATAL(()) << "qbox : Sync policy unknown";
        }

        m_qk->reset();
    }
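
    /*
     * Note: which quantum keeper implementation is returned depends on the
     * sync policy the QemuInstance was configured with; the set of available
     * policies is defined by libgssync, not by this header.
     */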

    /*
     * Given the quantum keeper nature (synchronous or asynchronous), decide
     * whether the CPU loop runs as a coroutine or on its own thread.
     */
    void set_coroutine_mode()
    {
        switch (m_qk->get_thread_type()) {
        case gs::SyncPolicy::SYSTEMC_THREAD:
            m_coroutines = true;
            break;

        case gs::SyncPolicy::OS_THREAD:
            m_coroutines = false;
            break;
        }
    }
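
    /*
     * With m_coroutines set, the CPU loop runs inside a SystemC thread (see
     * mainloop_thread_coroutine() below); otherwise QEMU runs the CPU on its
     * own OS thread (MTTCG) and synchronization goes through set_signaled() /
     * wait_for_work().
     */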

    /*
     * ---- CPU loop related methods ----
     */

    /*
     * Called by watch_external_ev and kick_cb in MTTCG mode. This keeps track
     * of an external event in case the CPU thread just released the iothread
     * and is going to call wait_for_work. This is needed to avoid missing an
     * event and going to sleep while we should effectively wake up.
     *
     * The coroutine mode does not use this method and uses the SystemC kernel
     * as a means of synchronization. If an asynchronous event is triggered
     * while the CPU thread goes to sleep, the fact that the CPU thread is also
     * the SystemC thread will ensure correct ordering of the events.
     */
    void set_signaled()
    {
        assert(!m_coroutines);
        if (m_inst.get_tcg_mode() != QemuInstance::TCG_SINGLE) {
            std::lock_guard<std::mutex> lock(m_signaled_lock);
            m_signaled = true;
            m_signaled_cond.notify_all();
        } else {
            std::lock_guard<std::mutex> lock(m_inst.g_signaled_lock);
            m_inst.g_signaled = true;
            m_inst.g_signaled_cond.notify_all();
        }
    }

    /*
     * SystemC thread watching the m_external_ev event list. Only used in MTTCG
     * mode.
     */
    void watch_external_ev()
    {
        for (;;) {
            wait(m_external_ev);
            set_signaled();
        }
    }

    /*
     * Called when the CPU is kicked. We notify the corresponding async event
     * to wake the CPU up if it was sleeping waiting for work.
     */
    void kick_cb()
    {
        SCP_TRACE(())("QEMU deadline KICK callback");
        if (m_coroutines) {
            if (!m_finished) m_qemu_kick_ev.async_notify();
        } else {
            set_signaled();
        }
    }

    /*
     * Called by the QEMU iothread when the deadline timer expires. We kick the
     * CPU out of its execution loop for it to call the end_of_loop_cb callback.
     * However, we should also handle the case where QEMU is currently in 'sync'
     * - by setting the time here, we will nudge the sync thread.
     */
    void deadline_timer_cb()
    {
        SCP_TRACE(())("QEMU deadline timer callback");
        // All syncing will be done in end_of_loop_cb
        m_cpu.kick();
        // Rearm timer for next time ....
        if (!m_finished) {
            rearm_deadline_timer();

            /* Take this opportunity to set the time */
            int64_t now = m_inst.get().get_virtual_clock();
            sc_core::sc_time sc_t = sc_core::sc_time_stamp();
            if (sc_core::sc_time(now, sc_core::SC_NS) > sc_t) {
                m_qk->set(sc_core::sc_time(now, sc_core::SC_NS) - sc_t);
            }
        }
    }

    /*
     * The CPU does not have work anymore. Pause the CPU thread until we have
     * some work to do.
     *
     * - In coroutine mode, this method runs a wait on the SystemC kernel,
     *   waiting for the m_external_ev list.
     * - In MTTCG mode, we wait on the m_signaled_cond condition, signaled when
     *   set_signaled is called.
     */
    void wait_for_work()
    {
        SCP_TRACE(())("Wait for work");
        m_qk->stop();
        if (m_finished) return;

        if (m_coroutines) {
            m_on_sysc.run_on_sysc([this]() { wait(m_external_ev); });
        } else {
            if (m_inst.get_tcg_mode() != QemuInstance::TCG_SINGLE) {
                std::unique_lock<std::mutex> lock(m_signaled_lock);
                m_signaled_cond.wait(lock, [this] { return m_signaled || m_finished; });
                m_signaled = false;
            } else {
                std::unique_lock<std::mutex> lock(m_inst.g_signaled_lock);
                m_inst.g_signaled_cond.wait(lock, [this] { return m_inst.g_signaled || m_finished; });
                m_inst.g_signaled = false;
            }
        }
        if (m_finished) return;
        SCP_TRACE(())("Have work, running CPU");
        m_qk->start();
    }

    /*
     * Set the deadline timer to trigger at the end of the time budget
     */
    void rearm_deadline_timer()
    {
        // This is a simple "every quantum" tick. Whether the QK makes use of it or not
        // is down to the sync policy
        m_deadline_timer->mod(m_inst.get().get_virtual_clock() + m_quantum_ns);
    }
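
    /*
     * Worked example: QEMU's virtual clock counts nanoseconds here, so with a
     * global TLM quantum of 1us, start_of_simulation() computes
     * m_quantum_ns = 1e-6 * 1e9 = 1000, and a timer armed at virtual time T
     * fires at T + 1000 ns, kicking the CPU out of its execution loop.
     */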

    /*
     * Called before running the CPU. Lock the BQL and set the deadline timer
     * to not run beyond the time budget.
     */
    void prepare_run_cpu()
    {
        SCP_TRACE(())("Prepare run");
        if (m_inst.get_tcg_mode() == QemuInstance::TCG_SINGLE) {
            while (!m_inst.can_run() && !m_finished) {
                wait_for_work();
            }
        } else {
            while (!m_cpu.can_run() && !m_finished) {
                if (!m_coroutines) {
                    // In the case of accelerators, allow them to handle signals etc.
                    SCP_TRACE(())("Stopping QK");
                    m_qk->stop(); // Stop the QK, it will be enabled when we next see work to do.
                    break;
                }
                wait_for_work();
            }
        }

        if (m_started) {
            m_cpu.set_soft_stopped(false);
        }
        /*
         * The QEMU CPU loop expects us to enter it with the iothread mutex locked.
         * It is then unlocked when we come back from the CPU loop, in
         * sync_with_kernel().
         */
        m_inst.get().lock_iothread();
    }

    /*
     * Run the CPU loop. Only used in coroutine mode.
     */
    void run_cpu_loop()
    {
        auto last_vclock = m_inst.get().get_virtual_clock();
        m_cpu.loop();
        /*
         * Workaround in icount mode: sometimes, the CPU does not execute
         * on the first call of run_loop(). Give it a second chance.
         */
        for (int i = 0; i < m_inst.number_devices(); i++) {
            if ((m_inst.get().get_virtual_clock() == last_vclock) && (m_cpu.can_run())) {
                m_cpu.loop();
            } else {
                break;
            }
        }
    }

    /*
     * Called after a CPU loop run. It synchronizes with the kernel.
     */
    void sync_with_kernel()
    {
        int64_t now = m_inst.get().get_virtual_clock();

        m_cpu.set_soft_stopped(true);

        m_inst.get().unlock_iothread();
        if (!m_coroutines) {
            m_qk->start(); // we may have switched the QK off, so switch it on before setting
        }
        sc_core::sc_time sc_t = sc_core::sc_time_stamp();
        if (sc_core::sc_time(now, sc_core::SC_NS) > sc_t) {
            m_qk->set(sc_core::sc_time(now, sc_core::SC_NS) - sc_t);
        }
        // Important to allow QK to notify itself if it's waiting.
        m_qk->sync();
    }
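
    /*
     * Note: the conversion above treats the virtual clock as nanoseconds
     * (sc_time(now, SC_NS)) and only ever pushes SystemC local time forward;
     * if SystemC time is already ahead of QEMU, nothing is set.
     */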

    /*
     * Callback called when the CPU exits its execution loop. In coroutine
     * mode, we yield here to come back to run_cpu_loop(). In TCG thread mode,
     * we use this hook to synchronize with the kernel.
     */
    void end_of_loop_cb()
    {
        SCP_TRACE(())("End of loop");
        if (m_finished) return;
        if (m_coroutines) {
            m_inst.get().coroutine_yield();
        } else {
            std::lock_guard<std::mutex> lock(m_can_delete);
            sync_with_kernel();
            prepare_run_cpu();
        }
    }

    /*
     * SystemC thread entry when running in coroutine mode.
     */
    void mainloop_thread_coroutine()
    {
        m_cpu.register_thread();

        for (; !m_finished;) {
            prepare_run_cpu();
            run_cpu_loop();
            sync_with_kernel();
        }
    }

public:
    cci::cci_param<unsigned int> p_gdb_port;

    /* The default memory socket. Mapped to the default CPU address space in QEMU */
    QemuInitiatorSocket<> socket;

    QemuTargetSignalSocket halt;
    QemuTargetSignalSocket reset;

    QemuCpu(const sc_core::sc_module_name& name, QemuInstance& inst, const std::string& type_name)
        : QemuDevice(name, inst, (type_name + "-cpu").c_str())
        , halt("halt")
        , reset("reset")
        , m_qemu_kick_ev(false)
        , m_signaled(false)
        , p_gdb_port("gdb_port", 0, "Wait for gdb connection on TCP port <gdb_port>")
        , socket("mem", *this, inst)
    {
        using namespace std::placeholders;

        m_external_ev |= m_qemu_kick_ev;

        auto haltcb = std::bind(&QemuCpu::halt_cb, this, _1);
        halt.register_value_changed_cb(haltcb);
        auto resetcb = std::bind(&QemuCpu::reset_cb, this, _1);
        reset.register_value_changed_cb(resetcb);

        create_quantum_keeper();
        set_coroutine_mode();

        if (!m_coroutines) {
            SC_THREAD(watch_external_ev);
        }

        m_inst.add_dev(this);

        m_start_reset_done_ev.async_detach_suspending();
    }

    virtual ~QemuCpu()
    {
        end_of_simulation(); // catch the case we exited abnormally
        while (!m_can_delete.try_lock()) {
            m_qk->stop();
        }
        m_inst.del_dev(this);
    }

    // Process shutting down the CPUs at end of simulation; check this was done on destruction.
    // This gives time for QEMU to exit etc.
    void end_of_simulation() override
    {
        if (m_finished) return;
        m_finished = true; // assert before taking lock (for co-routines too)

        if (!m_cpu.valid()) {
            /* CPU hasn't been created yet */
            return;
        }

        if (!m_realized) {
            return;
        }

        m_inst.get().lock_iothread();
        /* Make sure QEMU won't call us anymore */
        m_cpu.clear_callbacks();

        if (m_coroutines) {
            // can't join or wait for sc_event
            m_inst.get().unlock_iothread();
            return;
        }

        /* Unblock it if it's waiting for run budget */
        m_qk->stop();

        /* Unblock the CPU thread if it's sleeping */
        set_signaled();

        /* Unblock it if it's waiting for some I/O to complete */
        socket.cancel_all();

        /* Wait for QEMU to terminate the CPU thread */
        /*
         * Theoretically we should call m_cpu.remove_sync() here; however, if QEMU is in the middle of an io
         * operation or an exclusive cpu region, it will end up waiting for the io operation to finish
         * (effectively waiting for the SystemC thread, or potentially another CPU that won't get the chance
         * to exit)
         */
        m_cpu.set_unplug(true);
        m_cpu.halt(true);

        m_inst.get().unlock_iothread();
        m_cpu.kick(); // Just in case the CPU is currently in the big lock waiting
    }

    /* NB this is used to determine if this CPU can run in SINGLE mode,
     * for the m_inst.can_run calculation
     */
    bool can_run() override { return m_cpu.can_run(); }

    void before_end_of_elaboration() override
    {
        QemuDevice::before_end_of_elaboration();

        m_cpu = qemu::Cpu(m_dev);

        if (m_coroutines) {
            m_sc_thread = sc_core::sc_spawn(std::bind(&QemuCpu::mainloop_thread_coroutine, this));
        }

        socket.init(m_dev, "memory");

        m_cpu.set_soft_stopped(true);

        m_cpu.set_end_of_loop_callback(std::bind(&QemuCpu::end_of_loop_cb, this));
        m_cpu.set_kick_callback(std::bind(&QemuCpu::kick_cb, this));

        m_deadline_timer = m_inst.get().timer_new();
        m_deadline_timer->set_callback(std::bind(&QemuCpu::deadline_timer_cb, this));

        m_cpu_hint_ext.set_cpu(m_cpu);
    }

    void halt_cb(const bool& val)
    {
        SCP_TRACE(())("Halt : {}", val);
        if (!m_finished) {
            if (val) {
                m_deadline_timer->del();
                m_qk->stop();
            } else {
                m_qk->start();
                rearm_deadline_timer();
            }
            m_inst.get().lock_iothread();
            m_cpu.halt(val);
            m_inst.get().unlock_iothread();
            m_qemu_kick_ev.async_notify(); // notify the other thread so that the CPU is allowed to continue
        }
    }

    /* NB this _MUST_ be called from an SC_THREAD */
    void reset_cb(const bool& val)
    {
        /* Assume this is on the SystemC thread, so no race condition issues */
        if (m_finished) return;

        if (val) {
            if (m_resetting != none) return; // don't double reset!
            SCP_WARN(())("Start reset");
            m_resetting = start_reset;
            m_cpu.async_safe_run([&] {
                m_cpu.reset(true);
                m_resetting = hold_reset;
                m_start_reset_done_ev.async_notify();
            }); // start the reset (which will pause the CPU)
        } else {
            if (m_resetting == none) return; // don't finish a finished reset!
            while (m_resetting == start_reset) {
                SCP_WARN(())("Hold reset");
                sc_core::wait(m_start_reset_done_ev);
            }
            socket.reset(); // remove DMIs
            m_cpu.reset(false); // call the end-of-reset (which will unpause the CPU)
            m_qk->start(); // restart the QK if it's stopped
            m_qk->reset();
            m_qemu_kick_ev.async_notify(); // notify the other thread so that the CPU is allowed to continue
            SCP_WARN(())("Finished reset");
            m_resetting = none;
        }
        m_qemu_kick_ev.async_notify(); // notify the other thread so that the CPU is allowed to process if required
    }

    virtual void end_of_elaboration() override
    {
        QemuDevice::end_of_elaboration();

        if (!p_gdb_port.is_default_value()) {
            std::stringstream ss;
            SCP_INFO(()) << "Starting gdb server on TCP port " << p_gdb_port;
            ss << "tcp::" << p_gdb_port;
            m_inst.get().start_gdb_server(ss.str());
        }
    }

    virtual void start_of_simulation() override
    {
        m_quantum_ns = int64_t(tlm_utils::tlm_quantumkeeper::get_global_quantum().to_seconds() * 1e9);

        QemuDevice::start_of_simulation();
        if (m_inst.get_tcg_mode() == QemuInstance::TCG_SINGLE) {
            if (m_inst.can_run()) {
                m_qk->start();
            }
        }
        if (!m_coroutines) {
            /* Prepare the CPU for its first run and release it */
            m_cpu.set_soft_stopped(false);
            rearm_deadline_timer();
            m_cpu.kick();
        }

        // We have not managed to figure out the root cause of the issue, but the
        // PC is not properly set before running KVM, or it is possibly reset to
        // 0 by some routine. By setting the vcpu as dirty, we trigger pushing
        // registers to KVM just before running it.
        m_cpu.set_vcpu_dirty(true);

        m_started = true;
    }

    /* QemuInitiatorIface */
    virtual void initiator_customize_tlm_payload(TlmPayload& payload) override
    {
        /* Signal the other end we are a CPU */
        payload.set_extension(&m_cpu_hint_ext);
    }

    virtual void initiator_tidy_tlm_payload(TlmPayload& payload) override { payload.clear_extension(&m_cpu_hint_ext); }

    /*
     * Called by the initiator socket just before a memory transaction.
     * We update our current view of the local time and return it.
     */
    virtual sc_core::sc_time initiator_get_local_time() override
    {
        using sc_core::sc_time;
        using sc_core::SC_NS;

        int64_t vclock_now;

        vclock_now = m_inst.get().get_virtual_clock();
        sc_core::sc_time sc_t = sc_core::sc_time_stamp();
        if (sc_time(vclock_now, SC_NS) > sc_t) {
            m_qk->set(sc_time(vclock_now, SC_NS) - sc_t);
            return m_qk->get_local_time();
        } else {
            return sc_core::SC_ZERO_TIME;
        }
    }

    /*
     * Called after the transaction. We must update our local time view to
     * match t.
     */
    virtual void initiator_set_local_time(const sc_core::sc_time& t) override
    {
        m_qk->set(t);

        if (m_qk->need_sync()) {
            /*
             * Kick the CPU out of its execution loop so that we can sync with
             * the kernel.
             */
            m_cpu.kick();
        }
    }

    /* Expose the async run interface for DMI invalidation */
    virtual void initiator_async_run(qemu::Cpu::AsyncJobFn job) override
    {
        if (!m_finished) m_cpu.async_run(job);
    }
};

#endif
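
A minimal sketch of how a concrete CPU model typically builds on this class. This is a hypothetical example: the wrapper class name and the "cortex-a53" type string are assumptions, not part of this header. Since QemuCpu appends "-cpu" to the type name, the QEMU object type instantiated below would be "cortex-a53-cpu".

class CpuArmCortexA53 : public QemuCpu // hypothetical wrapper, for illustration only
{
public:
    CpuArmCortexA53(const sc_core::sc_module_name& name, QemuInstance& inst)
        : QemuCpu(name, inst, "cortex-a53") // becomes QEMU type "cortex-a53-cpu"
    {
    }
};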