quic/qbox
Loading...
Searching...
No Matches
initiator.h
1/*
2 * This file is part of libqbox
3 * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All Rights Reserved.
4 * Author: GreenSocs 2021
5 *
6 * SPDX-License-Identifier: BSD-3-Clause
7 */
8
9#ifndef _LIBQBOX_PORTS_INITIATOR_H
10#define _LIBQBOX_PORTS_INITIATOR_H
11
12#include <functional>
13#include <limits>
14#include <cassert>
15#include <cinttypes>
16
17#include <tlm>
18
19#include <libqemu-cxx/libqemu-cxx.h>
20
21#include <libgssync.h>
22
23#include <scp/report.h>
24
25#include <qemu-instance.h>
26#include <tlm-extensions/qemu-mr-hint.h>
27#include <tlm-extensions/exclusive-access.h>
28#include <tlm-extensions/shmem_extension.h>
29#include <tlm-extensions/underlying-dmi.h>
30#include <tlm_sockets_buswidth.h>
31
{
    // NOTE(review): the class head (original lines 31-32, presumably
    // 'class QemuInitiatorIface') is missing from this extracted view.
    // This is the interface the owner of the socket (e.g. a CPU model) must
    // implement so the socket can cooperate with it. No virtual destructor is
    // visible here -- confirm one exists in the original source, since
    // implementations are used polymorphically (m_initiator reference).
public:
    using TlmPayload = tlm::tlm_generic_payload;

    /// Let the initiator attach its extensions to an outgoing payload
    /// (called from init_payload() before every transaction).
    virtual void initiator_customize_tlm_payload(TlmPayload& payload) = 0;
    /// Release whatever initiator_customize_tlm_payload() attached.
    virtual void initiator_tidy_tlm_payload(TlmPayload& payload) = 0;
    /// Current local time of the initiator; used as the b_transport time offset.
    virtual sc_core::sc_time initiator_get_local_time() = 0;
    /// Propagate time consumed by a blocking transaction back to the initiator.
    virtual void initiator_set_local_time(const sc_core::sc_time&) = 0;
    /// Run a job asynchronously in the initiator's context (used for safe DMI
    /// invalidation, see invalidate_direct_mem_ptr()).
    virtual void initiator_async_run(qemu::Cpu::AsyncJobFn job) = 0;
};
43
// TLM-2.0 initiator socket specialisation mapping transactions onto a QEMU
// AddressSpace. Bound SC_ZERO_OR_MORE_BOUND so it may legally stay unbound.
// NOTE(review): the class head line (original line 55, presumably
// 'class QemuInitiatorSocket') is missing from this extracted view; the
// base-class list below belongs to it.
template <unsigned int BUSWIDTH = DEFAULT_TLM_BUSWIDTH>
    : public tlm::tlm_initiator_socket<BUSWIDTH, tlm::tlm_base_protocol_types, 1, sc_core::SC_ZERO_OR_MORE_BOUND>,
      public tlm::tlm_bw_transport_if<>
{
private:
    std::mutex m_mutex;   // guards m_ranges, m_dmi_aliases and the per-IOMMU TE caches
    std::vector<std::pair<sc_dt::uint64, sc_dt::uint64>> m_ranges; // pending DMI invalidation ranges
    std::thread::id m_thread_id; // id of the constructing (SystemC) thread, see is_on_sysc()
public:
    SCP_LOGGER(());

    using TlmInitiatorSocket = tlm::tlm_initiator_socket<BUSWIDTH, tlm::tlm_base_protocol_types, 1,
                                                         sc_core::SC_ZERO_OR_MORE_BOUND>;
    using TlmPayload = tlm::tlm_generic_payload;
    using MemTxResult = qemu::MemoryRegionOps::MemTxResult;
    // NOTE(review): several alias declarations (original lines 71-75 -- the
    // ones used below: MemTxAttrs, DmiRegion, DmiRegionAlias,
    // DmiRegionAliasKey) are missing from this extracted view.

protected:
    QemuInstance& m_inst;            // QEMU instance this socket issues accesses for
    QemuInitiatorIface& m_initiator; // owner hooks: payload setup/teardown, local time, async jobs
    qemu::Device m_dev;              // device the root MR was initialized with (init()/init_global())
    gs::runonsysc m_on_sysc;         // helper to run jobs on the SystemC kernel thread
    int reentrancy = 0;              // depth of re-entrant qemu_io_access() calls

    std::atomic<bool> m_finished = false; // set in end_of_simulation(); short-circuits QEMU callbacks

    std::shared_ptr<qemu::AddressSpace> m_as;         // system-memory AS (init_global() path only)
    std::shared_ptr<qemu::MemoryListener> m_listener; // remap listener driving qemu_map()
    std::map<uint64_t, std::shared_ptr<qemu::IOMMUMemoryRegion>> m_mmio_mrs; // IOMMU MRs keyed by start address

    // Small holder owning the root memory region backing this socket.
    // NOTE(review): the class head line (original line 90, presumably
    // 'class m_mem_obj') is missing from this extracted view.
    {
    public:
        std::shared_ptr<qemu::MemoryRegion> m_root;
        // Create a brand-new root MR on the given QEMU instance.
        m_mem_obj(qemu::LibQemu& inst) { m_root.reset(new qemu::MemoryRegion(inst.object_new<qemu::MemoryRegion>())); }
        // Wrap an existing MR (e.g. QEMU's system memory) as the root.
        m_mem_obj(std::shared_ptr<qemu::MemoryRegion> memory): m_root(std::move(memory)) {}
    };
    m_mem_obj* m_r = nullptr;

    // we use an ordered map to find and combine elements
    std::map<DmiRegionAliasKey, DmiRegionAlias::Ptr> m_dmi_aliases;
    using AliasesIterator = std::map<DmiRegionAliasKey, DmiRegionAlias::Ptr>::iterator;
102
103 // Mutable overload: ordered map (std::map-like) floor lookup
104 template <class Map>
105 static inline auto find_region(Map& table, uint64_t addr) -> typename Map::iterator
106 {
107 if (table.empty()) return table.end();
108
109 auto it = table.upper_bound(addr);
110 if (it != table.begin()) {
111 --it;
112 const auto& e = it->second;
113 const uint64_t masked_addr = (addr & ~e.addr_mask);
114
115 if (masked_addr == it->first) {
116 return it;
117 }
118 }
119 return table.end();
120 }
121
122 void init_payload(TlmPayload& trans, tlm::tlm_command command, uint64_t addr, uint64_t* val, unsigned int size)
123 {
124 trans.set_command(command);
125 trans.set_address(addr);
126 trans.set_data_ptr(reinterpret_cast<unsigned char*>(val));
127 trans.set_data_length(size);
128 trans.set_streaming_width(size);
129 trans.set_byte_enable_length(0);
130 trans.set_dmi_allowed(false);
131 trans.set_response_status(tlm::TLM_INCOMPLETE_RESPONSE);
132
133 m_initiator.initiator_customize_tlm_payload(trans);
134 }
135
136 void add_dmi_mr_alias(DmiRegionAlias::Ptr alias)
137 {
138 SCP_INFO(()) << "Adding " << *alias;
139 qemu::MemoryRegion alias_mr = alias->get_alias_mr();
140 m_r->m_root->add_subregion(alias_mr, alias->get_start());
141 alias->set_installed();
142 }
143
144 void del_dmi_mr_alias(const DmiRegionAlias::Ptr alias)
145 {
146 if (!alias->is_installed()) {
147 return;
148 }
149 SCP_INFO(()) << "Removing " << *alias;
150 m_r->m_root->del_subregion(alias->get_alias_mr());
151 }
152
    /// Use DMI data to set up a QEMU IOMMU translation entry (TE) for @p addr
    /// inside the IOMMU region @p iommumr based at @p base_addr.
    ///
    /// Fast path: return a TE previously cached in iommumr->m_mapped_te.
    /// Slow path: probe the target with a DMI request and build one of:
    ///   1/ a TE into the dedicated IOMMU address space (m_as_te),
    ///   2/ a 1-1 passthrough TE into the normal address space (m_as_io), or
    ///   3/ a minimal-page 1-1 TE when no DMI is available at all (MMIO).
    void dmi_translate(qemu::IOMMUMemoryRegion::IOMMUTLBEntry* te, std::shared_ptr<qemu::IOMMUMemoryRegion> iommumr,
                       uint64_t base_addr, uint64_t addr, qemu::IOMMUMemoryRegion::IOMMUAccessFlags flags, int idx)
    {
        TlmPayload ltrans;
        // NOTE(review): the declaration of 'lu_dmi' (original line 168,
        // presumably a gs::tlm_dmi_ex/UnderlyingDMITlmExtension used below)
        // is missing from this extracted view.

        SCP_TRACE(())("dmi_translate for base 0x{:x} addr 0x{:x}", base_addr, addr);

        /*
         * Fast path : check to see if the TE is already cached, if so return it straight away.
         * NB, this happens rarely, as QEMU will cache the result itself, but
         * if the region returned previously covers more than a single min_page_sz, then QEMU will re-request
         * for the other pages.
         */
        {
            std::lock_guard<std::mutex> lock(m_mutex);

            auto it = find_region(iommumr->m_mapped_te, addr);
            if (it != iommumr->m_mapped_te.end()) {
                *te = it->second;
                // This is the DMI cache, so we must re-construct the actual required TE from this case.
                // It will likely have a 'stale' address.
                te->iova = addr;
                te->translated_addr = (it->second.translated_addr & ~(it->second.addr_mask)) +
                                      (addr & (it->second.addr_mask));

                SCP_TRACE(())
                ("FAST translate for 0x{:x} : 0x{:x}->0x{:x} (mask 0x{:x}) perm={}", addr, te->iova,
                 te->translated_addr, te->addr_mask, te->perm);

                return;
            }
        }
        /*
         * Slow path, use DMI to investigate the memory, and see what sort of TE we can set up
         *
         * There are 3 options
         * 1/ a real IOMMU region that should be mapped into the IOMMU address space
         * 2/ a 'dmi-able' region which is not an IOMMU (e.g. local memory)
         * 3/ a 'non-dmi-able' object (e.g. an MMIO device) - a minimum page size will be used for this.
         *
         */

        SCP_DEBUG(())("Doing Translate for {:x} (Absolute 0x{:x})", addr, addr + base_addr);

        // NOTE(review): the declaration of 'tmp' (original line 209,
        // presumably 'uint64_t tmp' used as a dummy data pointer) is missing
        // from this extracted view.
        init_payload(ltrans, tlm::TLM_IGNORE_COMMAND, base_addr + addr, &tmp, 0);
        ltrans.set_extension(&lu_dmi);
        tlm::tlm_dmi ldmi_data;

        if ((*this)->get_direct_mem_ptr(ltrans, ldmi_data)) {
            if (lu_dmi.has_dmi(gs::tlm_dmi_ex::dmi_iommu)) {
                // Add te to 'special' IOMMU address space
                tlm::tlm_dmi lu_dmi_data = lu_dmi.get_last(gs::tlm_dmi_ex::dmi_iommu);
                if (0 == iommumr->m_dmi_aliases_te.count(lu_dmi_data.get_start_address())) {
                    qemu::RcuReadLock l_rcu_read_lock = m_inst.get().rcu_read_lock_new();
                    // take our own memory here, dont use an alias as
                    // we may have different sizes for the underlying DMI

                    DmiRegion region = DmiRegion(lu_dmi_data, 0, m_inst.get());
                    SCP_DEBUG(())
                    ("Adding IOMMU DMI Region start 0x{:x} - 0x{:x}", lu_dmi_data.get_start_address(),
                     lu_dmi_data.get_start_address() + region.get_size() - 1);
                    iommumr->m_root_te.add_subregion(region.get_mut_mr(), lu_dmi_data.get_start_address());
                    iommumr->m_dmi_aliases_te[lu_dmi_data.get_start_address()] = std::make_shared<DmiRegion>(region);
                }

                te->target_as = iommumr->m_as_te->get_ptr();
                auto mask = ldmi_data.get_end_address() - ldmi_data.get_start_address();
                te->addr_mask = mask;
                te->iova = addr;
                te->translated_addr = (lu_dmi_data.get_start_address() +
                                       (ldmi_data.get_dmi_ptr() - lu_dmi_data.get_dmi_ptr())) +
                                      (addr & mask);
                te->perm = (qemu::IOMMUMemoryRegion::IOMMUAccessFlags)ldmi_data.get_granted_access();

                SCP_DEBUG(())
                ("Translate IOMMU 0x{:x}->0x{:x} (mask 0x{:x})", te->iova, te->translated_addr, te->addr_mask);

            } else {
                // no underlying DMI, add a 1-1 passthrough to normal address space
                if (0 == iommumr->m_dmi_aliases_io.count(ldmi_data.get_start_address())) {
                    qemu::RcuReadLock l_rcu_read_lock = m_inst.get().rcu_read_lock_new();
                    DmiRegionAlias::Ptr alias = m_inst.get_dmi_manager().get_new_region_alias(ldmi_data);
                    SCP_DEBUG(()) << "Adding DMI Region alias " << *alias;
                    qemu::MemoryRegion alias_mr = alias->get_alias_mr();
                    iommumr->m_root_io.add_subregion(alias_mr, alias->get_start());
                    alias->set_installed();
                    iommumr->m_dmi_aliases_io[alias->get_start()] = alias;
                }
                auto mask = iommumr->min_page_sz;
                te->target_as = iommumr->m_as_io->get_ptr();
                te->addr_mask = mask;
                te->iova = addr & ~mask;
                te->translated_addr = (addr & ~mask) + base_addr;
                te->perm = (qemu::IOMMUMemoryRegion::IOMMUAccessFlags)ldmi_data.get_granted_access();
                // NOTE(review): here min_page_sz is used directly as a mask,
                // while the no-DMI branch below uses (1 << min_page_sz) - 1.
                // One of the two is likely inconsistent -- confirm against the
                // original source.

                SCP_DEBUG(())
                ("Translate 1-1 passthrough 0x{:x}->0x{:x} (mask 0x{:x})", te->iova, te->translated_addr,
                 te->addr_mask);
            }

            std::lock_guard<std::mutex> lock(m_mutex);
            /* It is possible that this region overlaps an existing region (a 1-1 MMIO).
             * QEMU will likely take this region, but both should be valid, and the other
             * region will be removed in due course
             */
            iommumr->m_mapped_te[addr & ~te->addr_mask] = *te;

            SCP_DEBUG(())
            ("Caching TE at addr 0x{:x} (mask {:x})", addr & ~te->addr_mask, te->addr_mask);

        } else {
            // No DMI at all, either an MMIO, or a DMI failure, setup for a 1-1 translation for the minimal page
            // in the normal address space

            te->target_as = iommumr->m_as_io->get_ptr();
            te->addr_mask = (1 << iommumr->min_page_sz) - 1;
            te->iova = addr & ~te->addr_mask;
            te->translated_addr = (addr & ~te->addr_mask) + base_addr;
            te->perm = qemu::IOMMUMemoryRegion::IOMMU_RW;

            // NOTE(review): this read/insert of m_mapped_te is NOT under
            // m_mutex, unlike the fast path and the DMI branch above --
            // confirm whether that is intentional.
            if (iommumr->m_mapped_te.find(addr & ~te->addr_mask) != iommumr->m_mapped_te.end()) {
                SCP_FATAL(())("Trying to add a 1-1 mapping over an existing mapping");
            }
            // We need to add it so we can remove it (!)
            iommumr->m_mapped_te[addr & ~te->addr_mask] = *te;

            SCP_DEBUG(())
            ("Translate 1-1 limited passthrough 0x{:x}->0x{:x} (mask 0x{:x})", te->iova, te->translated_addr,
             te->addr_mask);
        }
        ltrans.clear_extension(&lu_dmi);
    }
298
    /// Request a DMI region for @p trans (which carried a DMI hint), ask the
    /// QEMU instance DMI manager for an alias and map it onto the root MR.
    ///
    /// Also handles the IOMMU case: when the target reports an IOMMU
    /// (dmi_iommu), an IOMMUMemoryRegion pair is created and installed; its
    /// actual population happens later in the dmi_translate() callback.
    /// @return the DMI descriptor obtained (default-constructed on failure).
    tlm::tlm_dmi check_dmi_hint_locked(TlmPayload& trans)
    {
        assert(trans.is_dmi_allowed());
        tlm::tlm_dmi dmi_data;
        int shm_fd = -1;
        auto addr = trans.get_address();
        /* We got a DMI hint, lets just make sure this isn't in an existing m_mmio_mrs region
         * Because if it is, what probably happened is that we took the MMIO path
         * rather than getting a translation done. We should invalidate any mmio 1-1 mapping
         * for this address */
        for (auto m : m_mmio_mrs) {
            auto mr_start = m.first;
            auto mr_end = m.first + m.second->get_size() - 1;
            if (mr_start <= addr && addr <= mr_end) {
                // Use masked floor lookup to find the TE covering 'addr'
                auto it = find_region(m.second->m_mapped_te, addr - mr_start);
                if (it != m.second->m_mapped_te.end()) {
                    uint64_t removed_addr = it->first + mr_start;
                    SCP_TRACE(())("Suspected MMIO Region removed 0x{:x} (mask 0x{:x})", removed_addr,
                                  it->second.addr_mask);
                    m.second->iommu_unmap(&(it->second));
                    m.second->m_mapped_te.erase(it);
                }
                SCP_TRACE(())("Suspected MMIO Region(s) removed arround address 0x{:x}", addr);
                return dmi_data;
            }
        }

        SCP_INFO(()) << "DMI request for address 0x" << std::hex << trans.get_address();

        // It is 'safer' from the SystemC perspective to m_on_sysc.run_on_sysc([this,
        // &trans]{...}).
        // NOTE(review): the declaration of 'u_dmi' (original line 371,
        // presumably a gs::tlm_dmi_ex/UnderlyingDMITlmExtension) is missing
        // from this extracted view.
        trans.set_extension(&u_dmi);
        bool dmi_valid = (*this)->get_direct_mem_ptr(trans, dmi_data);
        trans.clear_extension(&u_dmi);
        if (!dmi_valid) {
            SCP_INFO(())("No DMI available for {:x}", trans.get_address());
            /* this is used by the map function below
             * - a better plan may be to tag memories to be mapped so we dont need this
             */
            if (u_dmi.has_dmi(gs::tlm_dmi_ex::dmi_mapped)) {
                tlm::tlm_dmi first_map = u_dmi.get_first(gs::tlm_dmi_ex::dmi_mapped);
                return first_map;
            }
            if (u_dmi.has_dmi(gs::tlm_dmi_ex::dmi_nomap)) {
                tlm::tlm_dmi first_nomap = u_dmi.get_first(gs::tlm_dmi_ex::dmi_nomap);
                return first_nomap;
            }
            return dmi_data;
        }

        /*
         * This is the 'special' case of IOMMU's which require an IOMMU memory region setup
         * The IOMMU will be constructed here, but not populated - that will happen in the callback
         * There will be a 'pair' of new regions, one to hold non iommu regions within this space,
         * the other to hold iommu regions themselves.
         *
         * In extreme circumstances, if the IOMMU DMI to this region previously failed, we may have
         * ended up with a normal DMI region here, which needs removing. We do that here, and then simply
         * return and wait for a new access to sort things out.
         */
        if (u_dmi.has_dmi(gs::tlm_dmi_ex::dmi_iommu)) {
            /* We have an IOMMU request setup an IOMMU region */
            SCP_INFO(())("IOMMU DMI available for {:x}", trans.get_address());

            /* The first mapped DMI will be the scope of the IOMMU region from our perspective */
            tlm::tlm_dmi first_map = u_dmi.get_first(gs::tlm_dmi_ex::dmi_mapped);

            uint64_t start = first_map.get_start_address();
            uint64_t size = (first_map.get_end_address() - first_map.get_start_address()) + 1;
            auto itr = m_mmio_mrs.find(start);
            if (itr == m_mmio_mrs.end()) {
                // Better check for overlapping iommu's - they must be banned !!

                qemu::RcuReadLock rcu_read_lock = m_inst.get().rcu_read_lock_new();

                /* invalidate any 'old' regions we happen to have mapped previously */
                invalidate_single_range(start, start + size - 1);

                SCP_INFO(())
                ("Adding IOMMU for VA 0x{:x} [0x{:x} - 0x{:x}]", trans.get_address(), start, start + size - 1);

                using namespace std::placeholders;
                qemu::MemoryRegionOpsPtr ops;
                ops = m_inst.get().memory_region_ops_new();
                ops->set_read_callback(std::bind(&QemuInitiatorSocket::qemu_io_read, this, _1, _2, _3, _4));
                ops->set_write_callback(std::bind(&QemuInitiatorSocket::qemu_io_write, this, _1, _2, _3, _4));
                ops->set_max_access_size(8);

                // NOTE(review): the make_shared constructor argument line
                // (original line 431) and the head of the translate-callback
                // lambda (original line 434) are missing from this extracted
                // view; the visible tail shows the lambda forwarding to
                // dmi_translate(te, iommumr, start, addr, flags, idx).
                auto iommumr = std::make_shared<qemu::IOMMUMemoryRegion>(

                iommumr->init(*iommumr, "dmi-manager-iommu", size, ops,
                              qemu::IOMMUMemoryRegion::IOMMUAccessFlags flags,
                              int idx) { dmi_translate(te, iommumr, start, addr, flags, idx); });
                {
                    std::lock_guard<std::mutex> lock(m_mutex);
                    m_mmio_mrs[start] = iommumr;
                }
                m_r->m_root->add_subregion(*iommumr, start);

            } else {
                // Previously when looking up a TE, we failed to get the lock, so the DMI failed, we ended up in a
                // limited passthrough. Which causes us to re-arrive here.... but, with a DMI hint. Hopefully next time
                // the TE is looked up, we'll get the lock and re-establish the translation. In any case we should do
                // nothing and simply return
                // Moving to a cached TE will improve speed and prevent this from happening?
                SCP_DEBUG(())
                ("Memory request should be directed via MMIO interface {:x} {:x}", start, trans.get_address());

                // std::lock_guard<std::mutex> lock(m_mutex);

                uint64_t start_range = itr->first;
                uint64_t end_range = itr->first + itr->second->get_size() - 1;

                invalidate_direct_mem_ptr(start_range, end_range);
            }
            return dmi_data;
        }

        // NOTE(review): the declaration of 'shm_ext' (original line 462,
        // presumably obtained via trans.get_extension<ShmemIDExtension>())
        // is missing from this extracted view.
        // it's ok that ShmemIDExtension is not added to trans as this should only happen when
        // memory is a shared memory type.
        if (shm_ext) {
            shm_fd = shm_ext->m_fd;
        }

        SCP_INFO(()) << "DMI Adding for address 0x" << std::hex << trans.get_address();

        // The upper limit is set within QEMU by the TBU
        // e.g. 1k small pages for ARM.
        // setting to 1/2 the size of the ARM TARGET_PAGE_SIZE,
        // Comment from QEMU code:
        /* The physical section number is ORed with a page-aligned
         * pointer to produce the iotlb entries. Thus it should
         * never overflow into the page-aligned value.
         */
#define MAX_MAP 250

        // Current function may be called by the MMIO thread which does not hold
        // any RCU read lock. This is required in case of a memory transaction
        // commit on a TCG accelerated Qemu instance
        qemu::RcuReadLock rcu_read_lock = m_inst.get().rcu_read_lock_new();

        if (m_dmi_aliases.size() > MAX_MAP) {
            SCP_FATAL(())("Too many DMI regions requested, consider using an IOMMU");
        }
        uint64_t start = dmi_data.get_start_address();
        uint64_t end = dmi_data.get_end_address();

        if (0 == m_dmi_aliases.count(start)) {
            SCP_INFO(()) << "Adding DMI for range [0x" << std::hex << dmi_data.get_start_address() << "-0x" << std::hex
                         << dmi_data.get_end_address() << "]";

            DmiRegionAlias::Ptr alias = m_inst.get_dmi_manager().get_new_region_alias(dmi_data, shm_fd);

            m_dmi_aliases[start] = alias;
            add_dmi_mr_alias(m_dmi_aliases[start]);
        } else {
            SCP_INFO(())("Already have DMI for 0x{:x}", start);
        }
        return dmi_data;
    }
505
    /// If @p trans carries a QemuMrHintTlmExtension originating from this
    /// very QEMU instance, alias the hinted memory region directly under our
    /// root MR so future accesses bypass the TLM path.
    void check_qemu_mr_hint(TlmPayload& trans)
    {
        QemuMrHintTlmExtension* ext = nullptr;
        // NOTE(review): original lines 509-510 are missing from this
        // extracted view (presumably the declaration of 'mapping_addr').

        trans.get_extension(ext);

        if (ext == nullptr) {
            return;
        }

        // NOTE(review): the declaration of 'target_mr' (original line 517,
        // presumably taken from ext) is missing from this extracted view.
        // Only regions belonging to the same QEMU instance can be aliased.
        if (target_mr.get_inst_id() != m_dev.get_inst_id()) {
            return;
        }

        mapping_addr = trans.get_address() - ext->get_offset();

        // NOTE(review): the declaration of 'mr' (original lines 525-526,
        // presumably a fresh qemu::MemoryRegion) is missing from this view.
        mr.init_alias(m_dev, "mr-alias", target_mr, 0, target_mr.get_size());
        m_r->m_root->add_subregion(mr, mapping_addr);
    }
530
    /// Regular (non-debug, non-exclusive) access: run b_transport on the
    /// SystemC thread with the iothread released, then process DMI hints and
    /// propagate the elapsed local time back to the initiator.
    void do_regular_access(TlmPayload& trans)
    {
        using sc_core::sc_time;

        uint64_t addr = trans.get_address();
        sc_time now = m_initiator.initiator_get_local_time();

        // Release the QEMU iothread while blocked in SystemC to avoid
        // deadlocking against QEMU-side activity.
        m_inst.get().unlock_iothread();
        m_on_sysc.run_on_sysc([this, &trans, &now] { (*this)->b_transport(trans, now); });
        m_inst.get().lock_iothread();
        /*
         * Reset transaction address before dmi check (could be altered by
         * b_transport).
         */
        trans.set_address(addr);
        check_qemu_mr_hint(trans);
        if (trans.is_dmi_allowed()) {
            // NOTE(review): original line 548 is missing from this extracted
            // view -- presumably a call to check_dmi_hint_locked(trans).
        }

        m_initiator.initiator_set_local_time(now);
    }
553
554 void do_debug_access(TlmPayload& trans)
555 {
556 m_inst.get().unlock_iothread();
557 m_on_sysc.run_on_sysc([this, &trans] { (*this)->transport_dbg(trans); });
558 m_inst.get().lock_iothread();
559 }
560
561 void do_direct_access(TlmPayload& trans)
562 {
563 sc_core::sc_time now = m_initiator.initiator_get_local_time();
564 (*this)->b_transport(trans, now);
565 }
566
    /// Dispatch a QEMU-initiated memory access into the TLM world and map the
    /// TLM response status back onto a QEMU MemTxResult.
    ///
    /// Exclusive accesses go straight through (iothread kept). Otherwise the
    /// instance-wide g_rec_qemu_io_lock serializes accesses while still
    /// permitting re-entrant calls; re-entrant calls are downgraded to direct
    /// accesses (no SystemC side effects).
    MemTxResult qemu_io_access(tlm::tlm_command command, uint64_t addr, uint64_t* val, unsigned int size,
                               MemTxAttrs attrs)
    {
        TlmPayload trans;
        // After end_of_simulation, refuse all traffic.
        if (m_finished) return qemu::MemoryRegionOps::MemTxError;

        init_payload(trans, command, addr, val, size);

        if (trans.get_extension<ExclusiveAccessTlmExtension>()) {
            /* in the case of an exclusive access keep the iolock (and assume NO side-effects)
             * clearly dangerous, but exclusives are not guaranteed to work on IO space anyway
             */
            do_direct_access(trans);
        } else {
            if (!m_inst.g_rec_qemu_io_lock.try_lock() && !is_on_sysc()) {
                /* Allow only a single access, but handle re-entrant code,
                 * while allowing side-effects in SystemC (e.g. calling wait)
                 * [NB re-entrant code caused via memory listeners to
                 * creation of memory regions (due to DMI) in some models]
                 */
                // Drop the iothread before blocking on the lock to avoid a
                // lock-order deadlock with the holder.
                m_inst.get().unlock_iothread();
                m_inst.g_rec_qemu_io_lock.lock();
                m_inst.get().lock_iothread();
            }
            // NOTE(review): when try_lock() fails but we ARE on the SystemC
            // thread, execution proceeds without acquiring the lock, yet
            // unlock() below runs unconditionally -- presumably
            // g_rec_qemu_io_lock is recursive and already held by this
            // thread in that case; confirm against the original source.
            reentrancy++;

            /* Force re-entrant code to use a direct access (safe for reentrancy with no side effects) */
            if (reentrancy > 1) {
                do_direct_access(trans);
            } else if (attrs.debug) {
                do_debug_access(trans);
            } else {
                do_regular_access(trans);
            }

            reentrancy--;
            m_inst.g_rec_qemu_io_lock.unlock();
        }
        // Let the initiator detach whatever init_payload() attached.
        m_initiator.initiator_tidy_tlm_payload(trans);

        switch (trans.get_response_status()) {
        case tlm::TLM_OK_RESPONSE:
            return qemu::MemoryRegionOps::MemTxOK;

        case tlm::TLM_ADDRESS_ERROR_RESPONSE:
            return qemu::MemoryRegionOps::MemTxDecodeError;

        default:
            return qemu::MemoryRegionOps::MemTxError;
        }
    }
618
619public:
620 MemTxResult qemu_io_read(uint64_t addr, uint64_t* val, unsigned int size, MemTxAttrs attrs)
621 {
622 return qemu_io_access(tlm::TLM_READ_COMMAND, addr, val, size, attrs);
623 }
624
625 MemTxResult qemu_io_write(uint64_t addr, uint64_t val, unsigned int size, MemTxAttrs attrs)
626 {
627 return qemu_io_access(tlm::TLM_WRITE_COMMAND, addr, &val, size, attrs);
628 }
629
630 bool is_on_sysc() const { return std::this_thread::get_id() == m_thread_id; }
631
    // NOTE(review): the constructor signature (original line 632) is missing
    // from this extracted view; judging by the initializer list it takes at
    // least a name, a QemuInitiatorIface& initiator and a QemuInstance& inst.
    : TlmInitiatorSocket(name)
    , m_inst(inst)
    , m_initiator(initiator)
    , m_thread_id(std::this_thread::get_id())
    , m_on_sysc(sc_core::sc_gen_unique_name("initiator_run_on_sysc"))
    {
        SCP_DEBUG(()) << "QemuInitiatorSocket constructor";
        // Self-bind the backward path: this class implements
        // tlm_bw_transport_if itself (nb_transport_bw / invalidate_direct_mem_ptr).
        TlmInitiatorSocket::bind(*static_cast<tlm::tlm_bw_transport_if<>*>(this));
    }
642
643 void init(qemu::Device& dev, const char* prop)
644 {
645 using namespace std::placeholders;
646
647 qemu::LibQemu& inst = m_inst.get();
648 qemu::MemoryRegionOpsPtr ops;
649
650 m_r = new m_mem_obj(inst); // oot = inst.object_new<qemu::MemoryRegion>();
651 ops = inst.memory_region_ops_new();
652
653 ops->set_read_callback(std::bind(&QemuInitiatorSocket::qemu_io_read, this, _1, _2, _3, _4));
654 ops->set_write_callback(std::bind(&QemuInitiatorSocket::qemu_io_write, this, _1, _2, _3, _4));
655 ops->set_max_access_size(8);
656
657 m_r->m_root->init_io(dev, TlmInitiatorSocket::name(), std::numeric_limits<uint64_t>::max(), ops);
658 dev.set_prop_link(prop, *m_r->m_root);
659
660 m_dev = dev;
661 }
662
663 void end_of_simulation()
664 {
665 m_finished = true;
666 }
667
    // NOTE(review): the destructor signature (original line 668) is missing
    // from this extracted view. The body is intentionally empty: m_r is
    // deliberately leaked, see the disabled code below.
    {
#if 0
        // This could happen during void end_of_simulation() but there is a race with other units trying
        // to pull down their DMI's
        if (m_r) {
            if (m_r->m_root) {
                m_r->m_root->removeSubRegions();
            }
            delete m_r;
            m_r = nullptr;
        }
#endif
        // dmimgr_unlock();
    }
683
    // NOTE(review): the signature of this memory-listener map callback
    // (original line 684, presumably void qemu_map(addr, len, ...)) is
    // missing from this extracted view.
    // Walks [addr, addr+len) requesting DMI for each sub-range so that a
    // region remapped into system memory gets its DMI pointers re-established.
    {
        // this function is relatively expensive, and called a lot, it should be done a different way and removed.
        if (m_finished) return;

        SCP_DEBUG(()) << "Mapping request for address [0x" << std::hex << addr << "-0x" << addr + len - 1 << "]";

        TlmPayload trans;
        // NOTE(review): original lines 692-693 are missing from this
        // extracted view -- presumably the declarations of 'current_addr'
        // (initialized to addr) and the dummy data word 'temp'.
        init_payload(trans, tlm::TLM_IGNORE_COMMAND, current_addr, &temp, 0);
        trans.set_dmi_allowed(true);

        while (current_addr < addr + len) {
            tlm::tlm_dmi dmi_data = check_dmi_hint_locked(trans);

            // Current addr is an absolute address while the dmi range might be relative
            // hence not necesseraly current_addr falls withing dmi_range address boundaries
            // TODO: is there a way to retrieve the dmi range block offset?
            SCP_INFO(()) << "0x" << std::hex << current_addr << " mapped [0x" << dmi_data.get_start_address() << "-0x"
                         << dmi_data.get_end_address() << "]";

            // The allocated range may not span the whole length required for mapping
            assert(dmi_data.get_end_address() >= current_addr);
            current_addr = dmi_data.get_end_address();
            if (current_addr >= addr + len) break; // Catch potential loop-rounds
            current_addr += 1;
            trans.set_address(current_addr);
        }

        m_initiator.initiator_tidy_tlm_payload(trans);
    }
716
717 void init_global(qemu::Device& dev)
718 {
719 using namespace std::placeholders;
720
721 qemu::LibQemu& inst = m_inst.get();
722 qemu::MemoryRegionOpsPtr ops;
723 ops = inst.memory_region_ops_new();
724
725 ops->set_read_callback(std::bind(&QemuInitiatorSocket::qemu_io_read, this, _1, _2, _3, _4));
726 ops->set_write_callback(std::bind(&QemuInitiatorSocket::qemu_io_write, this, _1, _2, _3, _4));
727 ops->set_max_access_size(8);
728
729 auto system_memory = inst.get_system_memory();
730 system_memory->init_io(dev, TlmInitiatorSocket::name(), std::numeric_limits<uint64_t>::max() - 1, ops);
731 m_r = new m_mem_obj(std::move(system_memory));
732
733 m_as = inst.address_space_get_system_memory();
734 // System memory has been changed from container to "io", this is relevant
735 // for flatview, and to reflect that we can just update the topology
736 m_as->update_topology();
737
738 /* Sometimes memory regions are added to the global address space by (for instance) virt devices rempaiing
739 * system memory such that they can directly access it. The mapped memory region needs it's DMI pointers
740 * adjusted. qemu_map will do that remapping of the DMI.
741 */
742 m_listener = inst.memory_listener_new();
743 m_listener->set_map_callback(std::bind(&QemuInitiatorSocket::qemu_map, this, _1, _2, _3));
744 m_listener->register_as(m_as);
745
746 m_dev = dev;
747 }
748
749 /* tlm::tlm_bw_transport_if<> */
750 virtual tlm::tlm_sync_enum nb_transport_bw(tlm::tlm_generic_payload& trans, tlm::tlm_phase& phase,
751 sc_core::sc_time& t)
752 {
753 /* Should not be reached */
754 assert(false);
755 return tlm::TLM_COMPLETED;
756 }
757
758 virtual AliasesIterator remove_alias(AliasesIterator it)
759 {
760 DmiRegionAlias::Ptr r = it->second; /*
761 * Invalidate this region. Do not bother with
762 * partial invalidation as it's really not worth
763 * it. Better let the target model returns sub-DMI
764 * regions during future accesses.
765 */
766
767 /*
768 * Mark the whole region this alias maps to as invalid. This has
769 * the effect of marking all the other aliases mapping to the same
770 * region as invalid too. If a DMI request for the same region is
771 * already in progress, it will have a chance to detect it is now
772 * invalid before mapping it on the QEMU root MR (see
773 * check_dmi_hint comment).
774 */
775 // r->invalidate_region();
776
777 assert(r->is_installed());
778 // if (!r->is_installed()) {
779 /*
780 * The alias is not mapped onto the QEMU root MR yet. Simply
781 * skip it. It will be removed from m_dmi_aliases by
782 * check_dmi_hint.
783 */
784 // return it++;
785 // }
786
787 /*
788 * Remove the alias from the root MR. This is enough to perform
789 * required invalidations on QEMU's side in a thread-safe manner.
790 */
791 del_dmi_mr_alias(r);
792
793 /*
794 * Remove the alias from the collection. The DmiRegionAlias object
795 * is then destructed, leading to the destruction of the DmiRegion
796 * shared pointer it contains. When no more alias reference this
797 * region, it is in turn destructed, effectively destroying the
798 * corresponding memory region in QEMU.
799 */
800 return m_dmi_aliases.erase(it);
801 }
802
803private:
804 void invalidate_single_range(sc_dt::uint64 start_range, sc_dt::uint64 end_range)
805 {
806 auto it = m_dmi_aliases.upper_bound(start_range);
807
808 if (it != m_dmi_aliases.begin()) {
809 /*
810 * Start with the preceding region, as it may already cross the
811 * range we must invalidate.
812 */
813 it--;
814 }
815 while (it != m_dmi_aliases.end()) {
816 DmiRegionAlias::Ptr r = it->second;
817
818 if (r->get_start() > end_range) {
819 /* We've got out of the invalidation range */
820 break;
821 }
822
823 if (r->get_end() < start_range) {
824 /* We are not in yet */
825 it++;
826 continue;
827 }
828
829 it = remove_alias(it);
830
831 SCP_DEBUG(()) << "Invalidated region [0x" << std::hex << r->get_start() << ", 0x" << std::hex
832 << r->get_end() << "]";
833 }
834 }
835
836 void invalidate_ranges_safe_cb()
837 {
838 if (m_finished) return;
839 std::lock_guard<std::mutex> lock(m_mutex);
840
841 SCP_DEBUG(()) << "Invalidating " << m_ranges.size() << " ranges";
842 auto rit = m_ranges.begin();
843 while (rit != m_ranges.end()) {
844 invalidate_single_range(rit->first, rit->second);
845 rit = m_ranges.erase(rit);
846 }
847 }
848
    // NOTE(review): the signature of this helper (original line 849 --
    // presumably bool region_match(start, end, addr, mask)) and its return
    // statement (original line 852) are missing from this extracted view.
    // From its call sites in invalidate_direct_mem_ptr it apparently tests
    // whether the region [addr, addr + mask] intersects [start, end] --
    // confirm against the original source.
    {
        uint64_t end = addr + mask + 1;
    }
854
855public:
    /// tlm_bw_transport_if: a target invalidates [start_range, end_range].
    /// If the range hits a managed IOMMU MR, unmap the cached TEs it covers;
    /// otherwise queue the range and invalidate the plain DMI aliases
    /// asynchronously on the initiator (avoids lock-order issues with QEMU).
    virtual void invalidate_direct_mem_ptr(sc_dt::uint64 start_range, sc_dt::uint64 end_range)
    {
        if (m_finished) return;
        SCP_DEBUG(()) << "DMI invalidate [0x" << std::hex << start_range << ", 0x" << std::hex << end_range << "]";

        std::lock_guard<std::mutex> lock(m_mutex);

        for (auto m : m_mmio_mrs) {
            auto mr_start = m.first;
            auto mr_end = m.first + m.second->get_size() - 1;
            // if the MR overlaps or is overlapped by the invalidation range
            if (start_range <= mr_end && mr_start <= end_range) {
                // NOTE(review): original lines 868-869 are missing from this
                // extracted view -- presumably the computation of
                // 'mr_rel_start'/'mr_rel_end' (the invalidation range clamped
                // to the MR and rebased to MR-relative addresses).
                auto it = m.second->m_mapped_te.lower_bound(mr_rel_start);

                // Check the previous interval (it might still match)
                if (it != m.second->m_mapped_te.begin()) {
                    auto prev = std::prev(it);
                    // only checking if the start of the region is in the area requested.
                    if (region_match(mr_rel_start, mr_rel_end, prev->first, prev->second.addr_mask)) {
                        m.second->iommu_unmap(&(prev->second));
                        m.second->m_mapped_te.erase(prev);
                        // NOTE(review): BUG -- 'prev' is dereferenced after
                        // erase(prev) (UB), and 'it->second' logs the
                        // following entry's mask. Capture the values before
                        // erasing.
                        SCP_TRACE(())("Region removed 0x{:x} (mask 0x{:x})", prev->first, it->second.addr_mask);
                    }
                }

                // Scan forward while region bases are <= end
                while (it != m.second->m_mapped_te.end() && it->first <= mr_rel_end) {
                    if (region_match(mr_rel_start, mr_rel_end, it->first, it->second.addr_mask)) {
                        m.second->iommu_unmap(&(it->second));
                        it = m.second->m_mapped_te.erase(it); // erase returns next iterator
                        // NOTE(review): BUG -- 'it' was just advanced by
                        // erase(), so this logs the *next* entry and
                        // dereferences end() when the erased element was the
                        // last one (UB). Capture the values before erasing.
                        SCP_TRACE(())("Region removed 0x{:x} (mask 0x{:x})", it->first, it->second.addr_mask);
                    } else {
                        ++it;
                    }
                }

                // NOTE(review): the continuation of this SCP_DEBUG argument
                // list (original line 895) is missing from this view.
                SCP_DEBUG(())("Region(s) removed in range [0x{:x} - 0x{:x}] from mr [0x{:x} - 0x{:x}]", start_range,
                return;
            }
        }

        // Plain DMI alias case: queue the range and invalidate it from a safe
        // context on the initiator side.
        m_ranges.push_back(std::make_pair(start_range, end_range));
        m_initiator.initiator_async_run([&]() { invalidate_ranges_safe_cb(); });
    }
903
904 virtual void reset()
905 {
906 std::lock_guard<std::mutex> lock(m_mutex);
907
908 for (auto m : m_mmio_mrs) {
909 m.second->m_mapped_te.clear();
910 auto it = m_dmi_aliases.begin();
911 while (it != m_dmi_aliases.end()) {
912 DmiRegionAlias::Ptr r = it->second;
913 it = remove_alias(it);
914 }
915 }
916 }
917};
918
919#endif
Exclusive load/store TLM extension.
Definition exclusive-access.h:36
Definition initiator.h:33
Definition initiator.h:91
TLM-2.0 initiator socket specialisation for QEMU AddressSpace mapping.
Definition initiator.h:58
tlm::tlm_dmi check_dmi_hint_locked(TlmPayload &trans)
Request a DMI region, ask the QEMU instance DMI manager for a DMI region alias for it and map it on t...
Definition initiator.h:339
void dmi_translate(qemu::IOMMUMemoryRegion::IOMMUTLBEntry *te, std::shared_ptr< qemu::IOMMUMemoryRegion > iommumr, uint64_t base_addr, uint64_t addr, qemu::IOMMUMemoryRegion::IOMMUAccessFlags flags, int idx)
Use DMI data to set up a qemu IOMMU translate.
Definition initiator.h:164
An alias to a DMI region.
Definition dmi-manager.h:140
a DMI region
Definition dmi-manager.h:77
DmiRegionAlias::Ptr get_new_region_alias(const tlm::tlm_dmi &info, int fd=-1)
Create a new alias for the DMI region designated by info
Definition dmi-manager.h:283
This class encapsulates a libqemu-cxx qemu::LibQemu instance. It handles QEMU parameters and instance...
Definition qemu-instance.h:89
qemu::LibQemu & get()
Returns the underlying qemu::LibQemu instance.
Definition qemu-instance.h:474
QemuInstanceDmiManager & get_dmi_manager()
Returns the locked QemuInstanceDmiManager instance.
Definition qemu-instance.h:490
Definition qemu-mr-hint.h:17
Definition target.h:160
Definition shmem_extension.h:24
Definition underlying-dmi.h:34
Definition runonsysc.h:23
bool run_on_sysc(std::function< void()> job_entry, bool wait=true)
Run a job on the SystemC kernel thread.
Definition runonsysc.h:211
Definition libqemu-cxx.h:613
Definition libqemu-cxx.h:87
Definition libqemu-cxx.h:489
Definition libqemu-cxx.h:363
Definition libqemu-cxx.h:214
Definition libqemu-cxx.h:472
Definition libqemu-cxx.h:326