initiator.h
1/*
2 * This file is part of libqbox
3 * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All Rights Reserved.
4 * Author: GreenSocs 2021
5 *
6 * SPDX-License-Identifier: BSD-3-Clause
7 */
8
9#ifndef _LIBQBOX_PORTS_INITIATOR_H
10#define _LIBQBOX_PORTS_INITIATOR_H
11
12#include <functional>
13#include <limits>
14#include <cassert>
15#include <cinttypes>
16
17#include <tlm>
18
19#include <libqemu-cxx/libqemu-cxx.h>
20
21#include <libgssync.h>
22
23#include <scp/report.h>
24
25#include <qemu-instance.h>
26#include <tlm-extensions/qemu-mr-hint.h>
27#include <tlm-extensions/exclusive-access.h>
28#include <tlm-extensions/shmem_extension.h>
29#include <tlm-extensions/underlying-dmi.h>
30#include <tlm_sockets_buswidth.h>
31
32/*
33 * Define this as 1 if you want to enable the cache debug mechanism below.
34 */
35#define DEBUG_CACHE 0
36
37class QemuInitiatorIface
38{
39public:
40 using TlmPayload = tlm::tlm_generic_payload;
41
42 virtual void initiator_customize_tlm_payload(TlmPayload& payload) = 0;
43 virtual void initiator_tidy_tlm_payload(TlmPayload& payload) = 0;
44 virtual sc_core::sc_time initiator_get_local_time() = 0;
45 virtual void initiator_set_local_time(const sc_core::sc_time&) = 0;
46 virtual void initiator_async_run(qemu::Cpu::AsyncJobFn job) = 0;
47};
48
55/**
56 * @brief TLM-2.0 initiator socket specialisation for QEMU AddressSpace
57 * mapping
58 */
59template <unsigned int BUSWIDTH = DEFAULT_TLM_BUSWIDTH>
60class QemuInitiatorSocket
61 : public tlm::tlm_initiator_socket<BUSWIDTH, tlm::tlm_base_protocol_types, 1, sc_core::SC_ZERO_OR_MORE_BOUND>,
62 public tlm::tlm_bw_transport_if<>
63{
64private:
65 std::mutex m_mutex;
66 std::vector<std::pair<sc_dt::uint64, sc_dt::uint64>> m_ranges;
67
68public:
69 SCP_LOGGER(());
70
71 using TlmInitiatorSocket = tlm::tlm_initiator_socket<BUSWIDTH, tlm::tlm_base_protocol_types, 1,
72 sc_core::SC_ZERO_OR_MORE_BOUND>;
73 using TlmPayload = tlm::tlm_generic_payload;
74 using MemTxResult = qemu::MemoryRegionOps::MemTxResult;
75 using MemTxAttrs = qemu::MemoryRegionOps::MemTxAttrs;
76 using DmiRegion = QemuInstanceDmiManager::DmiRegion;
77 using DmiRegionAlias = QemuInstanceDmiManager::DmiRegionAlias;
78
79 using DmiRegionAliasKey = uint64_t;
80
81protected:
82 QemuInstance& m_inst;
83 QemuInitiatorIface& m_initiator;
84 qemu::Device m_dev;
85 gs::runonsysc m_on_sysc;
86 int reentrancy = 0;
87
88 bool m_finished = false;
89
90 std::shared_ptr<qemu::AddressSpace> m_as;
91 std::shared_ptr<qemu::MemoryListener> m_listener;
92 std::map<uint64_t, std::shared_ptr<qemu::IOMMUMemoryRegion>> m_mmio_mrs;
93
95 {
96 public:
97 std::shared_ptr<qemu::MemoryRegion> m_root;
98 m_mem_obj(qemu::LibQemu& inst) { m_root.reset(new qemu::MemoryRegion(inst.object_new<qemu::MemoryRegion>())); }
99 m_mem_obj(std::shared_ptr<qemu::MemoryRegion> memory): m_root(std::move(memory)) {}
100 };
101 m_mem_obj* m_r = nullptr;
102
103 // we use an ordered map to find and combine elements
104 std::map<DmiRegionAliasKey, DmiRegionAlias::Ptr> m_dmi_aliases;
105 using AliasesIterator = std::map<DmiRegionAliasKey, DmiRegionAlias::Ptr>::iterator;
106
107 void init_payload(TlmPayload& trans, tlm::tlm_command command, uint64_t addr, uint64_t* val, unsigned int size)
108 {
109 trans.set_command(command);
110 trans.set_address(addr);
111 trans.set_data_ptr(reinterpret_cast<unsigned char*>(val));
112 trans.set_data_length(size);
113 trans.set_streaming_width(size);
114 trans.set_byte_enable_length(0);
115 trans.set_dmi_allowed(false);
116 trans.set_response_status(tlm::TLM_INCOMPLETE_RESPONSE);
117
118 m_initiator.initiator_customize_tlm_payload(trans);
119 }
120
121 void add_dmi_mr_alias(DmiRegionAlias::Ptr alias)
122 {
123 SCP_INFO(()) << "Adding " << *alias;
124 qemu::MemoryRegion alias_mr = alias->get_alias_mr();
125 m_r->m_root->add_subregion(alias_mr, alias->get_start());
126 alias->set_installed();
127 }
128
129 void del_dmi_mr_alias(const DmiRegionAlias::Ptr alias)
130 {
131 if (!alias->is_installed()) {
132 return;
133 }
134 SCP_INFO(()) << "Removing " << *alias;
135 m_r->m_root->del_subregion(alias->get_alias_mr());
136 }
137
149 void dmi_translate(qemu::IOMMUMemoryRegion::IOMMUTLBEntry* te, std::shared_ptr<qemu::IOMMUMemoryRegion> iommumr,
150 uint64_t base_addr, uint64_t addr, qemu::IOMMUMemoryRegion::IOMMUAccessFlags flags, int idx)
151 {
152 TlmPayload ltrans;
154#if DEBUG_CACHE
155 bool incache = false;
156 qemu::IOMMUMemoryRegion::IOMMUTLBEntry tmpte;
157#endif // DEBUG_CACHE
158
159 /*
160 * Fast path : check to see if the TE is already cached, if so return it straight away.
161 */
162 {
163 // Really wish we didn't need the lock here
164 std::lock_guard<std::mutex> lock(m_mutex);
165
166#ifdef USE_UNORD
167 auto m = iommumr->m_mapped_te.find(addr >> iommumr->min_page_sz);
168 if (m != iommumr->m_mapped_te.end()) {
169 *te = m->second;
170 SCP_TRACE(())
171 ("FAST (unord) translate for 0x{:x} : 0x{:x}->0x{:x} (mask 0x{:x}) perm={}", addr, te->iova,
172 te->translated_addr, te->addr_mask, te->perm);
173 return;
174 }
175#else // USE_UNORD
176
177 if (iommumr->m_mapped_te.size() > 0) {
178 auto it = iommumr->m_mapped_te.upper_bound(addr);
179 if (it != iommumr->m_mapped_te.begin()) {
180 it--;
181 if (it != iommumr->m_mapped_te.end() && (it->first) == (addr & ~it->second.addr_mask)) {
182 *te = it->second;
183#if DEBUG_CACHE
184 tmpte = *te;
185 incache = true;
186#endif // DEBUG_CACHE
187 SCP_TRACE(())
188 ("FAST translate for 0x{:x} : 0x{:x}->0x{:x} (mask 0x{:x}) perl={}", addr, te->iova,
189 te->translated_addr, te->addr_mask, te->perm);
190
191 return;
192 }
193 }
194 }
195#endif // USE_UNORD
196 }
197
198 /*
199 * Slow path, use DMI to investigate the memory, and see what sort of TE we can set up
200 *
201 * There are 3 options
202 * 1/ a real IOMMU region that should be mapped into the IOMMU address space
203 * 2/ a 'dmi-able' region which is not an IOMMU (e.g. local memory)
204 * 3/ a 'non-dmi-able' object (e.g. an MMIO device) - a minimum page size will be used for this.
205 *
206 * Enable DEBUG_CACHE to see if the fast path should have been used.
207 */
208
209 // construct the maximal mask: all address bits below the lowest set bit of base_addr (e.g. base_addr 0x40000000 -> start_msk 0x3fffffff)
210 uint64_t start_msk = (base_addr ^ (base_addr - 1)) >> 1;
211
212 SCP_DEBUG(())("Doing Translate for {:x} (Absolute 0x{:x})", addr, addr + base_addr);
213
215 init_payload(ltrans, tlm::TLM_IGNORE_COMMAND, base_addr + addr, &tmp, 0);
216 ltrans.set_extension(&lu_dmi);
217 tlm::tlm_dmi ldmi_data;
218
219 if ((*this)->get_direct_mem_ptr(ltrans, ldmi_data)) {
220 if (lu_dmi.has_dmi(gs::tlm_dmi_ex::dmi_iommu)) {
221 // Add te to 'special' IOMMU address space
222 tlm::tlm_dmi lu_dmi_data = lu_dmi.get_last(gs::tlm_dmi_ex::dmi_iommu);
223 if (0 == iommumr->m_dmi_aliases_te.count(lu_dmi_data.get_start_address())) {
224 qemu::RcuReadLock l_rcu_read_lock = m_inst.get().rcu_read_lock_new();
225 // take our own memory here, don't use an alias as
226 // we may have different sizes for the underlying DMI
227
228 DmiRegion region = DmiRegion(lu_dmi_data, 0, m_inst.get());
229 SCP_DEBUG(())
230 ("Adding IOMMU DMI Region start 0x{:x} - 0x{:x}", lu_dmi_data.get_start_address(),
231 lu_dmi_data.get_start_address() + region.get_size());
232 iommumr->m_root_te.add_subregion(region.get_mut_mr(), lu_dmi_data.get_start_address());
233 iommumr->m_dmi_aliases_te[lu_dmi_data.get_start_address()] = std::make_shared<DmiRegion>(region);
234 }
235
236 te->target_as = iommumr->m_as_te->get_ptr();
237 te->addr_mask = ldmi_data.get_end_address() - ldmi_data.get_start_address();
238 te->iova = addr;
239 te->translated_addr = (lu_dmi_data.get_start_address() +
240 (ldmi_data.get_dmi_ptr() - lu_dmi_data.get_dmi_ptr()));
241 te->perm = (qemu::IOMMUMemoryRegion::IOMMUAccessFlags)ldmi_data.get_granted_access();
242
243 SCP_DEBUG(())
244 ("Translate IOMMU 0x{:x}->0x{:x} (mask 0x{:x})", te->iova, te->translated_addr, te->addr_mask);
245
246 } else {
247 // no underlying DMI, add a 1-1 passthrough to normal address space
248 if (0 == iommumr->m_dmi_aliases_io.count(ldmi_data.get_start_address())) {
249 qemu::RcuReadLock l_rcu_read_lock = m_inst.get().rcu_read_lock_new();
250 DmiRegionAlias::Ptr alias = m_inst.get_dmi_manager().get_new_region_alias(ldmi_data);
251 SCP_DEBUG(()) << "Adding DMI Region alias " << *alias;
252 qemu::MemoryRegion alias_mr = alias->get_alias_mr();
253 iommumr->m_root.add_subregion(alias_mr, alias->get_start());
254 alias->set_installed();
255 iommumr->m_dmi_aliases_io[alias->get_start()] = alias;
256 }
257
258 te->target_as = iommumr->m_as->get_ptr();
259 te->addr_mask = start_msk;
260 te->iova = addr & ~start_msk;
261 te->translated_addr = (addr & ~start_msk) + base_addr;
262 te->perm = (qemu::IOMMUMemoryRegion::IOMMUAccessFlags)ldmi_data.get_granted_access();
263
264 SCP_DEBUG(())
265 ("Translate 1-1 passthrough 0x{:x}->0x{:x} (mask 0x{:x})", te->iova, te->translated_addr,
266 te->addr_mask);
267 }
268#if DEBUG_CACHE
269 if (incache) {
270 SCP_WARN(())("Could have used the cache! {:x}\n", addr);
271 assert(te->iova == tmpte.iova);
272 assert(te->target_as == tmpte.target_as);
273 assert(te->addr_mask == tmpte.addr_mask);
274 assert(te->translated_addr == tmpte.translated_addr);
275 assert(te->perm == tmpte.perm);
276 }
277#endif // DEBUG_CACHE
278 std::lock_guard<std::mutex> lock(m_mutex);
279#ifdef USE_UNORD
280 iommumr->m_mapped_te[(addr & ~te->addr_mask) >> iommumr->min_page_sz] = *te;
281#else // USE_UNORD
282 iommumr->m_mapped_te[addr & ~te->addr_mask] = *te;
283#endif // USE_UNORD
284 SCP_DEBUG(())
285 ("Caching TE at addr 0x{:x} (mask {:x})", addr & ~te->addr_mask, te->addr_mask);
286
287 } else {
288 // No DMI at all, either an MMIO, or a DMI failure, setup for a 1-1 translation for the minimal page
289 // in the normal address space
290
291 te->target_as = iommumr->m_as->get_ptr();
292 te->addr_mask = (1 << iommumr->min_page_sz) - 1;
293 te->iova = addr & ~te->addr_mask;
294 te->translated_addr = (addr & ~te->addr_mask) + base_addr;
295 te->perm = qemu::IOMMUMemoryRegion::IOMMU_RW;
296
297 SCP_DEBUG(())
298 ("Translate 1-1 limited passthrough 0x{:x}->0x{:x} (mask 0x{:x})", te->iova, te->translated_addr,
299 te->addr_mask);
300 }
301 ltrans.clear_extension(&lu_dmi);
302 }
303
344 tlm::tlm_dmi check_dmi_hint_locked(TlmPayload& trans)
345 {
346 assert(trans.is_dmi_allowed());
347 tlm::tlm_dmi dmi_data;
348 int shm_fd = -1;
349
350 SCP_INFO(()) << "DMI request for address 0x" << std::hex << trans.get_address();
351
352 // From the SystemC perspective it would be 'safer' to wrap this in
353 // m_on_sysc.run_on_sysc([this, &trans]{...}).
354 gs::UnderlyingDMITlmExtension u_dmi; // collects the target's underlying DMI information (exact type name assumed)
355
356 trans.set_extension(&u_dmi);
357 bool dmi_valid = (*this)->get_direct_mem_ptr(trans, dmi_data);
358 trans.clear_extension(&u_dmi);
359 if (!dmi_valid) {
360 SCP_INFO(())("No DMI available for {:x}", trans.get_address());
361 /* this is used by the map function below
362 * - a better plan may be to tag memories to be mapped so we don't need this
363 */
364 if (u_dmi.has_dmi(gs::tlm_dmi_ex::dmi_mapped)) {
365 tlm::tlm_dmi first_map = u_dmi.get_first(gs::tlm_dmi_ex::dmi_mapped);
366 return first_map;
367 }
368 if (u_dmi.has_dmi(gs::tlm_dmi_ex::dmi_nomap)) {
369 tlm::tlm_dmi first_nomap = u_dmi.get_first(gs::tlm_dmi_ex::dmi_nomap);
370 return first_nomap;
371 }
372 return dmi_data;
373 }
374
375 /*
376 * This is the 'special' case of IOMMUs, which require an IOMMU memory region setup.
377 * The IOMMU will be constructed here, but not populated - that will happen in the callback.
378 * There will be a 'pair' of new regions: one to hold non-IOMMU regions within this space,
379 * the other to hold the IOMMU regions themselves.
380 *
381 * In extreme circumstances, if the IOMMU DMI to this region previously failed, we may have
382 * ended up with a normal DMI region here, which needs removing. We do that here, and then simply
383 * return and wait for a new access to sort things out.
384 */
385 if (u_dmi.has_dmi(gs::tlm_dmi_ex::dmi_iommu)) {
386 /* We have an IOMMU request setup an IOMMU region */
387 SCP_INFO(())("IOMMU DMI available for {:x}", trans.get_address());
388
389 /* The first mapped DMI will be the scope of the IOMMU region from our perspective */
390 tlm::tlm_dmi first_map = u_dmi.get_first(gs::tlm_dmi_ex::dmi_mapped);
391
392 uint64_t start = first_map.get_start_address();
393 uint64_t size = first_map.get_end_address() - first_map.get_start_address();
394
395 auto itr = m_mmio_mrs.find(start);
396 if (itr == m_mmio_mrs.end()) {
397 // Better check for overlapping iommu's - they must be banned !!
398
399 qemu::RcuReadLock rcu_read_lock = m_inst.get().rcu_read_lock_new();
400
401 /* invalidate any 'old' regions we happen to have mapped previously */
402 invalidate_single_range(start, start + size);
403
404 SCP_INFO(())
405 ("Adding IOMMU for VA 0x{:x} [0x{:x} - 0x{:x}]", trans.get_address(), start, start + size);
406
407 using namespace std::placeholders;
408 qemu::MemoryRegionOpsPtr ops;
409 ops = m_inst.get().memory_region_ops_new();
410 ops->set_read_callback(std::bind(&QemuInitiatorSocket::qemu_io_read, this, _1, _2, _3, _4));
411 ops->set_write_callback(std::bind(&QemuInitiatorSocket::qemu_io_write, this, _1, _2, _3, _4));
412 ops->set_max_access_size(8);
413
414 auto iommumr = std::make_shared<qemu::IOMMUMemoryRegion>(
415 m_inst.get().object_new<qemu::IOMMUMemoryRegion>()); // (construction call assumed, following the object_new<> pattern used elsewhere in this file)
416
417 iommumr->init(*iommumr, "dmi-manager-iommu", size, ops,
418 [this, iommumr, start](qemu::IOMMUMemoryRegion::IOMMUTLBEntry* te, uint64_t addr, // (lambda capture list assumed)
419 qemu::IOMMUMemoryRegion::IOMMUAccessFlags flags,
420 int idx) { dmi_translate(te, iommumr, start, addr, flags, idx); });
421 {
422 std::lock_guard<std::mutex> lock(m_mutex);
423 m_mmio_mrs[start] = iommumr;
424 }
425 m_r->m_root->add_subregion(*iommumr, start);
426
427 } else {
428 // Previously, when looking up a TE, we failed to get the lock, so the DMI failed and we ended up in a
429 // limited passthrough, which causes us to re-arrive here, but with a DMI hint. Hopefully next time
430 // the TE is looked up we will get the lock and re-establish the translation. In any case we should do
431 // nothing and simply return.
432 // Moving to a cached TE will improve speed and prevent this from happening?
433 SCP_DEBUG(())
434 ("Memory request should be directed via MMIO interface {:x} {:x}", start, trans.get_address());
435
436 // std::lock_guard<std::mutex> lock(m_mutex);
437
438 uint64_t start_range = itr->first;
439 uint64_t end_range = itr->first + itr->second->get_size();
440
441 invalidate_direct_mem_ptr(start_range, end_range);
442 }
443 return dmi_data;
444 }
445
446 ShmemIDExtension* shm_ext = trans.get_extension<ShmemIDExtension>();
447 // it's ok that ShmemIDExtension is not added to trans as this should only happen when
448 // memory is a shared memory type.
449 if (shm_ext) {
450 shm_fd = shm_ext->m_fd;
451 }
452
453 SCP_INFO(()) << "DMI Adding for address 0x" << std::hex << trans.get_address();
454
455 // The upper limit is set within QEMU by the TBU
456 // e.g. 1k small pages for ARM.
457 // setting to 1/2 the size of the ARM TARGET_PAGE_SIZE,
458 // Comment from QEMU code:
459 /* The physical section number is ORed with a page-aligned
460 * pointer to produce the iotlb entries. Thus it should
461 * never overflow into the page-aligned value.
462 */
463#define MAX_MAP 250
464
465 // Current function may be called by the MMIO thread which does not hold
466 // any RCU read lock. This is required in case of a memory transaction
467 // commit on a TCG accelerated Qemu instance
468 qemu::RcuReadLock rcu_read_lock = m_inst.get().rcu_read_lock_new();
469
470 if (m_dmi_aliases.size() > MAX_MAP) {
471 SCP_FATAL(())("Too many DMI regions requested, consider using an IOMMU");
472 }
473 uint64_t start = dmi_data.get_start_address();
474 uint64_t end = dmi_data.get_end_address();
475
476 if (0 == m_dmi_aliases.count(start)) {
477 SCP_INFO(()) << "Adding DMI for range [0x" << std::hex << dmi_data.get_start_address() << "-0x" << std::hex
478 << dmi_data.get_end_address() << "]";
479
480 DmiRegionAlias::Ptr alias = m_inst.get_dmi_manager().get_new_region_alias(dmi_data, shm_fd);
481
482 m_dmi_aliases[start] = alias;
483 add_dmi_mr_alias(m_dmi_aliases[start]);
484 } else {
485 SCP_INFO(())("Already have DMI for 0x{:x}", start);
486 }
487 return dmi_data;
488 }
489
490 void check_qemu_mr_hint(TlmPayload& trans)
491 {
492 QemuMrHintTlmExtension* ext = nullptr;
493 uint64_t mapping_addr;
494
495 trans.get_extension(ext);
496
497 if (ext == nullptr) {
498 return;
499 }
500
501 qemu::MemoryRegion target_mr(ext->get_mr()); // the hinted target MR (accessor name assumed)
502
503 if (target_mr.get_inst_id() != m_dev.get_inst_id()) {
504 return;
505 }
506
507 mapping_addr = trans.get_address() - ext->get_offset();
508
509 qemu::MemoryRegion mr(m_inst.get().object_new<qemu::MemoryRegion>()); // new alias MR (construction assumed, cf. object_new<> above)
510
511 mr.init_alias(m_dev, "mr-alias", target_mr, 0, target_mr.get_size());
512 m_r->m_root->add_subregion(mr, mapping_addr);
513 }
514
515 void do_regular_access(TlmPayload& trans)
516 {
517 using sc_core::sc_time;
518
519 uint64_t addr = trans.get_address();
520 sc_time now = m_initiator.initiator_get_local_time();
521
522 m_inst.get().unlock_iothread();
523 m_on_sysc.run_on_sysc([this, &trans, &now] { (*this)->b_transport(trans, now); });
524 m_inst.get().lock_iothread();
525 /*
526 * Reset transaction address before dmi check (could be altered by
527 * b_transport).
528 */
529 trans.set_address(addr);
530 check_qemu_mr_hint(trans);
531 if (trans.is_dmi_allowed()) {
532 check_dmi_hint_locked(trans);
533 }
534
535 m_initiator.initiator_set_local_time(now);
536 }
537
538 void do_debug_access(TlmPayload& trans)
539 {
540 m_inst.get().unlock_iothread();
541 m_on_sysc.run_on_sysc([this, &trans] { (*this)->transport_dbg(trans); });
542 m_inst.get().lock_iothread();
543 }
544
545 void do_direct_access(TlmPayload& trans)
546 {
547 sc_core::sc_time now = m_initiator.initiator_get_local_time();
548 (*this)->b_transport(trans, now);
549 }
550
551 MemTxResult qemu_io_access(tlm::tlm_command command, uint64_t addr, uint64_t* val, unsigned int size,
552 MemTxAttrs attrs)
553 {
554 TlmPayload trans;
555 if (m_finished) return qemu::MemoryRegionOps::MemTxError;
556
557 init_payload(trans, command, addr, val, size);
558
559 if (trans.get_extension<ExclusiveAccessTlmExtension>()) {
560 /* in the case of an exclusive access keep the iolock (and assume NO side-effects)
561 * clearly dangerous, but exclusives are not guaranteed to work on IO space anyway
562 */
563 do_direct_access(trans);
564 } else {
565 if (!m_inst.g_rec_qemu_io_lock.try_lock()) {
566 /* Allow only a single access, but handle re-entrant code,
567 * while allowing side-effects in SystemC (e.g. calling wait)
568 * [NB re-entrant code caused via memory listeners to
569 * creation of memory regions (due to DMI) in some models]
570 */
571 m_inst.get().unlock_iothread();
572 m_inst.g_rec_qemu_io_lock.lock();
573 m_inst.get().lock_iothread();
574 }
575 reentrancy++;
576
577 /* Force re-entrant code to use a direct access (safe for reentrancy with no side effects) */
578 if (reentrancy > 1) {
579 do_direct_access(trans);
580 } else if (attrs.debug) {
581 do_debug_access(trans);
582 } else {
583 do_regular_access(trans);
584 }
585
586 reentrancy--;
587 m_inst.g_rec_qemu_io_lock.unlock();
588 }
589 m_initiator.initiator_tidy_tlm_payload(trans);
590
591 switch (trans.get_response_status()) {
592 case tlm::TLM_OK_RESPONSE:
593 return qemu::MemoryRegionOps::MemTxOK;
594
595 case tlm::TLM_ADDRESS_ERROR_RESPONSE:
596 return qemu::MemoryRegionOps::MemTxDecodeError;
597
598 default:
599 return qemu::MemoryRegionOps::MemTxError;
600 }
601 }
602
603public:
604 MemTxResult qemu_io_read(uint64_t addr, uint64_t* val, unsigned int size, MemTxAttrs attrs)
605 {
606 return qemu_io_access(tlm::TLM_READ_COMMAND, addr, val, size, attrs);
607 }
608
609 MemTxResult qemu_io_write(uint64_t addr, uint64_t val, unsigned int size, MemTxAttrs attrs)
610 {
611 return qemu_io_access(tlm::TLM_WRITE_COMMAND, addr, &val, size, attrs);
612 }
613
614 QemuInitiatorSocket(const char* name, QemuInitiatorIface& initiator, QemuInstance& inst)
615 : TlmInitiatorSocket(name)
616 , m_inst(inst)
617 , m_initiator(initiator)
618 , m_on_sysc(sc_core::sc_gen_unique_name("initiator_run_on_sysc"))
619 {
620 SCP_DEBUG(()) << "QemuInitiatorSocket constructor";
621 TlmInitiatorSocket::bind(*static_cast<tlm::tlm_bw_transport_if<>*>(this));
622 }
623
624 void init(qemu::Device& dev, const char* prop)
625 {
626 using namespace std::placeholders;
627
628 qemu::LibQemu& inst = m_inst.get();
629 qemu::MemoryRegionOpsPtr ops;
630
631 m_r = new m_mem_obj(inst); // oot = inst.object_new<qemu::MemoryRegion>();
632 ops = inst.memory_region_ops_new();
633
634 ops->set_read_callback(std::bind(&QemuInitiatorSocket::qemu_io_read, this, _1, _2, _3, _4));
635 ops->set_write_callback(std::bind(&QemuInitiatorSocket::qemu_io_write, this, _1, _2, _3, _4));
636 ops->set_max_access_size(8);
637
638 m_r->m_root->init_io(dev, TlmInitiatorSocket::name(), std::numeric_limits<uint64_t>::max(), ops);
639 dev.set_prop_link(prop, *m_r->m_root);
640
641 m_dev = dev;
642 }
643
644 void end_of_simulation()
645 {
646 m_finished = true;
647 cancel_all();
648 }
649
650 // This could happen during void end_of_simulation() but there is a race with other units trying
651 // to pull down their DMI's
652 virtual ~QemuInitiatorSocket()
653 {
654 cancel_all();
655 if (m_r) {
656 if (m_r->m_root) {
657 m_r->m_root->removeSubRegions();
658 }
659 delete m_r;
660 m_r = nullptr;
661 }
662 // dmimgr_unlock();
663 }
664
665 void qemu_map(qemu::MemoryListener& listener, uint64_t addr, uint64_t len) // (parameter list assumed; bound via set_map_callback in init_global())
666 {
667 // This function is relatively expensive and called a lot; it should be done a different way and removed.
668 if (m_finished) return;
669
670 SCP_DEBUG(()) << "Mapping request for address [0x" << std::hex << addr << "-0x" << addr + len << "]";
671
672 TlmPayload trans;
673 uint64_t current_addr = addr;
674 uint64_t temp = 0;
675 init_payload(trans, tlm::TLM_IGNORE_COMMAND, current_addr, &temp, 0);
676 trans.set_dmi_allowed(true);
677
678 while (current_addr < addr + len) {
679 tlm::tlm_dmi dmi_data = check_dmi_hint_locked(trans);
680
681 // Current addr is an absolute address while the dmi range might be relative
682 // hence current_addr does not necessarily fall within the dmi_range address boundaries
683 // TODO: is there a way to retrieve the dmi range block offset?
684 SCP_INFO(()) << "0x" << std::hex << current_addr << " mapped [0x" << dmi_data.get_start_address() << "-0x"
685 << dmi_data.get_end_address() << "]";
686
687 // The allocated range may not span the whole length required for mapping
688 assert(dmi_data.get_end_address() > current_addr);
689 current_addr = dmi_data.get_end_address();
690 if (current_addr >= addr + len) break; // Catch potential loop-rounds
691 current_addr += 1;
692 trans.set_address(current_addr);
693 }
694
695 m_initiator.initiator_tidy_tlm_payload(trans);
696 }
697
698 void init_global(qemu::Device& dev)
699 {
700 using namespace std::placeholders;
701
702 qemu::LibQemu& inst = m_inst.get();
703 qemu::MemoryRegionOpsPtr ops;
704 ops = inst.memory_region_ops_new();
705
706 ops->set_read_callback(std::bind(&QemuInitiatorSocket::qemu_io_read, this, _1, _2, _3, _4));
707 ops->set_write_callback(std::bind(&QemuInitiatorSocket::qemu_io_write, this, _1, _2, _3, _4));
708 ops->set_max_access_size(8);
709
710 auto system_memory = inst.get_system_memory();
711 system_memory->init_io(dev, TlmInitiatorSocket::name(), std::numeric_limits<uint64_t>::max() - 1, ops);
712 m_r = new m_mem_obj(std::move(system_memory));
713
714 m_as = inst.address_space_get_system_memory();
715 // System memory has been changed from a container to "io"; this is relevant
716 // for the flatview, and to reflect that change we just update the topology
717 m_as->update_topology();
718
719 m_listener = inst.memory_listener_new();
720 m_listener->set_map_callback(std::bind(&QemuInitiatorSocket::qemu_map, this, _1, _2, _3));
721 m_listener->register_as(m_as);
722
723 m_dev = dev;
724 }
725
726 void cancel_all() { m_on_sysc.cancel_all(); }
727
728 /* tlm::tlm_bw_transport_if<> */
729 virtual tlm::tlm_sync_enum nb_transport_bw(tlm::tlm_generic_payload& trans, tlm::tlm_phase& phase,
730 sc_core::sc_time& t)
731 {
732 /* Should not be reached */
733 assert(false);
734 return tlm::TLM_COMPLETED;
735 }
736
737 virtual AliasesIterator remove_alias(AliasesIterator it)
738 {
739 DmiRegionAlias::Ptr r = it->second; /*
740 * Invalidate this region. Do not bother with
741 * partial invalidation as it's really not worth
742 * it. Better to let the target model return sub-DMI
743 * regions during future accesses.
744 */
745
746 /*
747 * Mark the whole region this alias maps to as invalid. This has
748 * the effect of marking all the other aliases mapping to the same
749 * region as invalid too. If a DMI request for the same region is
750 * already in progress, it will have a chance to detect it is now
751 * invalid before mapping it on the QEMU root MR (see
752 * check_dmi_hint comment).
753 */
754 // r->invalidate_region();
755
756 assert(r->is_installed());
757 // if (!r->is_installed()) {
758 /*
759 * The alias is not mapped onto the QEMU root MR yet. Simply
760 * skip it. It will be removed from m_dmi_aliases by
761 * check_dmi_hint.
762 */
763 // return it++;
764 // }
765
766 /*
767 * Remove the alias from the root MR. This is enough to perform
768 * required invalidations on QEMU's side in a thread-safe manner.
769 */
770 del_dmi_mr_alias(r);
771
772 /*
773 * Remove the alias from the collection. The DmiRegionAlias object
774 * is then destructed, leading to the destruction of the DmiRegion
775 * shared pointer it contains. When no more alias reference this
776 * region, it is in turn destructed, effectively destroying the
777 * corresponding memory region in QEMU.
778 */
779 return m_dmi_aliases.erase(it);
780 }
781
782private:
783 void invalidate_single_range(sc_dt::uint64 start_range, sc_dt::uint64 end_range)
784 {
785 auto it = m_dmi_aliases.upper_bound(start_range);
786
787 if (it != m_dmi_aliases.begin()) {
788 /*
789 * Start with the preceding region, as it may already cross the
790 * range we must invalidate.
791 */
792 it--;
793 }
794 while (it != m_dmi_aliases.end()) {
795 DmiRegionAlias::Ptr r = it->second;
796
797 if (r->get_start() > end_range) {
798 /* We've got out of the invalidation range */
799 break;
800 }
801
802 if (r->get_end() < start_range) {
803 /* We are not in yet */
804 it++;
805 continue;
806 }
807
808 it = remove_alias(it);
809
810 SCP_DEBUG(()) << "Invalidated region [0x" << std::hex << r->get_start() << ", 0x" << std::hex
811 << r->get_end() << "]";
812 }
813 }
814
815 void invalidate_ranges_safe_cb()
816 {
817 std::lock_guard<std::mutex> lock(m_mutex);
818
819 SCP_DEBUG(()) << "Invalidating " << m_ranges.size() << " ranges";
820 auto rit = m_ranges.begin();
821 while (rit != m_ranges.end()) {
822 invalidate_single_range(rit->first, rit->second);
823 rit = m_ranges.erase(rit);
824 }
825 }
826
827public:
828 virtual void invalidate_direct_mem_ptr(sc_dt::uint64 start_range, sc_dt::uint64 end_range)
829 {
830 if (m_finished) return;
831 SCP_DEBUG(()) << "DMI invalidate [0x" << std::hex << start_range << ", 0x" << std::hex << end_range << "]";
832
833 {
834 std::lock_guard<std::mutex> lock(m_mutex);
835
836 for (auto m : m_mmio_mrs) {
837 auto mr_start = m.first;
838 auto mr_end = m.first + m.second->get_size();
839 if ((mr_start >= start_range && mr_start <= end_range) ||
840 (start_range >= mr_start && start_range <= mr_end)) { // overlap test (second operand reconstructed)
841 for (auto it = m.second->m_mapped_te.begin(); it != m.second->m_mapped_te.end();) {
842#ifdef USE_UNORD
843 if ((it->first << m.second->min_page_sz) + mr_start >= start_range &&
844 (it->first << m.second->min_page_sz) + mr_start < end_range) {
845#else
846 if (it->first + mr_start >= start_range && it->first + mr_start < end_range) {
847#endif
848 m.second->iommu_unmap(&(it->second));
849 it = m.second->m_mapped_te.erase(it);
850 } else
851 it++;
852 }
853 return; // If we found this, then we're done. Overlapping IOMMU's are not allowed.
854 }
855 }
856 }
857 {
858 std::lock_guard<std::mutex> lock(m_mutex);
859 m_ranges.push_back(std::make_pair(start_range, end_range));
860 }
861
862 m_initiator.initiator_async_run([&]() { invalidate_ranges_safe_cb(); });
863
864 /* For 7.2 this may need to be safe async work ???????? */
865 }
866
867 virtual void reset()
868 {
869 std::lock_guard<std::mutex> lock(m_mutex);
870
871 for (auto m : m_mmio_mrs) {
872 m.second->m_mapped_te.clear();
873 auto it = m_dmi_aliases.begin();
874 while (it != m_dmi_aliases.end()) {
875 DmiRegionAlias::Ptr r = it->second;
876 it = remove_alias(it);
877 }
878 }
879 }
880};
881
882#endif
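Usage sketch (editorial note, not part of initiator.h): the fragment below shows how a CPU-side model might satisfy the QemuInitiatorIface contract and wire up a QemuInitiatorSocket. MyCpuWrapper, the "memory" property name, the include path, the constructor argument order and the inline handling of initiator_async_run are illustrative assumptions; only the QemuInitiatorIface virtuals, QemuInitiatorSocket<> and init() shown above come from the header.

// Hypothetical wrapper -- a minimal sketch, not a drop-in implementation.
#include <systemc>
#include <ports/initiator.h> // include path assumed

class MyCpuWrapper : public sc_core::sc_module, public QemuInitiatorIface
{
public:
    QemuInitiatorSocket<> socket; // forwards QEMU AddressSpace accesses as TLM transactions

    MyCpuWrapper(sc_core::sc_module_name name, QemuInstance& inst, qemu::Device dev)
        : sc_core::sc_module(name)
        , socket("socket", *this, inst) // (name, initiator iface, instance) -- argument order assumed
        , m_dev(dev)
    {
        // Expose the socket's root memory region to the QEMU device through a QOM
        // link property; "memory" is an assumed property name.
        socket.init(m_dev, "memory");
    }

    /* QemuInitiatorIface contract */
    void initiator_customize_tlm_payload(TlmPayload& payload) override
    {
        // attach CPU-specific TLM extensions here if required
    }
    void initiator_tidy_tlm_payload(TlmPayload& payload) override
    {
        // release whatever initiator_customize_tlm_payload attached
    }
    sc_core::sc_time initiator_get_local_time() override { return sc_core::sc_time_stamp(); }
    void initiator_set_local_time(const sc_core::sc_time&) override { /* sync the local quantum here */ }
    void initiator_async_run(qemu::Cpu::AsyncJobFn job) override
    {
        job(); // simplistic: a real CPU model would schedule this on its vCPU thread
    }

private:
    qemu::Device m_dev;
};

Once init() has installed qemu_io_read()/qemu_io_write() as MemoryRegionOps callbacks, the wrapper never calls them directly; it only supplies the payload and timing hooks above, while DMI and IOMMU handling stay inside the socket.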