initiator.h
/*
 * This file is part of libqbox
 * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All Rights Reserved.
 * Author: GreenSocs 2021
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#ifndef _LIBQBOX_PORTS_INITIATOR_H
#define _LIBQBOX_PORTS_INITIATOR_H

#include <functional>
#include <limits>
#include <cassert>
#include <cinttypes>

#include <tlm>

#include <libqemu-cxx/libqemu-cxx.h>

#include <libgssync.h>

#include <scp/report.h>

#include <qemu-instance.h>
#include <tlm-extensions/qemu-mr-hint.h>
#include <tlm-extensions/exclusive-access.h>
#include <tlm-extensions/shmem_extension.h>
#include <tlm-extensions/underlying-dmi.h>
#include <tlm_sockets_buswidth.h>

/*
 * Define this as 1 if you want to enable the cache debug mechanism below.
 */
#define DEBUG_CACHE 0

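/*
 * Interface the QEMU-side initiator (typically a CPU model) must implement so this
 * socket can customise TLM payloads, keep local time in sync and schedule
 * asynchronous jobs on the initiator.
 */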
class QemuInitiatorIface
{
public:
    using TlmPayload = tlm::tlm_generic_payload;

    virtual void initiator_customize_tlm_payload(TlmPayload& payload) = 0;
    virtual void initiator_tidy_tlm_payload(TlmPayload& payload) = 0;
    virtual sc_core::sc_time initiator_get_local_time() = 0;
    virtual void initiator_set_local_time(const sc_core::sc_time&) = 0;
    virtual void initiator_async_run(qemu::Cpu::AsyncJobFn job) = 0;
};

/**
 * @class QemuInitiatorSocket
 *
 * @brief TLM-2.0 initiator socket specialisation for QEMU AddressSpace mapping.
 */
template <unsigned int BUSWIDTH = DEFAULT_TLM_BUSWIDTH>
class QemuInitiatorSocket
    : public tlm::tlm_initiator_socket<BUSWIDTH, tlm::tlm_base_protocol_types, 1, sc_core::SC_ZERO_OR_MORE_BOUND>,
      public tlm::tlm_bw_transport_if<>
{
private:
    std::mutex m_mutex;
    std::vector<std::pair<sc_dt::uint64, sc_dt::uint64>> m_ranges;

public:
    SCP_LOGGER(());

    using TlmInitiatorSocket = tlm::tlm_initiator_socket<BUSWIDTH, tlm::tlm_base_protocol_types, 1,
                                                         sc_core::SC_ZERO_OR_MORE_BOUND>;
    using TlmPayload = tlm::tlm_generic_payload;
    using MemTxResult = qemu::MemoryRegionOps::MemTxResult;
    using MemTxAttrs = qemu::MemoryRegionOps::MemTxAttrs;
    using DmiRegion = QemuInstanceDmiManager::DmiRegion;
    using DmiRegionAlias = QemuInstanceDmiManager::DmiRegionAlias;

    using DmiRegionAliasKey = uint64_t;

protected:
    QemuInstance& m_inst;
    QemuInitiatorIface& m_initiator;
    qemu::Device m_dev;
    gs::runonsysc m_on_sysc;
    int reentrancy = 0;

    bool m_finished = false;

    std::shared_ptr<qemu::AddressSpace> m_as;
    std::shared_ptr<qemu::MemoryListener> m_listener;
    std::map<uint64_t, std::shared_ptr<qemu::IOMMUMemoryRegion>> m_mmio_mrs;

    class m_mem_obj
    {
    public:
        std::shared_ptr<qemu::MemoryRegion> m_root;
        m_mem_obj(qemu::LibQemu& inst) { m_root.reset(new qemu::MemoryRegion(inst.object_new<qemu::MemoryRegion>())); }
        m_mem_obj(std::shared_ptr<qemu::MemoryRegion> memory): m_root(std::move(memory)) {}
    };
    m_mem_obj* m_r = nullptr;

    // we use an ordered map to find and combine elements
    std::map<DmiRegionAliasKey, DmiRegionAlias::Ptr> m_dmi_aliases;
    using AliasesIterator = std::map<DmiRegionAliasKey, DmiRegionAlias::Ptr>::iterator;

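    /* Fill in a TLM payload for a QEMU-initiated access and let the initiator attach
     * its own extensions via initiator_customize_tlm_payload(). */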
    void init_payload(TlmPayload& trans, tlm::tlm_command command, uint64_t addr, uint64_t* val, unsigned int size)
    {
        trans.set_command(command);
        trans.set_address(addr);
        trans.set_data_ptr(reinterpret_cast<unsigned char*>(val));
        trans.set_data_length(size);
        trans.set_streaming_width(size);
        trans.set_byte_enable_length(0);
        trans.set_dmi_allowed(false);
        trans.set_response_status(tlm::TLM_INCOMPLETE_RESPONSE);

        m_initiator.initiator_customize_tlm_payload(trans);
    }

    void add_dmi_mr_alias(DmiRegionAlias::Ptr alias)
    {
        SCP_INFO(()) << "Adding " << *alias;
        qemu::MemoryRegion alias_mr = alias->get_alias_mr();
        m_r->m_root->add_subregion(alias_mr, alias->get_start());
        alias->set_installed();
    }

    void del_dmi_mr_alias(const DmiRegionAlias::Ptr alias)
    {
        if (!alias->is_installed()) {
            return;
        }
        SCP_INFO(()) << "Removing " << *alias;
        m_r->m_root->del_subregion(alias->get_alias_mr());
    }

    /**
     * @brief Use DMI data to set up a QEMU IOMMU translation.
     */
    void dmi_translate(qemu::IOMMUMemoryRegion::IOMMUTLBEntry* te, std::shared_ptr<qemu::IOMMUMemoryRegion> iommumr,
                       uint64_t base_addr, uint64_t addr, qemu::IOMMUMemoryRegion::IOMMUAccessFlags flags, int idx)
    {
        TlmPayload ltrans;
        uint64_t tmp;
#if DEBUG_CACHE
        bool incache = false;
        qemu::IOMMUMemoryRegion::IOMMUTLBEntry tmpte;
#endif // DEBUG_CACHE

        /*
         * Fast path : check to see if the TE is already cached, if so return it straight away.
         */
        {
            // Really wish we didn't need the lock here
            std::lock_guard<std::mutex> lock(m_mutex);

            auto m = iommumr->m_mapped_te.find(addr >> iommumr->min_page_sz);
            if (m != iommumr->m_mapped_te.end()) {
                *te = m->second;
                SCP_TRACE(())
                ("FAST (unord) translate for 0x{:x} : 0x{:x}->0x{:x} (mask 0x{:x}) perm={}", addr, te->iova,
                 te->translated_addr, te->addr_mask, te->perm);
                return;
            }
        }

        /*
         * Slow path, use DMI to investigate the memory, and see what sort of TE we can set up
         *
         * There are 3 options
         * 1/ a real IOMMU region that should be mapped into the IOMMU address space
         * 2/ a 'dmi-able' region which is not an IOMMU (e.g. local memory)
         * 3/ a 'non-dmi-able' object (e.g. an MMIO device) - a minimum page size will be used for this.
         *
         * Enable DEBUG_CACHE to see if the fast path should have been used.
         */

        // construct maximal mask.
        uint64_t start_msk = (base_addr ^ (base_addr - 1)) >> 1;

        SCP_DEBUG(())("Doing Translate for {:x} (Absolute 0x{:x})", addr, addr + base_addr);

        gs::UnderlyingDMITlmExtension lu_dmi;
        init_payload(ltrans, tlm::TLM_IGNORE_COMMAND, base_addr + addr, &tmp, 0);
        ltrans.set_extension(&lu_dmi);
        tlm::tlm_dmi ldmi_data;

        if ((*this)->get_direct_mem_ptr(ltrans, ldmi_data)) {
            if (lu_dmi.has_dmi(gs::tlm_dmi_ex::dmi_iommu)) {
                // Add te to 'special' IOMMU address space
                tlm::tlm_dmi lu_dmi_data = lu_dmi.get_last(gs::tlm_dmi_ex::dmi_iommu);
                if (0 == iommumr->m_dmi_aliases_te.count(lu_dmi_data.get_start_address())) {
                    qemu::RcuReadLock l_rcu_read_lock = m_inst.get().rcu_read_lock_new();
                    // take our own memory here, dont use an alias as
                    // we may have different sizes for the underlying DMI

                    DmiRegion region = DmiRegion(lu_dmi_data, 0, m_inst.get());
                    SCP_DEBUG(())
                    ("Adding IOMMU DMI Region start 0x{:x} - 0x{:x}", lu_dmi_data.get_start_address(),
                     lu_dmi_data.get_start_address() + region.get_size());
                    iommumr->m_root_te.add_subregion(region.get_mut_mr(), lu_dmi_data.get_start_address());
                    iommumr->m_dmi_aliases_te[lu_dmi_data.get_start_address()] = std::make_shared<DmiRegion>(region);
                }

                te->target_as = iommumr->m_as_te->get_ptr();
                te->addr_mask = ldmi_data.get_end_address() - ldmi_data.get_start_address();
                te->iova = addr;
                te->translated_addr = (lu_dmi_data.get_start_address() +
                                       (ldmi_data.get_dmi_ptr() - lu_dmi_data.get_dmi_ptr()));
                te->perm = (qemu::IOMMUMemoryRegion::IOMMUAccessFlags)ldmi_data.get_granted_access();

                SCP_DEBUG(())
                ("Translate IOMMU 0x{:x}->0x{:x} (mask 0x{:x})", te->iova, te->translated_addr, te->addr_mask);

            } else {
                // no underlying DMI, add a 1-1 passthrough to normal address space
                if (0 == iommumr->m_dmi_aliases_io.count(ldmi_data.get_start_address())) {
                    qemu::RcuReadLock l_rcu_read_lock = m_inst.get().rcu_read_lock_new();
                    DmiRegionAlias::Ptr alias = m_inst.get_dmi_manager().get_new_region_alias(ldmi_data);
                    SCP_DEBUG(()) << "Adding DMI Region alias " << *alias;
                    qemu::MemoryRegion alias_mr = alias->get_alias_mr();
                    iommumr->m_root.add_subregion(alias_mr, alias->get_start());
                    alias->set_installed();
                    iommumr->m_dmi_aliases_io[alias->get_start()] = alias;
                }

                te->target_as = iommumr->m_as->get_ptr();
                te->addr_mask = start_msk;
                te->iova = addr & ~start_msk;
                te->translated_addr = (addr & ~start_msk) + base_addr;
                te->perm = (qemu::IOMMUMemoryRegion::IOMMUAccessFlags)ldmi_data.get_granted_access();

                SCP_DEBUG(())
                ("Translate 1-1 passthrough 0x{:x}->0x{:x} (mask 0x{:x})", te->iova, te->translated_addr,
                 te->addr_mask);
            }
#if DEBUG_CACHE
            if (incache) {
                SCP_WARN(())("Could have used the cache! {:x}\n", addr);
                assert(te->iova == tmpte.iova);
                assert(te->target_as == tmpte.target_as);
                assert(te->addr_mask == tmpte.addr_mask);
                assert(te->translated_addr == tmpte.translated_addr);
                assert(te->perm == tmpte.perm);
            }
#endif // DEBUG_CACHE
            std::lock_guard<std::mutex> lock(m_mutex);
            iommumr->m_mapped_te[(addr & ~te->addr_mask) >> iommumr->min_page_sz] = *te;
            SCP_DEBUG(())
            ("Caching TE at addr 0x{:x} (mask {:x})", addr & ~te->addr_mask, te->addr_mask);

        } else {
            // No DMI at all, either an MMIO, or a DMI failure, setup for a 1-1 translation for the minimal page
            // in the normal address space

            te->target_as = iommumr->m_as->get_ptr();
            te->addr_mask = (1 << iommumr->min_page_sz) - 1;
            te->iova = addr & ~te->addr_mask;
            te->translated_addr = (addr & ~te->addr_mask) + base_addr;
            te->perm = qemu::IOMMUMemoryRegion::IOMMU_RW;

            SCP_DEBUG(())
            ("Translate 1-1 limited passthrough 0x{:x}->0x{:x} (mask 0x{:x})", te->iova, te->translated_addr,
             te->addr_mask);
        }
        ltrans.clear_extension(&lu_dmi);
    }

    /**
     * @brief Request a DMI region, ask the QEMU instance DMI manager for a DMI region alias
     * for it and map it onto the QEMU root memory region.
     */
    tlm::tlm_dmi check_dmi_hint_locked(TlmPayload& trans)
    {
        assert(trans.is_dmi_allowed());
        tlm::tlm_dmi dmi_data;
        int shm_fd = -1;

        SCP_INFO(()) << "DMI request for address 0x" << std::hex << trans.get_address();

        // It is 'safer' from the SystemC perspective to m_on_sysc.run_on_sysc([this,
        // &trans]{...}).
        gs::UnderlyingDMITlmExtension u_dmi;

        trans.set_extension(&u_dmi);
        bool dmi_valid = (*this)->get_direct_mem_ptr(trans, dmi_data);
        trans.clear_extension(&u_dmi);
        if (!dmi_valid) {
            SCP_INFO(())("No DMI available for {:x}", trans.get_address());
            /* this is used by the map function below
             * - a better plan may be to tag memories to be mapped so we dont need this
             */
            if (u_dmi.has_dmi(gs::tlm_dmi_ex::dmi_mapped)) {
                tlm::tlm_dmi first_map = u_dmi.get_first(gs::tlm_dmi_ex::dmi_mapped);
                return first_map;
            }
            if (u_dmi.has_dmi(gs::tlm_dmi_ex::dmi_nomap)) {
                tlm::tlm_dmi first_nomap = u_dmi.get_first(gs::tlm_dmi_ex::dmi_nomap);
                return first_nomap;
            }
            return dmi_data;
        }

        /*
         * This is the 'special' case of IOMMU's which require an IOMMU memory region setup
         * The IOMMU will be constructed here, but not populated - that will happen in the callback
         * There will be a 'pair' of new regions, one to hold non iommu regions within this space,
         * the other to hold iommu regions themselves.
         *
         * In extreme circumstances, if the IOMMU DMI to this region previously failed, we may have
         * ended up with a normal DMI region here, which needs removing. We do that here, and then simply
         * return and wait for a new access to sort things out.
         */
        if (u_dmi.has_dmi(gs::tlm_dmi_ex::dmi_iommu)) {
            /* We have an IOMMU request setup an IOMMU region */
            SCP_INFO(())("IOMMU DMI available for {:x}", trans.get_address());

            /* The first mapped DMI will be the scope of the IOMMU region from our perspective */
            tlm::tlm_dmi first_map = u_dmi.get_first(gs::tlm_dmi_ex::dmi_mapped);

            uint64_t start = first_map.get_start_address();
            uint64_t size = first_map.get_end_address() - first_map.get_start_address();

            auto itr = m_mmio_mrs.find(start);
            if (itr == m_mmio_mrs.end()) {
                // Better check for overlapping iommu's - they must be banned !!

                qemu::RcuReadLock rcu_read_lock = m_inst.get().rcu_read_lock_new();

                /* invalidate any 'old' regions we happen to have mapped previously */
                invalidate_single_range(start, start + size);

                SCP_INFO(())
                ("Adding IOMMU for VA 0x{:x} [0x{:x} - 0x{:x}]", trans.get_address(), start, start + size);

                using namespace std::placeholders;
                qemu::MemoryRegionOpsPtr ops;
                ops = m_inst.get().memory_region_ops_new();
                ops->set_read_callback(std::bind(&QemuInitiatorSocket::qemu_io_read, this, _1, _2, _3, _4));
                ops->set_write_callback(std::bind(&QemuInitiatorSocket::qemu_io_write, this, _1, _2, _3, _4));
                ops->set_max_access_size(8);

                auto iommumr = std::make_shared<qemu::IOMMUMemoryRegion>(
                    m_inst.get().object_new<qemu::IOMMUMemoryRegion>());

                iommumr->init(*iommumr, "dmi-manager-iommu", size, ops,
                              [=](qemu::IOMMUMemoryRegion::IOMMUTLBEntry* te, uint64_t addr,
                                  qemu::IOMMUMemoryRegion::IOMMUAccessFlags flags,
                                  int idx) { dmi_translate(te, iommumr, start, addr, flags, idx); });
                {
                    std::lock_guard<std::mutex> lock(m_mutex);
                    m_mmio_mrs[start] = iommumr;
                }
                m_r->m_root->add_subregion(*iommumr, start);

            } else {
                // Previously when looking up a TE, we failed to get the lock, so the DMI failed, we ended up in a
                // limited passthrough. Which causes us to re-arrive here.... but, with a DMI hint. Hopefully next time
                // the TE is looked up, we'll get the lock and re-establish the translation. In any case we should do
                // nothing and simply return
                // Moving to a cached TE will improve speed and prevent this from happening?
                SCP_DEBUG(())
                ("Memory request should be directed via MMIO interface {:x} {:x}", start, trans.get_address());

                // std::lock_guard<std::mutex> lock(m_mutex);

                uint64_t start_range = itr->first;
                uint64_t end_range = itr->first + itr->second->get_size();

                invalidate_direct_mem_ptr(start_range, end_range);
            }
            return dmi_data;
        }

        ShmemIDExtension* shm_ext = trans.get_extension<ShmemIDExtension>();
        // it's ok that ShmemIDExtension is not added to trans as this should only happen when
        // memory is a shared memory type.
        if (shm_ext) {
            shm_fd = shm_ext->m_fd;
        }

        SCP_INFO(()) << "DMI Adding for address 0x" << std::hex << trans.get_address();

        // The upper limit is set within QEMU by the TBU
        // e.g. 1k small pages for ARM.
        // setting to 1/2 the size of the ARM TARGET_PAGE_SIZE,
        // Comment from QEMU code:
        /* The physical section number is ORed with a page-aligned
         * pointer to produce the iotlb entries. Thus it should
         * never overflow into the page-aligned value.
         */
#define MAX_MAP 250

        // Current function may be called by the MMIO thread which does not hold
        // any RCU read lock. This is required in case of a memory transaction
        // commit on a TCG accelerated Qemu instance
        qemu::RcuReadLock rcu_read_lock = m_inst.get().rcu_read_lock_new();

        if (m_dmi_aliases.size() > MAX_MAP) {
            SCP_FATAL(())("Too many DMI regions requested, consider using an IOMMU");
        }
        uint64_t start = dmi_data.get_start_address();
        uint64_t end = dmi_data.get_end_address();

        if (0 == m_dmi_aliases.count(start)) {
            SCP_INFO(()) << "Adding DMI for range [0x" << std::hex << dmi_data.get_start_address() << "-0x" << std::hex
                         << dmi_data.get_end_address() << "]";

            DmiRegionAlias::Ptr alias = m_inst.get_dmi_manager().get_new_region_alias(dmi_data, shm_fd);

            m_dmi_aliases[start] = alias;
            add_dmi_mr_alias(m_dmi_aliases[start]);
        } else {
            SCP_INFO(())("Already have DMI for 0x{:x}", start);
        }
        return dmi_data;
    }

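    /* If the target left a QemuMrHintTlmExtension on the payload and the hinted memory
     * region belongs to this QEMU instance, alias that region directly into our root MR
     * instead of going through DMI. */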
    void check_qemu_mr_hint(TlmPayload& trans)
    {
        QemuMrHintTlmExtension* ext = nullptr;
        uint64_t mapping_addr;

        trans.get_extension(ext);

        if (ext == nullptr) {
            return;
        }

        qemu::MemoryRegion target_mr(ext->get_mr());

        if (target_mr.get_inst_id() != m_dev.get_inst_id()) {
            return;
        }

        mapping_addr = trans.get_address() - ext->get_offset();

        qemu::MemoryRegion mr(m_inst.get().object_new<qemu::MemoryRegion>());
        mr.init_alias(m_dev, "mr-alias", target_mr, 0, target_mr.get_size());
        m_r->m_root->add_subregion(mr, mapping_addr);
    }

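    /* Forward a regular access as a blocking b_transport on the SystemC thread. The QEMU
     * iothread lock is dropped around the call so SystemC side-effects (e.g. wait) are safe,
     * then MR hints and DMI hints are honoured. */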
    void do_regular_access(TlmPayload& trans)
    {
        using sc_core::sc_time;

        uint64_t addr = trans.get_address();
        sc_time now = m_initiator.initiator_get_local_time();

        m_inst.get().unlock_iothread();
        m_on_sysc.run_on_sysc([this, &trans, &now] { (*this)->b_transport(trans, now); });
        m_inst.get().lock_iothread();
        /*
         * Reset transaction address before dmi check (could be altered by
         * b_transport).
         */
        trans.set_address(addr);
        check_qemu_mr_hint(trans);
        if (trans.is_dmi_allowed()) {
            check_dmi_hint_locked(trans);
        }

        m_initiator.initiator_set_local_time(now);
    }

    void do_debug_access(TlmPayload& trans)
    {
        m_inst.get().unlock_iothread();
        m_on_sysc.run_on_sysc([this, &trans] { (*this)->transport_dbg(trans); });
        m_inst.get().lock_iothread();
    }

    void do_direct_access(TlmPayload& trans)
    {
        sc_core::sc_time now = m_initiator.initiator_get_local_time();
        (*this)->b_transport(trans, now);
    }

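    /* Common path for all QEMU-initiated IO accesses: build the payload, serialise accesses
     * through g_rec_qemu_io_lock (re-entrant calls fall back to a direct access with no
     * SystemC side-effects), then translate the TLM response into a QEMU MemTxResult. */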
    MemTxResult qemu_io_access(tlm::tlm_command command, uint64_t addr, uint64_t* val, unsigned int size,
                               MemTxAttrs attrs)
    {
        TlmPayload trans;
        if (m_finished) return qemu::MemoryRegionOps::MemTxError;

        init_payload(trans, command, addr, val, size);

        if (trans.get_extension<ExclusiveAccessTlmExtension>()) {
            /* in the case of an exclusive access keep the iolock (and assume NO side-effects)
             * clearly dangerous, but exclusives are not guaranteed to work on IO space anyway
             */
            do_direct_access(trans);
        } else {
            if (!m_inst.g_rec_qemu_io_lock.try_lock()) {
                /* Allow only a single access, but handle re-entrant code,
                 * while allowing side-effects in SystemC (e.g. calling wait)
                 * [NB re-entrant code caused via memory listeners to
                 * creation of memory regions (due to DMI) in some models]
                 */
                m_inst.get().unlock_iothread();
                m_inst.g_rec_qemu_io_lock.lock();
                m_inst.get().lock_iothread();
            }
            reentrancy++;

            /* Force re-entrant code to use a direct access (safe for reentrancy with no side effects) */
            if (reentrancy > 1) {
                do_direct_access(trans);
            } else if (attrs.debug) {
                do_debug_access(trans);
            } else {
                do_regular_access(trans);
            }

            reentrancy--;
            m_inst.g_rec_qemu_io_lock.unlock();
        }
        m_initiator.initiator_tidy_tlm_payload(trans);

        switch (trans.get_response_status()) {
        case tlm::TLM_OK_RESPONSE:
            return qemu::MemoryRegionOps::MemTxOK;

        case tlm::TLM_ADDRESS_ERROR_RESPONSE:
            return qemu::MemoryRegionOps::MemTxDecodeError;

        default:
            return qemu::MemoryRegionOps::MemTxError;
        }
    }

public:
    MemTxResult qemu_io_read(uint64_t addr, uint64_t* val, unsigned int size, MemTxAttrs attrs)
    {
        return qemu_io_access(tlm::TLM_READ_COMMAND, addr, val, size, attrs);
    }

    MemTxResult qemu_io_write(uint64_t addr, uint64_t val, unsigned int size, MemTxAttrs attrs)
    {
        return qemu_io_access(tlm::TLM_WRITE_COMMAND, addr, &val, size, attrs);
    }

    QemuInitiatorSocket(const char* name, QemuInitiatorIface& initiator, QemuInstance& inst)
        : TlmInitiatorSocket(name)
        , m_inst(inst)
        , m_initiator(initiator)
        , m_on_sysc(sc_core::sc_gen_unique_name("initiator_run_on_sysc"))
    {
        SCP_DEBUG(()) << "QemuInitiatorSocket constructor";
        TlmInitiatorSocket::bind(*static_cast<tlm::tlm_bw_transport_if<>*>(this));
    }

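    /*
     * Create the root memory region backing this socket, install the MMIO read/write
     * callbacks on it and link it to the QEMU device through the given property.
     */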
    void init(qemu::Device& dev, const char* prop)
    {
        using namespace std::placeholders;

        qemu::LibQemu& inst = m_inst.get();
        qemu::MemoryRegionOpsPtr ops;

        m_r = new m_mem_obj(inst); // oot = inst.object_new<qemu::MemoryRegion>();
        ops = inst.memory_region_ops_new();

        ops->set_read_callback(std::bind(&QemuInitiatorSocket::qemu_io_read, this, _1, _2, _3, _4));
        ops->set_write_callback(std::bind(&QemuInitiatorSocket::qemu_io_write, this, _1, _2, _3, _4));
        ops->set_max_access_size(8);

        m_r->m_root->init_io(dev, TlmInitiatorSocket::name(), std::numeric_limits<uint64_t>::max(), ops);
        dev.set_prop_link(prop, *m_r->m_root);

        m_dev = dev;
    }

    void end_of_simulation()
    {
        m_finished = true;
        cancel_all();
    }

    // This could happen during void end_of_simulation() but there is a race with other units trying
    // to pull down their DMI's
    virtual ~QemuInitiatorSocket()
    {
        cancel_all();
        if (m_r) {
            if (m_r->m_root) {
                m_r->m_root->removeSubRegions();
            }
            delete m_r;
            m_r = nullptr;
        }
        // dmimgr_unlock();
    }

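    /*
     * Memory listener map callback: walk the [addr, addr + len) range, requesting DMI
     * (via check_dmi_hint_locked) for each returned DMI block until the range is covered.
     */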
    void qemu_map(qemu::MemoryListener& listener, uint64_t addr, uint64_t len)
    {
        // this function is relatively expensive, and called a lot, it should be done a different way and removed.
        if (m_finished) return;

        SCP_DEBUG(()) << "Mapping request for address [0x" << std::hex << addr << "-0x" << addr + len << "]";

        TlmPayload trans;
        uint64_t current_addr = addr;
        uint64_t temp;
        init_payload(trans, tlm::TLM_IGNORE_COMMAND, current_addr, &temp, 0);
        trans.set_dmi_allowed(true);

        while (current_addr < addr + len) {
            tlm::tlm_dmi dmi_data = check_dmi_hint_locked(trans);

            // Current addr is an absolute address while the dmi range might be relative,
            // hence current_addr does not necessarily fall within the dmi_range address boundaries
            // TODO: is there a way to retrieve the dmi range block offset?
            SCP_INFO(()) << "0x" << std::hex << current_addr << " mapped [0x" << dmi_data.get_start_address() << "-0x"
                         << dmi_data.get_end_address() << "]";

            // The allocated range may not span the whole length required for mapping
            assert(dmi_data.get_end_address() > current_addr);
            current_addr = dmi_data.get_end_address();
            if (current_addr >= addr + len) break; // Catch potential loop-rounds
            current_addr += 1;
            trans.set_address(current_addr);
        }

        m_initiator.initiator_tidy_tlm_payload(trans);
    }

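    /*
     * Variant of init() that attaches this socket to QEMU's global system memory and
     * registers a memory listener so mapped ranges are reported back through qemu_map().
     */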
    void init_global(qemu::Device& dev)
    {
        using namespace std::placeholders;

        qemu::LibQemu& inst = m_inst.get();
        qemu::MemoryRegionOpsPtr ops;
        ops = inst.memory_region_ops_new();

        ops->set_read_callback(std::bind(&QemuInitiatorSocket::qemu_io_read, this, _1, _2, _3, _4));
        ops->set_write_callback(std::bind(&QemuInitiatorSocket::qemu_io_write, this, _1, _2, _3, _4));
        ops->set_max_access_size(8);

        auto system_memory = inst.get_system_memory();
        system_memory->init_io(dev, TlmInitiatorSocket::name(), std::numeric_limits<uint64_t>::max() - 1, ops);
        m_r = new m_mem_obj(std::move(system_memory));

        m_as = inst.address_space_get_system_memory();
        // System memory has been changed from container to "io", this is relevant
        // for flatview, and to reflect that we can just update the topology
        m_as->update_topology();

        m_listener = inst.memory_listener_new();
        m_listener->set_map_callback(std::bind(&QemuInitiatorSocket::qemu_map, this, _1, _2, _3));
        m_listener->register_as(m_as);

        m_dev = dev;
    }

    void cancel_all() { m_on_sysc.cancel_all(); }

    /* tlm::tlm_bw_transport_if<> */
    virtual tlm::tlm_sync_enum nb_transport_bw(tlm::tlm_generic_payload& trans, tlm::tlm_phase& phase,
                                               sc_core::sc_time& t)
    {
        /* Should not be reached */
        assert(false);
        return tlm::TLM_COMPLETED;
    }

    virtual AliasesIterator remove_alias(AliasesIterator it)
    {
        DmiRegionAlias::Ptr r = it->second; /*
                                             * Invalidate this region. Do not bother with
                                             * partial invalidation as it's really not worth
                                             * it. Better to let the target model return sub-DMI
                                             * regions during future accesses.
                                             */

        /*
         * Mark the whole region this alias maps to as invalid. This has
         * the effect of marking all the other aliases mapping to the same
         * region as invalid too. If a DMI request for the same region is
         * already in progress, it will have a chance to detect it is now
         * invalid before mapping it on the QEMU root MR (see
         * check_dmi_hint comment).
         */
        // r->invalidate_region();

        assert(r->is_installed());
        // if (!r->is_installed()) {
        /*
         * The alias is not mapped onto the QEMU root MR yet. Simply
         * skip it. It will be removed from m_dmi_aliases by
         * check_dmi_hint.
         */
        // return it++;
        // }

        /*
         * Remove the alias from the root MR. This is enough to perform
         * required invalidations on QEMU's side in a thread-safe manner.
         */
        del_dmi_mr_alias(r);

        /*
         * Remove the alias from the collection. The DmiRegionAlias object
         * is then destructed, leading to the destruction of the DmiRegion
         * shared pointer it contains. When no more aliases reference this
         * region, it is in turn destructed, effectively destroying the
         * corresponding memory region in QEMU.
         */
        return m_dmi_aliases.erase(it);
    }

private:
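    /* Remove every DMI alias that overlaps [start_range, end_range] from the root MR
     * and from m_dmi_aliases. */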
    void invalidate_single_range(sc_dt::uint64 start_range, sc_dt::uint64 end_range)
    {
        auto it = m_dmi_aliases.upper_bound(start_range);

        if (it != m_dmi_aliases.begin()) {
            /*
             * Start with the preceding region, as it may already cross the
             * range we must invalidate.
             */
            it--;
        }
        while (it != m_dmi_aliases.end()) {
            DmiRegionAlias::Ptr r = it->second;

            if (r->get_start() > end_range) {
                /* We've got out of the invalidation range */
                break;
            }

            if (r->get_end() < start_range) {
                /* We are not in yet */
                it++;
                continue;
            }

            it = remove_alias(it);

            SCP_DEBUG(()) << "Invalidated region [0x" << std::hex << r->get_start() << ", 0x" << std::hex
                          << r->get_end() << "]";
        }
    }

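    /* Executed on the initiator thread via initiator_async_run(): drain m_ranges under the
     * lock and invalidate each pending range. */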
    void invalidate_ranges_safe_cb()
    {
        std::lock_guard<std::mutex> lock(m_mutex);

        SCP_DEBUG(()) << "Invalidating " << m_ranges.size() << " ranges";
        auto rit = m_ranges.begin();
        while (rit != m_ranges.end()) {
            invalidate_single_range(rit->first, rit->second);
            rit = m_ranges.erase(rit);
        }
    }

public:
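    /* TLM backward-path DMI invalidation: unmap any cached IOMMU TEs covered by the range,
     * then queue the range for alias removal on the initiator thread. */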
    virtual void invalidate_direct_mem_ptr(sc_dt::uint64 start_range, sc_dt::uint64 end_range)
    {
        if (m_finished) return;
        SCP_DEBUG(()) << "DMI invalidate [0x" << std::hex << start_range << ", 0x" << std::hex << end_range << "]";

        {
            std::lock_guard<std::mutex> lock(m_mutex);

            for (auto m : m_mmio_mrs) {
                auto mr_start = m.first;
                auto mr_end = m.first + m.second->get_size();
                if ((mr_start >= start_range && mr_start <= end_range) ||
                    (mr_end >= start_range && mr_end <= end_range) ||
                    (mr_start < start_range && mr_end > end_range)) {
                    for (auto it = m.second->m_mapped_te.begin(); it != m.second->m_mapped_te.end();) {
                        if ((it->first << m.second->min_page_sz) + mr_start >= start_range &&
                            (it->first << m.second->min_page_sz) + mr_start < end_range) {
                            m.second->iommu_unmap(&(it->second));
                            it = m.second->m_mapped_te.erase(it);
                        } else
                            it++;
                    }
                    return; // If we found this, then we're done. Overlapping IOMMU's are not allowed.
                }
            }
        }
        {
            std::lock_guard<std::mutex> lock(m_mutex);
            m_ranges.push_back(std::make_pair(start_range, end_range));
        }

        m_initiator.initiator_async_run([&]() { invalidate_ranges_safe_cb(); });

        /* For 7.2 this may need to be safe async work ???????? */
    }

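    /* Drop all cached IOMMU translation entries and remove every DMI alias, forcing
     * QEMU to re-request DMI after a reset. */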
    virtual void reset()
    {
        std::lock_guard<std::mutex> lock(m_mutex);

        for (auto m : m_mmio_mrs) {
            m.second->m_mapped_te.clear();
            auto it = m_dmi_aliases.begin();
            while (it != m_dmi_aliases.end()) {
                DmiRegionAlias::Ptr r = it->second;
                it = remove_alias(it);
            }
        }
    }
};

#endif