template <unsigned int BUSWIDTH>
class QemuInitiatorSocket
    : public tlm::tlm_initiator_socket<BUSWIDTH, tlm::tlm_base_protocol_types, 1, sc_core::SC_ZERO_OR_MORE_BOUND>,
      public tlm::tlm_bw_transport_if<>
{
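    // Bridges memory accesses initiated by QEMU into TLM transactions, and
    // implements the TLM backward interface itself so backward calls (DMI
    // invalidation, nb_transport_bw) land directly on this class.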
    std::vector<std::pair<sc_dt::uint64, sc_dt::uint64>> m_ranges;
    // ...

    using TlmInitiatorSocket = tlm::tlm_initiator_socket<BUSWIDTH, tlm::tlm_base_protocol_types, 1,
                                                         sc_core::SC_ZERO_OR_MORE_BOUND>;
    using TlmPayload = tlm::tlm_generic_payload;
    using MemTxResult = qemu::MemoryRegionOps::MemTxResult;
    // ...
    bool m_finished = false;
    // ...
    std::shared_ptr<qemu::AddressSpace> m_as;
    std::shared_ptr<qemu::MemoryListener> m_listener;
    std::map<uint64_t, std::shared_ptr<qemu::IOMMUMemoryRegion>> m_mmio_mrs;
    // ...
    std::shared_ptr<qemu::MemoryRegion> m_root;
    // ...
    std::map<DmiRegionAliasKey, DmiRegionAlias::Ptr> m_dmi_aliases;
    using AliasesIterator = std::map<DmiRegionAliasKey, DmiRegionAlias::Ptr>::iterator;
    // ...
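    // Prepare a TLM payload for a QEMU-initiated access: 'val' is QEMU's data
    // pointer and 'size' the access size; DMI is disallowed by default and the
    // response starts out INCOMPLETE so the target has to set it.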
        trans.set_data_ptr(reinterpret_cast<unsigned char*>(val));
        trans.set_data_length(size);
        trans.set_streaming_width(size);
        trans.set_byte_enable_length(0);
        trans.set_dmi_allowed(false);
        trans.set_response_status(tlm::TLM_INCOMPLETE_RESPONSE);

        m_initiator.initiator_customize_tlm_payload(trans);
        // ...
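    // DMI region aliases map DMI-backed memory into the QEMU root memory
    // region; installing/uninstalling keeps m_r->m_root in sync with the
    // aliases currently granted.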
    void add_dmi_mr_alias(DmiRegionAlias::Ptr alias)
    {
        // ... (presumably adds alias->get_alias_mr() as a subregion of m_r->m_root)
        alias->set_installed();
    }

    void del_dmi_mr_alias(const DmiRegionAlias::Ptr alias)
    {
        if (!alias->is_installed()) {
            return;
        }
        m_r->m_root->del_subregion(alias->get_alias_mr());
    }
    // ...
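    // IOMMU translate, fast path: reuse a translation entry (TE) cached in
    // m_mapped_te rather than renegotiating DMI on every access.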
        std::lock_guard<std::mutex> lock(m_mutex);
        // ... (look the address up in iommumr->m_mapped_te)
        if (m != iommumr->m_mapped_te.end()) {
            *te = m->second;
            SCP_DEBUG(())
            ("FAST (unord) translate for 0x{:x} : 0x{:x}->0x{:x} (mask 0x{:x}) perm={}", addr, te->iova,
             te->translated_addr, te->addr_mask, te->perm);
            return;
        }
        // ...
        if (lu_dmi.has_dmi(gs::tlm_dmi_ex::dmi_iommu)) {
            // ...
            SCP_DEBUG(())
            ("Adding IOMMU DMI Region start 0x{:x} - 0x{:x}", lu_dmi_data.get_start_address(),
             lu_dmi_data.get_end_address());
            // ...
            te->target_as = iommumr->m_as_te->get_ptr();
            // ...
            te->perm = (qemu::IOMMUMemoryRegion::IOMMUAccessFlags)ldmi_data.get_granted_access();
            // ...
            SCP_DEBUG(())
            ("Translate IOMMU 0x{:x}->0x{:x} (mask 0x{:x})", te->iova, te->translated_addr, te->addr_mask);
        } else {
            // ...
            alias->set_installed();
            // ...
            te->target_as = iommumr->m_as->get_ptr();
            // ...
            te->perm = (qemu::IOMMUMemoryRegion::IOMMUAccessFlags)ldmi_data.get_granted_access();
            // ...
            SCP_DEBUG(())
            ("Translate 1-1 passthrough 0x{:x}->0x{:x} (mask 0x{:x})", te->iova, te->translated_addr,
             te->addr_mask);
        }
        // ...
        SCP_WARN(())("Could have used the cache! {:x}\n", addr);
        // ...
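        // Miss path: once a TE has been built, cache it (keyed by the
        // mask-aligned address) so later accesses take the fast path above.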
        std::lock_guard<std::mutex> lock(m_mutex);
        // ...
        SCP_DEBUG(())
        ("Caching TE at addr 0x{:x} (mask {:x})", addr & ~te->addr_mask, te->addr_mask);
        // ...
        te->target_as = iommumr->m_as->get_ptr();
        te->addr_mask = (1 << iommumr->min_page_sz) - 1;
        // ...
        te->perm = qemu::IOMMUMemoryRegion::IOMMU_RW;
        // ...
        SCP_DEBUG(())
        ("Translate 1-1 limited passthrough 0x{:x}->0x{:x} (mask 0x{:x})", te->iova, te->translated_addr,
         te->addr_mask);
        // ...
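    // DMI hint handling: when a completed transaction allows DMI, request a
    // direct memory pointer and classify the result (plain mapped region,
    // explicitly non-mappable, or IOMMU-translated).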
        tlm::tlm_dmi dmi_data;
        // ...
        SCP_INFO(()) << "DMI request for address 0x" << std::hex << trans.get_address();
        // ...
        bool dmi_valid = (*this)->get_direct_mem_ptr(trans, dmi_data);
        if (!dmi_valid) {
            SCP_INFO(())("No DMI available for {:x}", trans.get_address());
            // ...
        }
        // ...
        if (u_dmi.has_dmi(gs::tlm_dmi_ex::dmi_mapped)) {
            tlm::tlm_dmi first_map = u_dmi.get_first(gs::tlm_dmi_ex::dmi_mapped);
            // ...
        }
        if (u_dmi.has_dmi(gs::tlm_dmi_ex::dmi_nomap)) {
            // ...
        }
        if (u_dmi.has_dmi(gs::tlm_dmi_ex::dmi_iommu)) {
            SCP_INFO(())("IOMMU DMI available for {:x}", trans.get_address());
            // ...
            tlm::tlm_dmi first_map = u_dmi.get_first(gs::tlm_dmi_ex::dmi_mapped);
            // ... ('start' and 'size' of the region are derived here)
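            // First access through this IOMMU range: create an
            // IOMMUMemoryRegion on demand, with MMIO read/write callbacks as
            // the non-DMI fallback, and register it at 'start' under the root
            // memory region.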
            auto itr = m_mmio_mrs.find(start);
            if (itr == m_mmio_mrs.end()) {
                // ...
                invalidate_single_range(start, start + size);
                // ...
                SCP_DEBUG(())
                ("Adding IOMMU for VA 0x{:x} [0x{:x} - 0x{:x}]", trans.get_address(), start, start + size);

                using namespace std::placeholders;
                qemu::MemoryRegionOpsPtr ops;
                ops = m_inst.get().memory_region_ops_new();
                ops->set_read_callback(std::bind(&QemuInitiatorSocket::qemu_io_read, this, _1, _2, _3, _4));
                ops->set_write_callback(std::bind(&QemuInitiatorSocket::qemu_io_write, this, _1, _2, _3, _4));
                ops->set_max_access_size(8);

                auto iommumr = std::make_shared<qemu::IOMMUMemoryRegion>(
                    /* ... */);
                // ... (the region's translate callback is installed here; it
                //      receives the access address and a
                //      qemu::IOMMUMemoryRegion::IOMMUAccessFlags flags argument,
                //      and fills in the IOMMUTLBEntry as in the translate paths
                //      above)
                {
                    std::lock_guard<std::mutex> lock(m_mutex);
                    // ...
                    m_r->m_root->add_subregion(*iommumr, start);
                    // ...
                }
            }
            // ...
            SCP_DEBUG(())
            ("Memory request should be directed via MMIO interface {:x} {:x}", start, trans.get_address());
        }
        // ...
        SCP_INFO(()) << "DMI Adding for address 0x" << std::hex << trans.get_address();
        // ...
        if (m_dmi_aliases.size() > MAX_MAP) {
            SCP_FATAL(())("Too many DMI regions requested, consider using an IOMMU");
        }
        uint64_t start = dmi_data.get_start_address();
        uint64_t end = dmi_data.get_end_address();

        if (0 == m_dmi_aliases.count(start)) {
            SCP_INFO(()) << "Adding DMI for range [0x" << std::hex << dmi_data.get_start_address() << "-0x"
                         << std::hex << dmi_data.get_end_address() << "]";
            // ... (a DmiRegionAlias is created for dmi_data here)
            m_dmi_aliases[start] = alias;
            add_dmi_mr_alias(m_dmi_aliases[start]);
        } else {
            SCP_INFO(())("Already have DMI for 0x{:x}", start);
        }
        // ...
    void check_qemu_mr_hint(TlmPayload& trans)
    {
        // ...
        if (ext == nullptr) {
            return;
        }
        // ...
        if (target_mr.get_inst_id() != m_dev.get_inst_id()) {
            return;
        }
        // ...
    }
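    // Three ways to complete a QEMU-initiated access: a regular access runs
    // through the socket with the iothread released and local time kept in
    // sync, a debug access takes the debug-transport path, and a direct access
    // is the lightweight path also used for re-entrant calls (see
    // qemu_io_access below).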
    void do_regular_access(TlmPayload& trans)
    {
        using sc_core::sc_time;
        // ...
        sc_time now = m_initiator.initiator_get_local_time();

        m_inst.get().unlock_iothread();
        // ... (the transaction is forwarded on the SystemC side here)
        m_inst.get().lock_iothread();
        // ...
        check_qemu_mr_hint(trans);
        if (trans.is_dmi_allowed()) {
            // ... (DMI hint handling, see above)
        }

        m_initiator.initiator_set_local_time(now);
    }
    void do_debug_access(TlmPayload& trans)
    {
        m_inst.get().unlock_iothread();
        // ... (the debug transaction is forwarded on the SystemC side here)
        m_inst.get().lock_iothread();
    }
    void do_direct_access(TlmPayload& trans)
    {
        sc_core::sc_time now = m_initiator.initiator_get_local_time();
        // ...
    }
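    // Entry point for QEMU's read/write callbacks. g_rec_qemu_io_lock detects
    // re-entrancy: nested accesses (reentrancy > 1) are handled as direct
    // accesses to avoid deadlock, debug attributes select the debug path, and
    // the TLM response status is mapped back onto a QEMU MemTxResult.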
        if (m_finished) return qemu::MemoryRegionOps::MemTxError;
        // ...
        do_direct_access(trans);
        // ...
        if (!m_inst.g_rec_qemu_io_lock.try_lock()) {
            // ... (someone else holds the lock: release the iothread while
            //      waiting, then take both again)
            m_inst.get().unlock_iothread();
            m_inst.g_rec_qemu_io_lock.lock();
            m_inst.get().lock_iothread();
        }
        // ...
        if (reentrancy > 1) {
            do_direct_access(trans);
        } else if (attrs.debug) {
            do_debug_access(trans);
        } else {
            do_regular_access(trans);
        }
        // ...
        m_inst.g_rec_qemu_io_lock.unlock();
        // ...
        m_initiator.initiator_tidy_tlm_payload(trans);

        switch (trans.get_response_status()) {
        case tlm::TLM_OK_RESPONSE:
            return qemu::MemoryRegionOps::MemTxOK;
        case tlm::TLM_ADDRESS_ERROR_RESPONSE:
            return qemu::MemoryRegionOps::MemTxDecodeError;
        default:
            return qemu::MemoryRegionOps::MemTxError;
        }
    MemTxResult qemu_io_read(uint64_t addr, uint64_t* val, unsigned int size,
                             qemu::MemoryRegionOps::MemTxAttrs attrs)
    {
        return qemu_io_access(tlm::TLM_READ_COMMAND, addr, val, size, attrs);
    }

    MemTxResult qemu_io_write(uint64_t addr, uint64_t val, unsigned int size,
                              qemu::MemoryRegionOps::MemTxAttrs attrs)
    {
        return qemu_io_access(tlm::TLM_WRITE_COMMAND, addr, &val, size, attrs);
    }
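    // The socket implements its own backward interface, so binding *this makes
    // DMI invalidations and nb_transport_bw calls arrive directly here.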
    QemuInitiatorSocket(const char* name /* ... */)
        : TlmInitiatorSocket(name)
        // ...
    {
        SCP_DEBUG(()) << "QemuInitiatorSocket constructor";
        TlmInitiatorSocket::bind(*static_cast<tlm::tlm_bw_transport_if<>*>(this));
    }
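    // Per-device init: create the root memory-region container, register this
    // socket's I/O callbacks across the whole 64-bit range, and expose the
    // root region to QEMU through the device property link.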
    void init(qemu::Device& dev, const char* prop)
    {
        using namespace std::placeholders;
        // ...
        qemu::MemoryRegionOpsPtr ops;

        m_r = new m_mem_obj(inst);
        ops = inst.memory_region_ops_new();

        ops->set_read_callback(std::bind(&QemuInitiatorSocket::qemu_io_read, this, _1, _2, _3, _4));
        ops->set_write_callback(std::bind(&QemuInitiatorSocket::qemu_io_write, this, _1, _2, _3, _4));
        ops->set_max_access_size(8);

        m_r->m_root->init_io(dev, TlmInitiatorSocket::name(), std::numeric_limits<uint64_t>::max(), ops);
        dev.set_prop_link(prop, *m_r->m_root);
    }
    void end_of_simulation()
    {
        // ...
        m_r->m_root->removeSubRegions();
        // ...
    }
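    // Map callback: when QEMU maps a new range into the listened address
    // space, walk it and try to obtain DMI for each sub-range.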
        if (m_finished) return;

        SCP_DEBUG(()) << "Mapping request for address [0x" << std::hex << addr << "-0x" << addr + len << "]";
        // ...
        trans.set_dmi_allowed(true);
        // ...
        SCP_INFO(()) << "0x" << std::hex << current_addr << " mapped [0x" << dmi_data.get_start_address()
                     << "-0x" << dmi_data.get_end_address() << "]";
        // ...
        m_initiator.initiator_tidy_tlm_payload(trans);
        // ...
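    // Global variant of init(): register the same I/O callbacks on QEMU's
    // system memory and attach a memory listener whose map callback
    // (qemu_map above) reacts to new mappings in that address space.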
        using namespace std::placeholders;
        // ...
        qemu::MemoryRegionOpsPtr ops;
        ops = inst.memory_region_ops_new();

        ops->set_read_callback(std::bind(&QemuInitiatorSocket::qemu_io_read, this, _1, _2, _3, _4));
        ops->set_write_callback(std::bind(&QemuInitiatorSocket::qemu_io_write, this, _1, _2, _3, _4));
        ops->set_max_access_size(8);
        // ...
        system_memory->init_io(dev, TlmInitiatorSocket::name(), std::numeric_limits<uint64_t>::max() - 1, ops);
        // ...
        m_as = inst.address_space_get_system_memory();
        // ...
        m_as->update_topology();

        m_listener = inst.memory_listener_new();
        m_listener->set_map_callback(std::bind(&QemuInitiatorSocket::qemu_map, this, _1, _2, _3));
        m_listener->register_as(m_as);
        // ...
    virtual tlm::tlm_sync_enum nb_transport_bw(tlm::tlm_generic_payload& trans, tlm::tlm_phase& phase,
                                               sc_core::sc_time& t)
    {
        // ...
        return tlm::TLM_COMPLETED;
    }
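    // Remove a single DMI alias, returning the iterator to the next element so
    // callers can erase while iterating over m_dmi_aliases.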
    virtual AliasesIterator remove_alias(AliasesIterator it)
    {
        DmiRegionAlias::Ptr r = it->second;
        // ...
        assert(r->is_installed());
        // ...
        return m_dmi_aliases.erase(it);
    }
    // ... (the alias-removal loop below is presumably part of the
    //      single-range invalidation path)

        if (it != m_dmi_aliases.begin()) {
            // ...
        }
        while (it != m_dmi_aliases.end()) {
            DmiRegionAlias::Ptr r = it->second;
            // ...
            it = remove_alias(it);

            SCP_DEBUG(()) << "Invalidated region [0x" << std::hex << r->get_start() << ", 0x" << std::hex
                          << r->get_end() << "]";
        }
        // ...
    void invalidate_ranges_safe_cb()
    {
        std::lock_guard<std::mutex> lock(m_mutex);

        SCP_DEBUG(()) << "Invalidating " << m_ranges.size() << " ranges";
        auto rit = m_ranges.begin();
        while (rit != m_ranges.end()) {
            invalidate_single_range(rit->first, rit->second);
            rit = m_ranges.erase(rit);
        }
    }
    // ...
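    // Invalidating also flushes cached IOMMU translation entries: each TE in
    // an affected region is unmapped on the QEMU side and erased.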
        if (m_finished) return;
        // ...
        std::lock_guard<std::mutex> lock(m_mutex);
        // ...
        for (auto m : m_mmio_mrs) {
            // ...
            auto mr_end = m.first + m.second->get_size();
            // ...
            for (auto it = m.second->m_mapped_te.begin(); it != m.second->m_mapped_te.end();) {
                // ...
                m.second->iommu_unmap(&(it->second));
                it = m.second->m_mapped_te.erase(it);
            }
        }
        // ...
        {
            std::lock_guard<std::mutex> lock(m_mutex);
            // ... (the range is queued into m_ranges)
        }
        m_initiator.initiator_async_run([&]() { invalidate_ranges_safe_cb(); });
        // ...
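    // Global invalidation: drop every cached IOMMU TE and tear down all DMI
    // aliases in one sweep.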
        std::lock_guard<std::mutex> lock(m_mutex);
        // ...
        for (auto m : m_mmio_mrs) {
            m.second->m_mapped_te.clear();
        }
        auto it = m_dmi_aliases.begin();
        while (it != m_dmi_aliases.end()) {
            DmiRegionAlias::Ptr r = it->second;
            it = remove_alias(it);
        }