quic/qbox
Loading...
Searching...
No Matches
router.h
1/*
2 * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All Rights Reserved.
3 * Author: GreenSocs 2022
4 *
5 * SPDX-License-Identifier: BSD-3-Clause
6 */
7
8#ifndef _GREENSOCS_BASE_COMPONENTS_ROUTER_H
9#define _GREENSOCS_BASE_COMPONENTS_ROUTER_H
10
11#include <cinttypes>
12#include <utility>
13#include <vector>
14#include <map>
15#include <set> // Required for std::set
16#include <limits> // Required for std::numeric_limits
17#include <memory> // Required for std::shared_ptrs
18#include <unordered_map> // Required for std::unordered_map
19#include <list> // Required for std::list
20#include <functional> // Required for std::less
21
/* Theoretically a DMI request can be failed with no ill effects. To protect against re-entrant code
 * between a DMI invalidate and a DMI request on separate threads (which would effectively require all
 * work to be done on the same thread), we exploit this by 'try_lock'ing and, on failure, refusing the
 * DMI request. However, this has the negative side effect that upstream models may not receive DMI
 * access when they expect it, which at the very least is a performance hit.
 * Define this as true if you require protection against re-entrant models.
 */
29#define THREAD_SAFE_REENTRANT false
30#define THREAD_SAFE true
31#if defined(THREAD_SAFE) and THREAD_SAFE
32#include <mutex>
33#include <shared_mutex>
34#endif
35#include <atomic>
36
37#include <cci_configuration>
38#include <systemc>
39#include <tlm>
40#include <scp/report.h>
41#include <scp/helpers.h>
42#include <cciutils.h>
43
44#include <tlm_utils/multi_passthrough_initiator_socket.h>
45#include <tlm_utils/multi_passthrough_target_socket.h>
46#include <tlm-extensions/pathid_extension.h>
47#include <tlm-extensions/underlying-dmi.h>
48
49#include <router_if.h>
50#include <module_factory_registery.h>
51#include <tlm_sockets_buswidth.h>
52
53namespace gs {
54
/// Abstract interface for the router's address-decode cache policy.
/// Keys are addresses, values are decoded targets; implementations may be
/// real caches or the no-op AddrMapNoCache policy.
///
/// NOTE(review): the `class AddrMapCacheBase` declaration line was lost in
/// extraction (the derived class below and the generated index both name it);
/// restored here.
template <typename Key, typename Value>
class AddrMapCacheBase
{
public:
    virtual ~AddrMapCacheBase() = default;

    /// Look up @p key; on a hit, store the cached result in @p value.
    /// @return true on a cache hit, false on a miss.
    virtual bool get(const Key& key, Value& value) = 0;

    /// Record @p key -> @p value; @p size is the access size, allowing
    /// range-aware caching policies.
    virtual void put(const Key& key, const Value& value, uint64_t size) = 0;

    /// Drop all cached entries.
    virtual void clear() = 0;

    // Statistics interface
    virtual uint64_t get_hits() const = 0;
    virtual uint64_t get_misses() const = 0;
    virtual void reset_stats() = 0;
};
69
74template <typename Key, typename Value>
75class AddrMapNoCache : public AddrMapCacheBase<Key, Value>
76{
77public:
79 bool get(const Key& key, Value& value) override
80 {
81 (void)key; // Suppress unused parameter warning
82 (void)value; // Suppress unused parameter warning
83 return false;
84 }
85
87 void put(const Key& key, const Value& value, [[maybe_unused]] uint64_t size) override
88 {
89 (void)key; // Suppress unused parameter warning
90 (void)value; // Suppress unused parameter warning
91 }
92
94 void clear() override {}
95
96 // Statistics interface implementation
97 uint64_t get_hits() const override { return 0; }
98 uint64_t get_misses() const override { return 0; }
99 void reset_stats() override {}
100};
101
112template <unsigned int BUSWIDTH = DEFAULT_TLM_BUSWIDTH, template <typename, typename> class CacheType = AddrMapNoCache>
113class router : public sc_core::sc_module, public gs::router_if<BUSWIDTH>
114{
116 using TargetSocket = tlm::tlm_base_target_socket_b<BUSWIDTH, tlm::tlm_fw_transport_if<>,
117 tlm::tlm_bw_transport_if<>>;
119 using InitiatorSocket = tlm::tlm_base_initiator_socket_b<BUSWIDTH, tlm::tlm_fw_transport_if<>,
120 tlm::tlm_bw_transport_if<>>;
124 using initiator_socket_type = typename gs::router<
127 using gs::router_if<BUSWIDTH>::bound_targets;
128
148 template <typename TargetInfoType>
149 class addressMap
150 {
151 private:
158 struct Region {
159 uint64_t start;
160 uint64_t end;
161 std::shared_ptr<TargetInfoType> target;
162
163 Region(uint64_t s, uint64_t e, std::shared_ptr<TargetInfoType> t): start(s), end(e), target(t) {}
164 };
165
167 std::map<uint64_t, Region> m_regions;
168
169 CacheImpl m_cache;
170
        /// Insert the region [new_start, new_end) for @p new_target, splitting any
        /// overlapping regions at their boundary points; each resulting segment is
        /// kept for the covering interval with the numerically lowest 'priority'
        /// value (lower value wins).
        ///
        /// NOTE(review): a few lines appear to have been lost in extraction — the
        /// declarations of seg_start/seg_end/best_priority inside the boundary loop,
        /// and the `if (completely_shadowed) {` guard before the warning block (its
        /// closing brace is still present). Verify against the upstream source.
        void split_and_resolve(uint64_t new_start, uint64_t new_end, std::shared_ptr<TargetInfoType> new_target)
        {
            // Step 1: Collect all existing regions that overlap with the new region
            std::vector<Region> overlapping_regions;
            std::vector<uint64_t> keys_to_remove;

            // Find overlapping regions using interval overlap test: (a.start < b.end && a.end > b.start)
            for (const auto& [start_addr, region] : m_regions) {
                if (region.start < new_end && region.end > new_start) {
                    overlapping_regions.push_back(region);
                    keys_to_remove.push_back(start_addr);
                }
            }

            // Step 2: Remove overlapping regions from the map (they'll be re-added after splitting)
            for (uint64_t key : keys_to_remove) {
                m_regions.erase(key);
            }

            // Step 3: Create intervals for all regions that need to be resolved
            struct Interval {
                uint64_t start, end;
                std::shared_ptr<TargetInfoType> target;
                uint32_t priority;

                Interval(uint64_t s, uint64_t e, std::shared_ptr<TargetInfoType> t)
                    : start(s), end(e), target(t), priority(t->priority)
                {
                }
            };

            std::vector<Interval> intervals;
            intervals.reserve(overlapping_regions.size() + 1); // Optimize allocation

            // Add the new region
            intervals.emplace_back(new_start, new_end, new_target);

            // Add all overlapping existing regions
            for (const Region& region : overlapping_regions) {
                intervals.emplace_back(region.start, region.end, region.target);
            }

            // Step 4: Create sorted list of all boundary points for segmentation
            std::set<uint64_t> boundaries;
            for (const Interval& interval : intervals) {
                boundaries.insert(interval.start);
                boundaries.insert(interval.end);
            }

            // Step 5: Process each segment between boundaries and resolve conflicts
            auto boundary_it = boundaries.begin();
            while (boundary_it != boundaries.end()) {
                // (seg_start is presumably taken from *boundary_it here — see NOTE above)
                ++boundary_it;
                if (boundary_it == boundaries.end()) break;
                // (seg_end / best_priority are presumably initialized here — see NOTE above)

                // Find the highest priority target for this segment
                std::shared_ptr<TargetInfoType> best_target = nullptr;

                for (const Interval& interval : intervals) {
                    // Check if this interval covers the current segment
                    if (interval.start <= seg_start && interval.end >= seg_end) {
                        if (interval.priority < best_priority) {
                            best_priority = interval.priority;
                            best_target = interval.target;
                        }
                    }
                }

                // Add the resolved segment to the final map
                if (best_target) {
                    m_regions.emplace(seg_start, Region(seg_start, seg_end, best_target));
                }
            }

            // Step 6: Check for completely shadowed regions and issue warnings
            for (const Interval& interval : intervals) {
                // Skip the newly added region (it can't be shadowed by itself)
                if (interval.start == new_start && interval.end == new_end && interval.target == new_target) {
                    continue;
                }

                // Check if this interval is completely covered by higher priority regions
                // We do this by checking if any part of this interval "wins" in the final mapping
                bool completely_shadowed = true;

                // Look through the final resolved regions to see if any belong to this interval's target
                for (const auto& [seg_start, region] : m_regions) {
                    // Check if this final region overlaps with our interval and belongs to our target
                    if (region.target == interval.target && region.start < interval.end &&
                        region.end > interval.start) {
                        // This interval has at least some part that's not shadowed
                        completely_shadowed = false;
                        break;
                    }
                }

                // (an `if (completely_shadowed) {` guard presumably opened here — see NOTE above)
                    std::stringstream ss;
                    ss << "Region '" << interval.target->name << "' (0x" << std::hex << interval.start << "-0x"
                       << (interval.end - 1) << ", priority " << std::dec << interval.priority
                       << ") is completely shadowed by higher priority regions and will never be accessed";
                    SC_REPORT_WARNING("addressMap", ss.str().c_str());
                }
            }
        }
296
297 public:
299 addressMap() = default;
300
315 void add(std::shared_ptr<TargetInfoType> t_info)
316 {
317 uint64_t start = t_info->address;
318 uint64_t end = t_info->address + t_info->size;
319
320 split_and_resolve(start, end, t_info);
321
322 // Clear cache after map modification to ensure consistency
323 m_cache.clear();
324 }
325
339 std::shared_ptr<TargetInfoType> find(uint64_t address, uint64_t size)
340 {
341 // Fast path: check LRU cache first
342 std::shared_ptr<TargetInfoType> cached_result;
343 if (m_cache.get(address, cached_result)) {
344 return cached_result;
345 }
346
347 // Slow path: search the map using upper_bound for O(log n) lookup
348 // upper_bound finds the first region with start > address
349 auto it = m_regions.upper_bound(address);
350 if (it != m_regions.begin()) {
351 --it; // Move to the region that might contain our address
352
353 // Check if address falls within this region [start, end)
354 if (address >= it->second.start && address < it->second.end) {
355 // Cache the result for future lookups
356 m_cache.put(address, it->second.target, size);
357 return it->second.target;
358 }
359 }
360
361 // Address not found - cache the negative result to avoid repeated lookups
362 m_cache.put(address, nullptr, size);
363 return nullptr;
364 }
365
382 std::shared_ptr<TargetInfoType> find_region(uint64_t addr, tlm::tlm_dmi& dmi)
383 {
384 // First try to find a mapped region using the same logic as find()
385 auto it = m_regions.upper_bound(addr);
386 if (it != m_regions.begin()) {
387 --it;
388 if (addr >= it->second.start && addr < it->second.end) {
389 // Found mapped region - return the full extent of the original memory region
390 // This is crucial for DMI: clients expect access to the entire memory,
391 // not just the split segment that won the priority resolution
392 std::shared_ptr<TargetInfoType> target = it->second.target;
393 dmi.set_start_address(target->address);
394 dmi.set_end_address(target->address + target->size - 1); // inclusive end
395 return target;
396 }
397 }
398
399 // Address is in a hole - calculate hole boundaries for DMI invalidation
401 uint64_t hole_end = std::numeric_limits<uint64_t>::max();
402
403 // Find the region immediately before this address
404 it = m_regions.upper_bound(addr);
405 if (it != m_regions.begin()) {
406 --it;
407 if (it->second.end <= addr) {
408 hole_start = it->second.end;
409 }
410 }
411
412 // Find the region immediately after this address
413 it = m_regions.upper_bound(addr);
414 if (it != m_regions.end()) {
415 hole_end = it->second.start;
416 }
417
418 // Set DMI boundaries for the hole
419 dmi.set_start_address(hole_start);
420 dmi.set_end_address(hole_end == 0 ? 0 : hole_end - 1); // Handle edge case
421 return nullptr;
422 }
423
429 void get_cache_stats(uint64_t& hits, uint64_t& misses) const
430 {
431 hits = m_cache.get_hits();
432 misses = m_cache.get_misses();
433 }
434
438 void reset_cache_stats() { m_cache.reset_stats(); }
439 };
440
441 SCP_LOGGER_VECTOR(D);
442 SCP_LOGGER(());
443 SCP_LOGGER((DMI), "dmi");
444
445private:
446#if defined(THREAD_SAFE) and THREAD_SAFE
448 std::mutex m_dmi_mutex;
449#endif
451 struct dmi_info {
452 std::set<int> initiators;
453 tlm::tlm_dmi dmi;
455 dmi_info(tlm::tlm_dmi& _dmi) { dmi = _dmi; }
456 };
458 std::map<uint64_t, dmi_info> m_dmi_info_map;
459
470 dmi_info* in_dmi_cache(tlm::tlm_dmi& dmi)
471 {
472 auto it = m_dmi_info_map.find(dmi.get_start_address());
473 if (it != m_dmi_info_map.end()) {
474 if (it->second.dmi.get_end_address() != dmi.get_end_address()) {
475 std::stringstream ss;
476 ss << "Can't handle that: DMI overlap with differing end address (0x" << std::hex
477 << it->second.dmi.get_end_address() << " vs 0x" << dmi.get_end_address() << ")";
478 SC_REPORT_ERROR("DMI", ss.str().c_str());
479 }
480 return &(it->second);
481 }
482 auto insit = m_dmi_info_map.emplace(dmi.get_start_address(), dmi_info(dmi));
483 return &(insit.first->second);
484 }
485
496 void record_dmi(int id, tlm::tlm_dmi& dmi)
497 {
498 auto it = m_dmi_info_map.find(dmi.get_start_address());
499 if (it != m_dmi_info_map.end()) {
500 if (it->second.dmi.get_end_address() != dmi.get_end_address()) {
501 SCP_WARN((DMI)) << "A new DMI overlaps with an old one, invalidating the old one";
502 invalidate_direct_mem_ptr_ts(0, dmi.get_start_address(),
503 dmi.get_end_address()); // id will be ignored
504 }
505 }
506
507 dmi_info* dinfo = in_dmi_cache(dmi);
508 dinfo->initiators.insert(id);
509 }
510
520 void register_boundto(std::string s) override
521 {
523 // Create a shared_ptr for the new target_info
524 std::shared_ptr<target_info> ti_ptr = std::make_shared<target_info>();
525 ti_ptr->name = s;
526 ti_ptr->index = bound_targets.size(); // Current size will be its index
528 SCP_DEBUG((D[ti_ptr->index])) << "Connecting : " << ti_ptr->name;
529 ti_ptr->chained = false;
530 std::string tmp = name();
531 int i;
532 for (i = 0; i < tmp.length(); i++)
533 if (s[i] != tmp[i]) break;
534 ti_ptr->shortname = s.substr(i);
535 bound_targets.push_back(ti_ptr); // Add shared_ptr to bound_targets
536 }
537
548 std::string txn_tostring(const target_info* ti, tlm::tlm_generic_payload& trans)
549 {
550 std::stringstream info;
551 const char* cmd = "UNKOWN";
552 switch (trans.get_command()) {
553 case tlm::TLM_IGNORE_COMMAND:
554 info << "IGNORE ";
555 break;
556 case tlm::TLM_WRITE_COMMAND:
557 info << "WRITE ";
558 break;
559 case tlm::TLM_READ_COMMAND:
560 info << "READ ";
561 break;
562 }
563
564 info << " address:"
565 << "0x" << std::hex << trans.get_address();
566 info << " len:" << trans.get_data_length();
567 unsigned char* ptr = trans.get_data_ptr();
568 if ((trans.get_command() == tlm::TLM_READ_COMMAND && trans.get_response_status() == tlm::TLM_OK_RESPONSE) ||
569 (trans.get_command() == tlm::TLM_WRITE_COMMAND &&
570 trans.get_response_status() == tlm::TLM_INCOMPLETE_RESPONSE)) {
571 info << " data:0x";
572 for (int i = trans.get_data_length(); i; i--) {
573 info << std::setw(2) << std::setfill('0') << std::hex << (unsigned int)(ptr[i - 1]);
574 }
575 }
576 info << " " << trans.get_response_string() << " ";
577 for (int i = 0; i < tlm::max_num_extensions(); i++) {
578 if (trans.get_extension(i)) {
579 info << " extn:" << i;
580 }
581 }
582 return info.str();
583 }
584
585public:
587 initiator_socket_type initiator_socket;
589 tlm_utils::multi_passthrough_target_socket<router<BUSWIDTH, CacheType>, BUSWIDTH> target_socket;
591 cci::cci_broker_handle m_broker;
592
593private:
595 std::vector<std::shared_ptr<target_info>> alias_targets;
597 addressMap<target_info> m_address_map;
599 std::vector<std::shared_ptr<target_info>> id_targets;
600
602 std::vector<PathIDExtension*> m_pathIDPool;
603#if defined(THREAD_SAFE) and THREAD_SAFE
605 std::mutex m_pool_mutex;
606#endif
607
617 void stamp_txn(int id, tlm::tlm_generic_payload& txn)
618 {
619 PathIDExtension* ext = nullptr;
620 txn.get_extension(ext);
621 if (ext == nullptr) {
622#if defined(THREAD_SAFE) and THREAD_SAFE
623 std::lock_guard<std::mutex> l(m_pool_mutex);
624#endif
625 if (m_pathIDPool.size() == 0) {
626 ext = new PathIDExtension();
627 } else {
628 ext = m_pathIDPool.back();
629 m_pathIDPool.pop_back();
630 }
631 txn.set_extension(ext);
632 }
633 ext->push_back(id);
634 }
635
645 void unstamp_txn(int id, tlm::tlm_generic_payload& txn)
646 {
648 txn.get_extension(ext);
649 assert(ext);
650 assert(ext->back() == id);
651 ext->pop_back();
652 if (ext->size() == 0) {
653#if defined(THREAD_SAFE) and THREAD_SAFE
654 std::lock_guard<std::mutex> l(m_pool_mutex);
655#endif
656 txn.clear_extension(ext);
657 m_pathIDPool.push_back(ext);
658 }
659 }
660
673 void b_transport(int id, tlm::tlm_generic_payload& trans, sc_core::sc_time& delay)
674 {
675 sc_dt::uint64 addr = trans.get_address();
676 auto ti = decode_address(trans);
677 if (!ti) {
678 SCP_WARN(())("Attempt to access unknown register at offset 0x{:x}", addr);
679 trans.set_response_status(tlm::TLM_ADDRESS_ERROR_RESPONSE);
680 return;
681 }
682
683 stamp_txn(id, trans);
684 if (!ti->chained) {
685 SCP_TRACE((D[ti->index]), ti->name) << "Start b_transport :" << txn_tostring(ti.get(), trans);
686 }
687 if (trans.get_response_status() >= tlm::TLM_INCOMPLETE_RESPONSE) {
688 if (ti->use_offset) trans.set_address(addr - ti->address);
689
690 initiator_socket[ti->index]->b_transport(trans, delay);
691
692 if (ti->use_offset) trans.set_address(addr);
693 }
694 if (!ti->chained) {
695 SCP_TRACE((D[ti->index]), ti->name) << "Completed b_transport :" << txn_tostring(ti.get(), trans);
696 }
697 unstamp_txn(id, trans);
698 }
699
711 unsigned int transport_dbg(int id, tlm::tlm_generic_payload& trans)
712 {
713 // Ensure router is initialized (thread-safe)
714 lazy_initialize();
715
716 sc_dt::uint64 addr = trans.get_address();
717 auto ti = decode_address(trans);
718 if (!ti) {
719 trans.set_response_status(tlm::TLM_ADDRESS_ERROR_RESPONSE);
720 return 0;
721 }
722
723 if (ti->use_offset) trans.set_address(addr - ti->address);
724 SCP_TRACE((D[ti->index]), ti->name) << "calling dbg_transport : " << scp::scp_txn_tostring(trans);
725 unsigned int ret = initiator_socket[ti->index]->transport_dbg(trans);
726 if (ti->use_offset) trans.set_address(addr);
727 return ret;
728 }
729
    /// Forward-path DMI request handler.
    ///
    /// Decodes the address (recording mapped/hole info into any
    /// UnderlyingDMITlmExtension present), forwards the request to the owning
    /// target, rebases the returned DMI range into the global address space when
    /// the target uses relative addressing, clips it so it cannot spill outside
    /// the decoded region, and records which initiator was granted the DMI so it
    /// can be invalidated later.
    ///
    /// @param id       index of the requesting target socket
    /// @param trans    the DMI request payload
    /// @param dmi_data out: the granted DMI descriptor
    /// @return true if DMI was granted by the downstream target
    ///
    /// NOTE(review): when THREAD_SAFE is false, the bare m_dmi_mutex.lock() in
    /// the #else branch references a member declared only under THREAD_SAFE —
    /// confirm the intended configuration matrix upstream.
    bool get_direct_mem_ptr(int id, tlm::tlm_generic_payload& trans, tlm::tlm_dmi& dmi_data)
    {
        // Ensure router is initialized (thread-safe)
        lazy_initialize();

        sc_dt::uint64 addr = trans.get_address();

        // find_region() fills dmi_data_hole with either the full extent of the
        // mapped region, or the bounds of the unmapped hole containing addr.
        tlm::tlm_dmi dmi_data_hole;
        auto ti = m_address_map.find_region(addr, dmi_data_hole);

        UnderlyingDMITlmExtension* u_dmi;
        trans.get_extension(u_dmi);
        if (u_dmi) {
            SCP_DEBUG(())
            ("DMI info 0x{:x} 0x{:x} {}", dmi_data_hole.get_start_address(), dmi_data_hole.get_end_address(),
             (ti ? "mapped" : "nomap"));
            u_dmi->add_dmi(this, dmi_data_hole, (ti ? gs::tlm_dmi_ex::dmi_mapped : gs::tlm_dmi_ex::dmi_nomap));
        }

        if (!ti) {
            return false;
        }
#if defined(THREAD_SAFE_REENTRANT) and THREAD_SAFE_REENTRANT
        // If an invalidation is in flight on another thread, refuse the DMI
        // rather than risk re-entrancy (see the file-header comment).
        if (!m_dmi_mutex.try_lock()) { // if we're busy invalidating, dont grant DMI's
            return false;
        }
#else
        m_dmi_mutex.lock();
#endif

        // Rebase into the target's local space for the downstream request,
        // restoring the global address afterwards.
        if (ti->use_offset) trans.set_address(addr - ti->address);
        SCP_TRACE((D[ti->index]), ti->name) << "calling get_direct_mem_ptr : " << scp::scp_txn_tostring(trans);
        bool status = initiator_socket[ti->index]->get_direct_mem_ptr(trans, dmi_data);
        if (ti->use_offset) trans.set_address(addr);
        if (status) {
            if (ti->use_offset) {
                assert(dmi_data.get_start_address() < ti->size);
                // Translate the granted range back into the global address map.
                dmi_data.set_start_address(ti->address + dmi_data.get_start_address());
                dmi_data.set_end_address(ti->address + dmi_data.get_end_address());
            }
            /* ensure we dont overspill the 'hole' we have in the address map */
            if (dmi_data.get_start_address() < dmi_data_hole.get_start_address()) {
                // Advance the pointer by the same amount the start is clipped.
                dmi_data.set_dmi_ptr(dmi_data.get_dmi_ptr() +
                                     (dmi_data_hole.get_start_address() - dmi_data.get_start_address()));
                dmi_data.set_start_address(dmi_data_hole.get_start_address());
            }
            if (dmi_data.get_end_address() > dmi_data_hole.get_end_address()) {
                dmi_data.set_end_address(dmi_data_hole.get_end_address());
            }
            // Remember who holds this DMI so invalidations can be routed back.
            record_dmi(id, dmi_data);
        }
        SCP_DEBUG(())
        ("Providing DMI (status {:x}) {:x} - {:x}", status, dmi_data.get_start_address(), dmi_data.get_end_address());
#if defined(THREAD_SAFE) and THREAD_SAFE
        m_dmi_mutex.unlock();
#endif
        return status;
    }
800
811 void invalidate_direct_mem_ptr(int id, sc_dt::uint64 start, sc_dt::uint64 end)
812 {
813 if (id_targets[id]->use_offset) {
814 start = id_targets[id]->address + start;
815 end = id_targets[id]->address + end;
816 }
817#if defined(THREAD_SAFE) and THREAD_SAFE
818 std::lock_guard<std::mutex> lock(m_dmi_mutex);
819#endif
820 invalidate_direct_mem_ptr_ts(id, start, end);
821 }
822
    /// Core DMI invalidation (caller must hold m_dmi_mutex when THREAD_SAFE):
    /// remove every cached DMI record overlapping [start, end] and notify each
    /// initiator that was granted one of them.
    ///
    /// @param id    originating target id (not used by this routine)
    /// @param start first address of the range to invalidate (global space)
    /// @param end   last address of the range to invalidate (global space)
    void invalidate_direct_mem_ptr_ts(int id, sc_dt::uint64 start, sc_dt::uint64 end)
    {
        auto it = m_dmi_info_map.upper_bound(start);

        if (it != m_dmi_info_map.begin()) {
            /*
             * Start with the preceding region, as it may already cross the
             * range we must invalidate.
             */
            it--;
        }
        // Gather the union of affected initiators while erasing records, then
        // notify each initiator exactly once afterwards.
        std::set<int> initiators;

        while (it != m_dmi_info_map.end()) {
            tlm::tlm_dmi& r = it->second.dmi;

            if (r.get_start_address() > end) {
                /* We've got out of the invalidation range */
                break;
            }

            if (r.get_end_address() < start) {
                /* We are not in yet */
                it++;
                continue;
            }
            for (auto t : it->second.initiators) {
                SCP_TRACE((DMI)) << "Queueing initiator " << t << " for invalidation, its bounds are [0x" << std::hex
                                 << it->second.dmi.get_start_address() << " - 0x" << it->second.dmi.get_end_address()
                                 << "]";
                initiators.insert(t);
            }
            // erase() returns the next valid iterator, so iteration stays safe.
            it = m_dmi_info_map.erase(it);
        }
        for (auto t : initiators) {
            SCP_INFO((DMI)) << "Invalidating initiator " << t << " [0x" << std::hex << start << " - 0x" << end << "]";
            target_socket[t]->invalidate_direct_mem_ptr(start, end);
        }
    }
873
874protected:
885 std::shared_ptr<target_info> decode_address(tlm::tlm_generic_payload& trans) override
886 {
887 lazy_initialize();
888
889 sc_dt::uint64 addr = trans.get_address();
890 return m_address_map.find(addr, trans.get_data_length());
891 }
892
898 virtual void before_end_of_elaboration() override
899 {
900 if (!lazy_init) lazy_initialize();
901 }
902
903private:
905 std::atomic<bool> m_initialized{ false };
907#if defined(THREAD_SAFE) and THREAD_SAFE
908 std::mutex m_init_mutex;
909#endif
910 std::list<std::string> get_matching_children(cci::cci_broker_handle broker, const std::string& prefix,
911 const std::vector<cci_name_value_pair>& list)
912 {
913 size_t prefix_len = prefix.length() + 1; // +1 for the dot separator
914 std::list<std::string> children;
915 for (const auto& p : list) {
916 if (p.first.find(prefix) == 0) {
917 // Extract the child name after the prefix
918 std::string child = p.first.substr(prefix_len, p.first.find(".", prefix_len) - prefix_len);
919 children.push_back(child);
920 }
921 }
922 children.sort();
923 children.unique();
924 return children;
925 }
926
    /// Populate the address map from CCI configuration (thread-safe, idempotent).
    ///
    /// Uses double-checked locking around m_initialized: a lock-free acquire
    /// load on the fast path, then (when THREAD_SAFE) a mutex and a second check
    /// before doing the real work. For every bound target this reads
    /// .address/.size/.relative_addresses/.chained/.priority (following one
    /// level of "&target" configuration aliasing), registers the region, and
    /// then registers any ".aliases.<n>" sub-regions.
    void lazy_initialize() override
    {
        // Fast path: check if already initialized (lock-free)
        if (m_initialized.load(std::memory_order_acquire)) {
            return;
        }

#if defined(THREAD_SAFE) and THREAD_SAFE
        // Slow path: acquire lock and initialize
        std::lock_guard<std::mutex> lock(m_init_mutex);
#endif

        // Double-check: another thread may have initialized while we waited
        if (m_initialized.load(std::memory_order_relaxed)) {
            return;
        }

        // Perform initialization
        // Get filtered range and convert to vector to materialize the results
        auto all_alias_range = m_broker.get_unconsumed_preset_values([](const std::pair<std::string, cci_value>& iv) {
            return iv.first.find(".aliases.") != std::string::npos;
        });
        std::vector<cci_name_value_pair> all_alias(all_alias_range.begin(), all_alias_range.end());

        for (auto& ti_ptr : bound_targets) {
            std::string name = ti_ptr->name;
            std::string src;
            if (gs::cci_get<std::string>(m_broker, ti_ptr->name + ".0", src)) {
                // deal with an alias
                if (m_broker.has_preset_value(ti_ptr->name + ".address") &&
                    m_broker.get_preset_cci_value(ti_ptr->name + ".address").is_number()) {
                    // An explicit numeric address beats the alias.
                    SCP_WARN((D[ti_ptr->index]), ti_ptr->name)
                    ("The configuration alias provided ({}) will be ignored as a valid address is also provided.", src);
                } else {
                    // "&name" refers to another module; read its target_socket config.
                    if (src[0] == '&') src = (src.erase(0, 1)) + ".target_socket";
                    if (!m_broker.has_preset_value(src + ".address")) {
                        std::stringstream ss;
                        ss << "The configuration alias provided (" << src << ") can not be found.";
                        SC_REPORT_ERROR("router", ss.str().c_str());
                    }
                    name = src;
                }
            }

            // Update the target_info object pointed to by ti_ptr
            ti_ptr->address = gs::cci_get<uint64_t>(m_broker, name + ".address");
            ti_ptr->size = gs::cci_get<uint64_t>(m_broker, name + ".size");
            ti_ptr->use_offset = gs::cci_get_d<bool>(m_broker, name + ".relative_addresses", true);
            ti_ptr->chained = gs::cci_get_d<bool>(m_broker, name + ".chained", false);
            ti_ptr->priority = gs::cci_get_d<uint32_t>(m_broker, name + ".priority", 0);

            SCP_INFO((D[ti_ptr->index]), ti_ptr->name)
                << "Address map " << ti_ptr->name << " at address "
                << "0x" << std::hex << ti_ptr->address << " size "
                << "0x" << std::hex << ti_ptr->size << (ti_ptr->use_offset ? " (with relative address) " : " ")
                << "priority : " << ti_ptr->priority;
            if (ti_ptr->chained) SCP_DEBUG(())("{} is chained so debug will be suppressed", ti_ptr->name);

            // Add the shared_ptr to the address map
            m_address_map.add(ti_ptr);
            for (std::string n : get_matching_children(m_broker, (ti_ptr->name + ".aliases"), all_alias)) {
                std::string alias_name = ti_ptr->name + ".aliases." + n;
                uint64_t address = gs::cci_get<uint64_t>(m_broker, alias_name + ".address");
                uint64_t size = gs::cci_get<uint64_t>(m_broker, alias_name + ".size");
                SCP_INFO((D[ti_ptr->index]), ti_ptr->name)("Adding alias {} {:#x} (size: {})", alias_name, address,
                                                           size);

                // Create a new shared_ptr for the alias, copying from the current ti_ptr state
                std::shared_ptr<target_info> alias_ti_ptr = std::make_shared<target_info>(*ti_ptr);
                alias_ti_ptr->address = address;
                alias_ti_ptr->size = size;
                alias_ti_ptr->name = alias_name;
                alias_targets.push_back(alias_ti_ptr); // Store shared_ptr in alias_targets

                // Add aliases to the address map
                m_address_map.add(alias_ti_ptr);
            }
            id_targets.push_back(ti_ptr); // Store shared_ptr in id_targets
        }

        // Mark as initialized (release semantics ensures all writes are visible)
        m_initialized.store(true, std::memory_order_release);
    }
1023
1025 cci::cci_param<bool> lazy_init;
1026
1036public:
1037 explicit router(const sc_core::sc_module_name& nm, cci::cci_broker_handle broker = cci::cci_get_broker())
1038 : sc_core::sc_module(nm)
1039 , initiator_socket("initiator_socket", [&](std::string s) -> void { register_boundto(s); })
1040 , target_socket("target_socket")
1041 , m_address_map()
1042 , m_broker(broker)
1043 , lazy_init("lazy_init", false, "Initialize the router lazily (eg. during simulation rather than BEOL)")
1044 {
1045 SCP_DEBUG(()) << "router constructed";
1046
1047 target_socket.register_b_transport(this, &router<BUSWIDTH, CacheType>::b_transport);
1048 target_socket.register_transport_dbg(this, &router<BUSWIDTH, CacheType>::transport_dbg);
1049 target_socket.register_get_direct_mem_ptr(this, &router<BUSWIDTH, CacheType>::get_direct_mem_ptr);
1050 initiator_socket.register_invalidate_direct_mem_ptr(this,
1052 SCP_DEBUG((DMI)) << "router Initializing DMI SCP reporting";
1053 }
1054
1056 router() = delete;
1057
1059 router(const router&) = delete;
1060
1061public:
1064 ~router() { m_pathIDPool.clear(); }
1065
1078 void add_target(TargetSocket& t, const uint64_t address, uint64_t size, bool masked = true,
1079 unsigned int priority = 0)
1080 {
1081 std::string s = gs::router_if<BUSWIDTH>::nameFromSocket(t.get_base_export().name());
1082 if (!m_broker.has_preset_value(s + ".address")) {
1083 m_broker.set_preset_cci_value(s + ".address", cci::cci_value(address));
1084 }
1085 if (!m_broker.has_preset_value(s + ".size")) {
1086 m_broker.set_preset_cci_value(s + ".size", cci::cci_value(size));
1087 }
1088 if (!m_broker.has_preset_value(s + ".relative_addresses")) {
1089 m_broker.set_preset_cci_value(s + ".relative_addresses", cci::cci_value(masked));
1090 }
1091 if (!m_broker.has_preset_value(s + ".priority")) {
1092 SCP_DEBUG(())("Setting prio to {}", priority);
1093 m_broker.set_preset_cci_value(s + ".priority", cci::cci_value(priority));
1094 }
1095 initiator_socket.bind(t);
1096 }
1097
1106 virtual void add_initiator(InitiatorSocket& i)
1107 {
1108 // hand bind the port/exports as we are using base classes
1109 (i.get_base_port())(target_socket.get_base_interface());
1110 (target_socket.get_base_port())(i.get_base_interface());
1111 }
1112
1121 void get_cache_stats(uint64_t& hits, uint64_t& misses) const { m_address_map.get_cache_stats(hits, misses); }
1122
1129 void reset_cache_stats() { m_address_map.reset_cache_stats(); }
1130};
1131} // namespace gs
1132
1133extern "C" void module_register();
1134#endif
Definition target.h:160
Definition router.h:57
AddrMapNoCache - A cache implementation that never caches (always misses).
Definition router.h:76
bool get(const Key &key, Value &value) override
Always returns false (cache miss)
Definition router.h:79
void clear() override
Does nothing (nothing to clear)
Definition router.h:94
void put(const Key &key, const Value &value, uint64_t size) override
Does nothing (no caching)
Definition router.h:87
Definition pathid_extension.h:26
Definition router_if.h:24
A SystemC TLM router module for transaction routing based on address.
Definition router.h:114
virtual void before_end_of_elaboration() override
Called before end of elaboration to ensure lazy initialization.
Definition router.h:898
virtual void add_initiator(InitiatorSocket &i)
Adds an initiator to the router.
Definition router.h:1106
router(const sc_core::sc_module_name &nm, cci::cci_broker_handle broker=cci::cci_get_broker())
Constructor for the router module.
Definition router.h:1037
tlm_utils::multi_passthrough_target_socket< router< BUSWIDTH, CacheType >, BUSWIDTH > target_socket
Target socket to receive transactions from initiators.
Definition router.h:589
router()=delete
Deleted default constructor to enforce named instantiation.
initiator_socket_type initiator_socket
Initiator socket to connect to targets.
Definition router.h:587
void get_cache_stats(uint64_t &hits, uint64_t &misses) const
Get cache statistics (hits and misses).
Definition router.h:1121
void add_target(TargetSocket &t, const uint64_t address, uint64_t size, bool masked=true, unsigned int priority=0)
Adds a target to the router's address map.
Definition router.h:1078
std::shared_ptr< target_info > decode_address(tlm::tlm_generic_payload &trans) override
Decodes the address from a TLM generic payload to find the target.
Definition router.h:885
router(const router &)=delete
Deleted copy constructor.
cci::cci_broker_handle m_broker
CCI broker handle for configuration parameters.
Definition router.h:591
void reset_cache_stats()
Reset cache statistics to zero.
Definition router.h:1129
~router()
Destructor for the router module. Cleans up any remaining PathIDExtension objects in the pool.
Definition router.h:1064
Tool which reads a Lua configuration file and sets parameters.
Definition biflow.cc:10
Definition router_if.h:49