quic/qbox
Loading...
Searching...
No Matches
router.h
1/*
2 * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All Rights Reserved.
3 * Author: GreenSocs 2022
4 *
5 * SPDX-License-Identifier: BSD-3-Clause
6 */
7
8#ifndef _GREENSOCS_BASE_COMPONENTS_ROUTER_H
9#define _GREENSOCS_BASE_COMPONENTS_ROUTER_H
10
11#include <cinttypes>
12#include <utility>
13#include <vector>
14#include <map>
15#include <set> // Required for std::set
16#include <limits> // Required for std::numeric_limits
17#include <memory> // Required for std::shared_ptrs
18#include <unordered_map> // Required for std::unordered_map
19#include <list> // Required for std::list
20#include <functional> // Required for std::less
21
22/* Theoretically a DMI request can be failed with no ill effects, and to protect against re-entrant code
23 * between a DMI invalidate and a DMI request on separate threads, effectively requiring work to be done
24 * on the same thread, we make use of this by 'try_lock'ing and failing the DMI. However this has the
25 * negative side effect that up-stream models may not get DMI's when they expect them, which at the very
26 * least would be a performance hit.
27 * Define this as true if you require protection against re-entrant models.
28 */
29#define THREAD_SAFE_REENTRANT false
30#define THREAD_SAFE true
31#if defined(THREAD_SAFE) and THREAD_SAFE
32#include <mutex>
33#include <shared_mutex>
34#endif
35#include <atomic>
36
37#include <cci_configuration>
38#include <systemc>
39#include <tlm>
40#include <scp/report.h>
41#include <scp/helpers.h>
42#include <cciutils.h>
43
44#include <tlm_utils/multi_passthrough_initiator_socket.h>
45#include <tlm_utils/multi_passthrough_target_socket.h>
46#include <tlm-extensions/pathid_extension.h>
47#include <tlm-extensions/underlying-dmi.h>
48
49#include <router_if.h>
50#include <module_factory_registery.h>
51#include <tlm_sockets_buswidth.h>
52
53namespace gs {
54
/**
 * @brief Abstract interface for address-map lookup caches.
 *
 * Implementations memoise address -> target decodes for the router's
 * addressMap. The template is keyed by transaction address (Key) and stores
 * the resolved target handle (Value).
 *
 * NOTE(review): the `class AddrMapCacheBase` declaration line was missing
 * from this listing (extraction gap); restored here — the name is fixed by
 * AddrMapNoCache's base-class list below.
 */
template <typename Key, typename Value>
class AddrMapCacheBase
{
public:
    virtual ~AddrMapCacheBase() = default;

    /// Look up @p key; on a hit, write the cached value into @p value and return true.
    virtual bool get(const Key& key, Value& value) = 0;

    /// Insert/refresh the mapping key -> value. @p size is the access size,
    /// available to range-aware implementations.
    virtual void put(const Key& key, const Value& value, uint64_t size) = 0;

    /// Drop all cached entries (must be called whenever the underlying map changes).
    virtual void clear() = 0;

    // Statistics interface
    virtual uint64_t get_hits() const = 0;
    virtual uint64_t get_misses() const = 0;
    virtual void reset_stats() = 0;
};
69
74template <typename Key, typename Value>
75class AddrMapNoCache : public AddrMapCacheBase<Key, Value>
76{
77public:
79 bool get(const Key& key, Value& value) override
80 {
81 (void)key; // Suppress unused parameter warning
82 (void)value; // Suppress unused parameter warning
83 return false;
84 }
85
87 void put(const Key& key, const Value& value, [[maybe_unused]] uint64_t size) override
88 {
89 (void)key; // Suppress unused parameter warning
90 (void)value; // Suppress unused parameter warning
91 }
92
94 void clear() override {}
95
96 // Statistics interface implementation
97 uint64_t get_hits() const override { return 0; }
98 uint64_t get_misses() const override { return 0; }
99 void reset_stats() override {}
100};
101
112template <unsigned int BUSWIDTH = DEFAULT_TLM_BUSWIDTH, template <typename, typename> class CacheType = AddrMapNoCache>
113class router : public sc_core::sc_module, public gs::router_if<BUSWIDTH>
114{
116 using TargetSocket = tlm::tlm_base_target_socket_b<BUSWIDTH, tlm::tlm_fw_transport_if<>,
117 tlm::tlm_bw_transport_if<>>;
119 using InitiatorSocket = tlm::tlm_base_initiator_socket_b<BUSWIDTH, tlm::tlm_fw_transport_if<>,
120 tlm::tlm_bw_transport_if<>>;
124 using initiator_socket_type = typename gs::router<
127 using gs::router_if<BUSWIDTH>::bound_targets;
128
148 template <typename TargetInfoType>
149 class addressMap
150 {
151 private:
158 struct Region {
159 uint64_t start;
160 uint64_t end;
161 std::shared_ptr<TargetInfoType> target;
162
163 Region(uint64_t s, uint64_t e, std::shared_ptr<TargetInfoType> t): start(s), end(e), target(t) {}
164 };
165
167 std::map<uint64_t, Region> m_regions;
168
169 CacheImpl m_cache;
170
188 void split_and_resolve(uint64_t new_start, uint64_t new_end, std::shared_ptr<TargetInfoType> new_target)
189 {
190 // Step 1: Collect all existing regions that overlap with the new region
191 std::vector<Region> overlapping_regions;
192 std::vector<uint64_t> keys_to_remove;
193
194 // Find overlapping regions using interval overlap test: (a.start < b.end && a.end > b.start)
195 for (const auto& [start_addr, region] : m_regions) {
196 if (region.start < new_end && region.end > new_start) {
197 overlapping_regions.push_back(region);
198 keys_to_remove.push_back(start_addr);
199 }
200 }
201
202 // Step 2: Remove overlapping regions from the map (they'll be re-added after splitting)
203 for (uint64_t key : keys_to_remove) {
204 m_regions.erase(key);
205 }
206
207 // Step 3: Create intervals for all regions that need to be resolved
208 struct Interval {
209 uint64_t start, end;
210 std::shared_ptr<TargetInfoType> target;
211 uint32_t priority;
212
213 Interval(uint64_t s, uint64_t e, std::shared_ptr<TargetInfoType> t)
214 : start(s), end(e), target(t), priority(t->priority)
215 {
216 }
217 };
218
219 std::vector<Interval> intervals;
220 intervals.reserve(overlapping_regions.size() + 1); // Optimize allocation
221
222 // Add the new region
223 intervals.emplace_back(new_start, new_end, new_target);
224
225 // Add all overlapping existing regions
226 for (const Region& region : overlapping_regions) {
227 intervals.emplace_back(region.start, region.end, region.target);
228 }
229
230 // Step 4: Create sorted list of all boundary points for segmentation
231 std::set<uint64_t> boundaries;
232 for (const Interval& interval : intervals) {
233 boundaries.insert(interval.start);
234 boundaries.insert(interval.end);
235 }
236
237 // Step 5: Process each segment between boundaries and resolve conflicts
238 auto boundary_it = boundaries.begin();
239 while (boundary_it != boundaries.end()) {
241 ++boundary_it;
242 if (boundary_it == boundaries.end()) break;
244
245 // Find the highest priority target for this segment
246 std::shared_ptr<TargetInfoType> best_target = nullptr;
248
249 for (const Interval& interval : intervals) {
250 // Check if this interval covers the current segment
251 if (interval.start <= seg_start && interval.end >= seg_end) {
252 if (interval.priority < best_priority) {
253 best_priority = interval.priority;
254 best_target = interval.target;
255 }
256 }
257 }
258
259 // Add the resolved segment to the final map
260 if (best_target) {
261 m_regions.emplace(seg_start, Region(seg_start, seg_end, best_target));
262 }
263 }
264
265 // Step 6: Check for completely shadowed regions and issue warnings
266 for (const Interval& interval : intervals) {
267 // Skip the newly added region (it can't be shadowed by itself)
268 if (interval.start == new_start && interval.end == new_end && interval.target == new_target) {
269 continue;
270 }
271
272 // Check if this interval is completely covered by higher priority regions
273 // We do this by checking if any part of this interval "wins" in the final mapping
274 bool completely_shadowed = true;
275
276 // Look through the final resolved regions to see if any belong to this interval's target
277 for (const auto& [seg_start, region] : m_regions) {
278 // Check if this final region overlaps with our interval and belongs to our target
279 if (region.target == interval.target && region.start < interval.end &&
280 region.end > interval.start) {
281 // This interval has at least some part that's not shadowed
282 completely_shadowed = false;
283 break;
284 }
285 }
286
288 std::stringstream ss;
289 ss << "Region '" << interval.target->name << "' (0x" << std::hex << interval.start << "-0x"
290 << (interval.end - 1) << ", priority " << std::dec << interval.priority
291 << ") is completely shadowed by higher priority regions and will never be accessed";
292 SC_REPORT_WARNING("addressMap", ss.str().c_str());
293 }
294 }
295 }
296
297 public:
299 addressMap() = default;
300
315 void add(std::shared_ptr<TargetInfoType> t_info)
316 {
317 uint64_t start = t_info->address;
318 uint64_t end = t_info->address + t_info->size;
319
320 split_and_resolve(start, end, t_info);
321
322 // Clear cache after map modification to ensure consistency
323 m_cache.clear();
324 }
325
339 std::shared_ptr<TargetInfoType> find(uint64_t address, uint64_t size)
340 {
341 // Fast path: check LRU cache first
342 std::shared_ptr<TargetInfoType> cached_result;
343 if (m_cache.get(address, cached_result)) {
344 return cached_result;
345 }
346
347 // Slow path: search the map using upper_bound for O(log n) lookup
348 // upper_bound finds the first region with start > address
349 auto it = m_regions.upper_bound(address);
350 if (it != m_regions.begin()) {
351 --it; // Move to the region that might contain our address
352
353 // Check if address falls within this region [start, end)
354 if (address >= it->second.start && address < it->second.end) {
355 // Cache the result for future lookups
356 m_cache.put(address, it->second.target, size);
357 return it->second.target;
358 }
359 }
360
361 // Address not found - cache the negative result to avoid repeated lookups
362 m_cache.put(address, nullptr, size);
363 return nullptr;
364 }
365
382 std::shared_ptr<TargetInfoType> find_region(uint64_t addr, tlm::tlm_dmi& dmi)
383 {
384 // First try to find a mapped region using the same logic as find()
385 auto it = m_regions.upper_bound(addr);
386 if (it != m_regions.begin()) {
387 --it;
388 if (addr >= it->second.start && addr < it->second.end) {
389 // Return the segment boundaries. After split_and_resolve, adjacent
390 // same-target segments are merged, so this segment represents the
391 // full contiguous range owned by this target. We must not use the
392 // original target range as it may span across higher-priority targets.
393 std::shared_ptr<TargetInfoType> target = it->second.target;
394 dmi.set_start_address(it->second.start);
395 dmi.set_end_address(it->second.end - 1); // inclusive end (Region.end is exclusive)
396 return target;
397 }
398 }
399
400 // Address is in a hole - calculate hole boundaries for DMI invalidation
402 uint64_t hole_end = std::numeric_limits<uint64_t>::max();
403
404 // Find the region immediately before this address
405 it = m_regions.upper_bound(addr);
406 if (it != m_regions.begin()) {
407 --it;
408 if (it->second.end <= addr) {
409 hole_start = it->second.end;
410 }
411 }
412
413 // Find the region immediately after this address
414 it = m_regions.upper_bound(addr);
415 if (it != m_regions.end()) {
416 hole_end = it->second.start;
417 }
418
419 // Set DMI boundaries for the hole
420 dmi.set_start_address(hole_start);
421 dmi.set_end_address(hole_end == 0 ? 0 : hole_end - 1); // Handle edge case
422 return nullptr;
423 }
424
        /**
         * @brief Retrieve hit/miss counters from the underlying decode cache.
         * @param hits   out: number of cache hits
         * @param misses out: number of cache misses
         */
        void get_cache_stats(uint64_t& hits, uint64_t& misses) const
        {
            hits = m_cache.get_hits();
            misses = m_cache.get_misses();
        }

        /// Reset the decode cache's hit/miss counters to zero.
        void reset_cache_stats() { m_cache.reset_stats(); }
440 };
441
442 SCP_LOGGER_VECTOR(D);
443 SCP_LOGGER(());
444 SCP_LOGGER((DMI), "dmi");
445
446private:
447#if defined(THREAD_SAFE) and THREAD_SAFE
449 std::mutex m_dmi_mutex;
450#endif
452 struct dmi_info {
453 std::set<int> initiators;
454 tlm::tlm_dmi dmi;
456 dmi_info(tlm::tlm_dmi& _dmi) { dmi = _dmi; }
457 };
459 std::map<uint64_t, dmi_info> m_dmi_info_map;
460
    /**
     * @brief Find (or create) the dmi_info record for a DMI descriptor.
     *
     * Looks up m_dmi_info_map by the DMI start address; if no record exists,
     * a new one is inserted. Callers then add themselves to the record's
     * initiator set.
     *
     * @param dmi DMI descriptor whose start address keys the map
     * @return pointer to the (possibly freshly inserted) record; never null.
     *         Stays valid until the map entry is erased (std::map nodes are
     *         stable under further insertion).
     */
    dmi_info* in_dmi_cache(tlm::tlm_dmi& dmi)
    {
        auto it = m_dmi_info_map.find(dmi.get_start_address());
        if (it != m_dmi_info_map.end()) {
            if (it->second.dmi.get_end_address() != dmi.get_end_address()) {
                // Defensive guard: unreachable in normal operation. The only caller
                // (record_dmi) detects differing end addresses and calls
                // invalidate_direct_mem_ptr_ts to erase the old entry before reaching here.
                std::stringstream ss;
                ss << "Can't handle that: DMI overlap with differing end address (0x" << std::hex
                   << it->second.dmi.get_end_address() << " vs 0x" << dmi.get_end_address() << ")";
                SC_REPORT_ERROR("DMI", ss.str().c_str());
            }
            return &(it->second);
        }
        auto insit = m_dmi_info_map.emplace(dmi.get_start_address(), dmi_info(dmi));
        return &(insit.first->second);
    }
489
    /**
     * @brief Record that initiator @p id holds the DMI region @p dmi.
     *
     * If an existing record starts at the same address but ends elsewhere, the
     * old region is invalidated first (its holders are notified and the entry
     * erased), then the new region is inserted and @p id added to its holder set.
     *
     * @param id  index of the requesting initiator on target_socket
     * @param dmi granted DMI descriptor (addresses already rebased/trimmed)
     */
    void record_dmi(int id, tlm::tlm_dmi& dmi)
    {
        auto it = m_dmi_info_map.find(dmi.get_start_address());
        if (it != m_dmi_info_map.end()) {
            if (it->second.dmi.get_end_address() != dmi.get_end_address()) {
                SCP_WARN((DMI)) << "A new DMI overlaps with an old one, invalidating the old one";
                invalidate_direct_mem_ptr_ts(0, dmi.get_start_address(),
                                             dmi.get_end_address()); // id will be ignored
            }
        }

        dmi_info* dinfo = in_dmi_cache(dmi);
        dinfo->initiators.insert(id);
    }
514
524 void register_boundto(std::string s) override
525 {
527 // Create a shared_ptr for the new target_info
528 std::shared_ptr<target_info> ti_ptr = std::make_shared<target_info>();
529 ti_ptr->name = s;
530 ti_ptr->index = bound_targets.size(); // Current size will be its index
532 SCP_DEBUG((D[ti_ptr->index])) << "Connecting : " << ti_ptr->name;
533 ti_ptr->chained = false;
534 std::string tmp = name();
535 int i;
536 for (i = 0; i < tmp.length(); i++)
537 if (s[i] != tmp[i]) break;
538 ti_ptr->shortname = s.substr(i);
539 bound_targets.push_back(ti_ptr); // Add shared_ptr to bound_targets
540 }
541
552 std::string txn_tostring(const target_info* ti, tlm::tlm_generic_payload& trans)
553 {
554 std::stringstream info;
555 const char* cmd = "UNKOWN";
556 switch (trans.get_command()) {
557 case tlm::TLM_IGNORE_COMMAND:
558 info << "IGNORE ";
559 break;
560 case tlm::TLM_WRITE_COMMAND:
561 info << "WRITE ";
562 break;
563 case tlm::TLM_READ_COMMAND:
564 info << "READ ";
565 break;
566 }
567
568 info << " address:"
569 << "0x" << std::hex << trans.get_address();
570 info << " len:" << trans.get_data_length();
571 unsigned char* ptr = trans.get_data_ptr();
572 if ((trans.get_command() == tlm::TLM_READ_COMMAND && trans.get_response_status() == tlm::TLM_OK_RESPONSE) ||
573 (trans.get_command() == tlm::TLM_WRITE_COMMAND &&
574 trans.get_response_status() == tlm::TLM_INCOMPLETE_RESPONSE)) {
575 info << " data:0x";
576 for (int i = trans.get_data_length(); i; i--) {
577 info << std::setw(2) << std::setfill('0') << std::hex << (unsigned int)(ptr[i - 1]);
578 }
579 }
580 info << " " << trans.get_response_string() << " ";
581 for (int i = 0; i < tlm::max_num_extensions(); i++) {
582 if (trans.get_extension(i)) {
583 info << " extn:" << i;
584 }
585 }
586 return info.str();
587 }
588
589public:
591 initiator_socket_type initiator_socket;
593 tlm_utils::multi_passthrough_target_socket<router<BUSWIDTH, CacheType>, BUSWIDTH> target_socket;
595 cci::cci_broker_handle m_broker;
596
597private:
599 std::vector<std::shared_ptr<target_info>> alias_targets;
601 addressMap<target_info> m_address_map;
603 std::vector<std::shared_ptr<target_info>> id_targets;
604
606 std::vector<PathIDExtension*> m_pathIDPool;
607#if defined(THREAD_SAFE) and THREAD_SAFE
609 std::mutex m_pool_mutex;
610#endif
611
    /**
     * @brief Push this router's initiator ID onto the transaction's path-ID extension.
     *
     * If the transaction carries no PathIDExtension yet, one is taken from the
     * pool (or newly allocated) and attached. The extension acts as a stack of
     * IDs recording the route taken through chained routers; unstamp_txn()
     * pops it on the way back.
     *
     * @param id  index of the initiator on target_socket
     * @param txn transaction being routed
     */
    void stamp_txn(int id, tlm::tlm_generic_payload& txn)
    {
        PathIDExtension* ext = nullptr;
        txn.get_extension(ext);
        if (ext == nullptr) {
#if defined(THREAD_SAFE) and THREAD_SAFE
            std::lock_guard<std::mutex> l(m_pool_mutex);
#endif
            // Reuse a pooled extension when available to avoid allocation churn.
            if (m_pathIDPool.size() == 0) {
                ext = new PathIDExtension();
            } else {
                ext = m_pathIDPool.back();
                m_pathIDPool.pop_back();
            }
            txn.set_extension(ext);
        }
        ext->push_back(id);
    }
639
649 void unstamp_txn(int id, tlm::tlm_generic_payload& txn)
650 {
652 txn.get_extension(ext);
653 assert(ext);
654 assert(ext->back() == id);
655 ext->pop_back();
656 if (ext->size() == 0) {
657#if defined(THREAD_SAFE) and THREAD_SAFE
658 std::lock_guard<std::mutex> l(m_pool_mutex);
659#endif
660 txn.clear_extension(ext);
661 m_pathIDPool.push_back(ext);
662 }
663 }
664
    /**
     * @brief Blocking transport: decode the address, stamp the path ID, and forward.
     *
     * Unknown addresses complete with TLM_ADDRESS_ERROR_RESPONSE. For mapped
     * targets the transaction is stamped with this router's ID, optionally
     * rebased into the target's local address space, forwarded, then the
     * absolute address is restored and the stamp removed.
     *
     * @param id    index of the initiator on target_socket
     * @param trans transaction payload
     * @param delay accumulated timing annotation (passed through)
     */
    void b_transport(int id, tlm::tlm_generic_payload& trans, sc_core::sc_time& delay)
    {
        sc_dt::uint64 addr = trans.get_address();
        auto ti = decode_address(trans);
        if (!ti) {
            SCP_WARN(())("Attempt to access unknown register at offset 0x{:x}", addr);
            trans.set_response_status(tlm::TLM_ADDRESS_ERROR_RESPONSE);
            return;
        }

        stamp_txn(id, trans);
        // Chained (router-behind-router) targets suppress per-hop tracing.
        if (!ti->chained) {
            SCP_TRACE((D[ti->index]), ti->name) << "Start b_transport :" << txn_tostring(ti.get(), trans);
        }
        // Only forward transactions that have not already completed with an error.
        if (trans.get_response_status() >= tlm::TLM_INCOMPLETE_RESPONSE) {
            if (ti->use_offset) trans.set_address(addr - ti->address);

            initiator_socket[ti->index]->b_transport(trans, delay);

            // Restore the caller's absolute address after the call.
            if (ti->use_offset) trans.set_address(addr);
        }
        if (!ti->chained) {
            SCP_TRACE((D[ti->index]), ti->name) << "Completed b_transport :" << txn_tostring(ti.get(), trans);
        }
        unstamp_txn(id, trans);
    }
703
715 unsigned int transport_dbg(int id, tlm::tlm_generic_payload& trans)
716 {
717 // Ensure router is initialized (thread-safe)
718 lazy_initialize();
719
720 sc_dt::uint64 addr = trans.get_address();
721 auto ti = decode_address(trans);
722 if (!ti) {
723 trans.set_response_status(tlm::TLM_ADDRESS_ERROR_RESPONSE);
724 return 0;
725 }
726
727 if (ti->use_offset) trans.set_address(addr - ti->address);
728 SCP_TRACE((D[ti->index]), ti->name) << "calling dbg_transport : " << scp::scp_txn_tostring(trans);
729 unsigned int ret = initiator_socket[ti->index]->transport_dbg(trans);
730 if (ti->use_offset) trans.set_address(addr);
731 return ret;
732 }
733
746 bool get_direct_mem_ptr(int id, tlm::tlm_generic_payload& trans, tlm::tlm_dmi& dmi_data)
747 {
748 // Ensure router is initialized (thread-safe)
749 lazy_initialize();
750
751 sc_dt::uint64 addr = trans.get_address();
752
753 tlm::tlm_dmi dmi_data_hole;
754 auto ti = m_address_map.find_region(addr, dmi_data_hole);
755
756 UnderlyingDMITlmExtension* u_dmi;
757 trans.get_extension(u_dmi);
758 if (u_dmi) {
759 SCP_DEBUG(())
760 ("DMI info 0x{:x} 0x{:x} {}", dmi_data_hole.get_start_address(), dmi_data_hole.get_end_address(),
761 (ti ? "mapped" : "nomap"));
762 u_dmi->add_dmi(this, dmi_data_hole, (ti ? gs::tlm_dmi_ex::dmi_mapped : gs::tlm_dmi_ex::dmi_nomap));
763 }
764
765 if (!ti) {
766 return false;
767 }
768#if defined(THREAD_SAFE_REENTRANT) and THREAD_SAFE_REENTRANT
769 if (!m_dmi_mutex.try_lock()) { // if we're busy invalidating, dont grant DMI's
770 return false;
771 }
772#else
773 m_dmi_mutex.lock();
774#endif
775
776 if (ti->use_offset) trans.set_address(addr - ti->address);
777 SCP_TRACE((D[ti->index]), ti->name) << "calling get_direct_mem_ptr : " << scp::scp_txn_tostring(trans);
778 bool status = initiator_socket[ti->index]->get_direct_mem_ptr(trans, dmi_data);
779 if (ti->use_offset) trans.set_address(addr);
780 if (status) {
781 if (ti->use_offset) {
782 assert(dmi_data.get_start_address() < ti->size);
783 dmi_data.set_start_address(ti->address + dmi_data.get_start_address());
784 dmi_data.set_end_address(ti->address + dmi_data.get_end_address());
785 }
786 /* ensure we dont overspill the 'hole' we have in the address map */
787 if (dmi_data.get_start_address() < dmi_data_hole.get_start_address()) {
788 dmi_data.set_dmi_ptr(dmi_data.get_dmi_ptr() +
789 (dmi_data_hole.get_start_address() - dmi_data.get_start_address()));
790 dmi_data.set_start_address(dmi_data_hole.get_start_address());
791 }
792 if (dmi_data.get_end_address() > dmi_data_hole.get_end_address()) {
793 dmi_data.set_end_address(dmi_data_hole.get_end_address());
794 }
795 record_dmi(id, dmi_data);
796 }
797 SCP_DEBUG(())
798 ("Providing DMI (status {:x}) {:x} - {:x}", status, dmi_data.get_start_address(), dmi_data.get_end_address());
799#if defined(THREAD_SAFE) and THREAD_SAFE
800 m_dmi_mutex.unlock();
801#endif
802 return status;
803 }
804
    /**
     * @brief Backward-path DMI invalidation from target @p id.
     *
     * Rebases the (possibly target-relative) range into the router's global
     * address space, then performs the invalidation under the DMI mutex.
     *
     * @param id    index of the target on initiator_socket
     * @param start start of the invalidated range (target space if use_offset)
     * @param end   end of the invalidated range (inclusive)
     */
    void invalidate_direct_mem_ptr(int id, sc_dt::uint64 start, sc_dt::uint64 end)
    {
        if (id_targets[id]->use_offset) {
            start = id_targets[id]->address + start;
            end = id_targets[id]->address + end;
        }
#if defined(THREAD_SAFE) and THREAD_SAFE
        std::lock_guard<std::mutex> lock(m_dmi_mutex);
#endif
        invalidate_direct_mem_ptr_ts(id, start, end);
    }
826
    /**
     * @brief Core of DMI invalidation; caller must hold m_dmi_mutex.
     *
     * Sweeps all recorded DMI regions intersecting [start, end], collects the
     * set of initiators that held them, erases the records, then notifies each
     * initiator exactly once.
     *
     * @param id    unused (kept for symmetry with the locking wrapper)
     * @param start start of the invalidated range
     * @param end   end of the invalidated range (inclusive)
     */
    void invalidate_direct_mem_ptr_ts(int id, sc_dt::uint64 start, sc_dt::uint64 end)
    {
        auto it = m_dmi_info_map.upper_bound(start);

        if (it != m_dmi_info_map.begin()) {
            /*
             * Start with the preceding region, as it may already cross the
             * range we must invalidate.
             */
            it--;
        }
        // Collect initiators first so each one is notified only once even if
        // it held several overlapping regions.
        std::set<int> initiators;

        while (it != m_dmi_info_map.end()) {
            tlm::tlm_dmi& r = it->second.dmi;

            if (r.get_start_address() > end) {
                /* We've got out of the invalidation range */
                break;
            }

            if (r.get_end_address() < start) {
                /* We are not in yet */
                it++;
                continue;
            }
            for (auto t : it->second.initiators) {
                SCP_TRACE((DMI)) << "Queueing initiator " << t << " for invalidation, its bounds are [0x" << std::hex
                                 << it->second.dmi.get_start_address() << " - 0x" << it->second.dmi.get_end_address()
                                 << "]";
                initiators.insert(t);
            }
            // erase() returns the next valid iterator, keeping the sweep safe.
            it = m_dmi_info_map.erase(it);
        }
        for (auto t : initiators) {
            SCP_INFO((DMI)) << "Invalidating initiator " << t << " [0x" << std::hex << start << " - 0x" << end << "]";
            target_socket[t]->invalidate_direct_mem_ptr(start, end);
        }
    }
877
878protected:
    /**
     * @brief Decodes the address from a TLM generic payload to find the target.
     *
     * Ensures the address map has been built (lazy initialization), then
     * resolves the payload's address and data length against it.
     *
     * @param trans payload whose address is decoded
     * @return target_info for the owning target, or nullptr if unmapped
     */
    std::shared_ptr<target_info> decode_address(tlm::tlm_generic_payload& trans) override
    {
        lazy_initialize();

        sc_dt::uint64 addr = trans.get_address();
        return m_address_map.find(addr, trans.get_data_length());
    }
896
    /**
     * @brief Build the address map during elaboration unless lazy_init is set.
     *
     * When the lazy_init parameter is false (the default), the router is fully
     * configured before end of elaboration; otherwise initialization is
     * deferred to the first transaction.
     */
    virtual void before_end_of_elaboration() override
    {
        if (!lazy_init) lazy_initialize();
    }
906
907private:
909 std::atomic<bool> m_initialized{ false };
911#if defined(THREAD_SAFE) and THREAD_SAFE
912 std::mutex m_init_mutex;
913#endif
914 std::list<std::string> get_matching_children(cci::cci_broker_handle broker, const std::string& prefix,
915 const std::vector<cci_name_value_pair>& list)
916 {
917 size_t prefix_len = prefix.length() + 1; // +1 for the dot separator
918 std::list<std::string> children;
919 for (const auto& p : list) {
920 if (p.first.find(prefix) == 0) {
921 // Extract the child name after the prefix
922 std::string child = p.first.substr(prefix_len, p.first.find(".", prefix_len) - prefix_len);
923 children.push_back(child);
924 }
925 }
926 children.sort();
927 children.unique();
928 return children;
929 }
930
    /**
     * @brief Build the address map from CCI configuration (idempotent, thread-safe).
     *
     * Uses double-checked initialization: a lock-free acquire load
     * short-circuits after the first call; otherwise the init mutex is taken
     * and the flag re-checked. For each bound target it resolves optional
     * "&module" alias indirection, reads the address/size/relative_addresses/
     * chained/priority parameters, registers the range (plus any
     * ".aliases.<n>" sub-ranges) in m_address_map, and records the target in
     * id_targets for backward-path lookups.
     */
    void lazy_initialize() override
    {
        // Fast path: check if already initialized (lock-free)
        if (m_initialized.load(std::memory_order_acquire)) {
            return;
        }

#if defined(THREAD_SAFE) and THREAD_SAFE
        // Slow path: acquire lock and initialize
        std::lock_guard<std::mutex> lock(m_init_mutex);
#endif

        // Double-check: another thread may have initialized while we waited
        if (m_initialized.load(std::memory_order_relaxed)) {
            return;
        }

        // Perform initialization
        // Get filtered range and convert to vector to materialize the results
        auto all_alias_range = m_broker.get_unconsumed_preset_values([](const std::pair<std::string, cci_value>& iv) {
            return iv.first.find(".aliases.") != std::string::npos;
        });
        std::vector<cci_name_value_pair> all_alias(all_alias_range.begin(), all_alias_range.end());

        for (auto& ti_ptr : bound_targets) {
            std::string name = ti_ptr->name;
            std::string src;
            // A "<target>.0" string parameter names another socket whose
            // address/size configuration should be used instead.
            if (gs::cci_get<std::string>(m_broker, ti_ptr->name + ".0", src)) {
                // deal with an alias
                if (m_broker.has_preset_value(ti_ptr->name + ".address") &&
                    m_broker.get_preset_cci_value(ti_ptr->name + ".address").is_number()) {
                    SCP_WARN((D[ti_ptr->index]), ti_ptr->name)
                    ("The configuration alias provided ({}) will be ignored as a valid address is also provided.", src);
                } else {
                    // "&name" refers to a module; route to its target_socket.
                    if (src[0] == '&') src = (src.erase(0, 1)) + ".target_socket";
                    if (!m_broker.has_preset_value(src + ".address")) {
                        std::stringstream ss;
                        ss << "The configuration alias provided (" << src << ") can not be found.";
                        SC_REPORT_ERROR("router", ss.str().c_str());
                    }
                    name = src;
                }
            }

            // Update the target_info object pointed to by ti_ptr
            ti_ptr->address = gs::cci_get<uint64_t>(m_broker, name + ".address");
            ti_ptr->size = gs::cci_get<uint64_t>(m_broker, name + ".size");
            ti_ptr->use_offset = gs::cci_get_d<bool>(m_broker, name + ".relative_addresses", true);
            ti_ptr->chained = gs::cci_get_d<bool>(m_broker, name + ".chained", false);
            ti_ptr->priority = gs::cci_get_d<uint32_t>(m_broker, name + ".priority", 0);

            SCP_INFO((D[ti_ptr->index]), ti_ptr->name)
                << "Address map " << ti_ptr->name << " at address "
                << "0x" << std::hex << ti_ptr->address << " size "
                << "0x" << std::hex << ti_ptr->size << (ti_ptr->use_offset ? " (with relative address) " : " ")
                << "priority : " << ti_ptr->priority;
            if (ti_ptr->chained) SCP_DEBUG(())("{} is chained so debug will be suppressed", ti_ptr->name);

            // Add the shared_ptr to the address map
            m_address_map.add(ti_ptr);
            for (std::string n : get_matching_children(m_broker, (ti_ptr->name + ".aliases"), all_alias)) {
                std::string alias_name = ti_ptr->name + ".aliases." + n;
                uint64_t address = gs::cci_get<uint64_t>(m_broker, alias_name + ".address");
                uint64_t size = gs::cci_get<uint64_t>(m_broker, alias_name + ".size");
                SCP_INFO((D[ti_ptr->index]), ti_ptr->name)("Adding alias {} {:#x} (size: {})", alias_name, address,
                                                           size);

                // Create a new shared_ptr for the alias, copying from the current ti_ptr state
                std::shared_ptr<target_info> alias_ti_ptr = std::make_shared<target_info>(*ti_ptr);
                alias_ti_ptr->address = address;
                alias_ti_ptr->size = size;
                alias_ti_ptr->name = alias_name;
                alias_targets.push_back(alias_ti_ptr); // Store shared_ptr in alias_targets

                // Add aliases to the address map
                m_address_map.add(alias_ti_ptr);
            }
            id_targets.push_back(ti_ptr); // Store shared_ptr in id_targets
        }

        // Mark as initialized (release semantics ensures all writes are visible)
        m_initialized.store(true, std::memory_order_release);
    }
1027
1029 cci::cci_param<bool> lazy_init;
1030
1040public:
1041 explicit router(const sc_core::sc_module_name& nm, cci::cci_broker_handle broker = cci::cci_get_broker())
1042 : sc_core::sc_module(nm)
1043 , initiator_socket("initiator_socket", [&](std::string s) -> void { register_boundto(s); })
1044 , target_socket("target_socket")
1045 , m_address_map()
1046 , m_broker(broker)
1047 , lazy_init("lazy_init", false, "Initialize the router lazily (eg. during simulation rather than BEOL)")
1048 {
1049 SCP_DEBUG(()) << "router constructed";
1050
1051 target_socket.register_b_transport(this, &router<BUSWIDTH, CacheType>::b_transport);
1052 target_socket.register_transport_dbg(this, &router<BUSWIDTH, CacheType>::transport_dbg);
1053 target_socket.register_get_direct_mem_ptr(this, &router<BUSWIDTH, CacheType>::get_direct_mem_ptr);
1054 initiator_socket.register_invalidate_direct_mem_ptr(this,
1056 SCP_DEBUG((DMI)) << "router Initializing DMI SCP reporting";
1057 }
1058
1060 router() = delete;
1061
1063 router(const router&) = delete;
1064
1065public:
1068 ~router() { m_pathIDPool.clear(); }
1069
1082 void add_target(TargetSocket& t, const uint64_t address, uint64_t size, bool masked = true,
1083 unsigned int priority = 0)
1084 {
1085 std::string s = gs::router_if<BUSWIDTH>::nameFromSocket(t.get_base_export().name());
1086 if (!m_broker.has_preset_value(s + ".address")) {
1087 m_broker.set_preset_cci_value(s + ".address", cci::cci_value(address));
1088 }
1089 if (!m_broker.has_preset_value(s + ".size")) {
1090 m_broker.set_preset_cci_value(s + ".size", cci::cci_value(size));
1091 }
1092 if (!m_broker.has_preset_value(s + ".relative_addresses")) {
1093 m_broker.set_preset_cci_value(s + ".relative_addresses", cci::cci_value(masked));
1094 }
1095 if (!m_broker.has_preset_value(s + ".priority")) {
1096 SCP_DEBUG(())("Setting prio to {}", priority);
1097 m_broker.set_preset_cci_value(s + ".priority", cci::cci_value(priority));
1098 }
1099 initiator_socket.bind(t);
1100 }
1101
    /**
     * @brief Adds an initiator to the router.
     *
     * Binds port and export in both directions by hand, since only the TLM
     * base-class socket interfaces are available here.
     *
     * @param i initiator socket to connect to this router's target_socket
     */
    virtual void add_initiator(InitiatorSocket& i)
    {
        // hand bind the port/exports as we are using base classes
        (i.get_base_port())(target_socket.get_base_interface());
        (target_socket.get_base_port())(i.get_base_interface());
    }
1116
    /**
     * @brief Get decode-cache statistics (hits and misses) from the address map.
     * @param hits   out: number of cache hits
     * @param misses out: number of cache misses
     */
    void get_cache_stats(uint64_t& hits, uint64_t& misses) const { m_address_map.get_cache_stats(hits, misses); }

    /// Reset the decode-cache statistics to zero.
    void reset_cache_stats() { m_address_map.reset_cache_stats(); }
1134};
1135} // namespace gs
1136
1137extern "C" void module_register();
1138#endif
Definition target.h:160
Definition router.h:57
AddrMapNoCache - A cache implementation that never caches (always misses).
Definition router.h:76
bool get(const Key &key, Value &value) override
Always returns false (cache miss)
Definition router.h:79
void clear() override
Does nothing (nothing to clear)
Definition router.h:94
void put(const Key &key, const Value &value, uint64_t size) override
Does nothing (no caching)
Definition router.h:87
Definition pathid_extension.h:26
Definition router_if.h:24
A SystemC TLM router module for transaction routing based on address.
Definition router.h:114
virtual void before_end_of_elaboration() override
Called before end of elaboration to ensure lazy initialization.
Definition router.h:902
virtual void add_initiator(InitiatorSocket &i)
Adds an initiator to the router.
Definition router.h:1110
router(const sc_core::sc_module_name &nm, cci::cci_broker_handle broker=cci::cci_get_broker())
Constructor for the router module.
Definition router.h:1041
tlm_utils::multi_passthrough_target_socket< router< BUSWIDTH, CacheType >, BUSWIDTH > target_socket
Target socket to receive transactions from initiators.
Definition router.h:593
router()=delete
Deleted default constructor to enforce named instantiation.
initiator_socket_type initiator_socket
Initiator socket to connect to targets.
Definition router.h:591
void get_cache_stats(uint64_t &hits, uint64_t &misses) const
Get cache statistics (hits and misses).
Definition router.h:1125
void add_target(TargetSocket &t, const uint64_t address, uint64_t size, bool masked=true, unsigned int priority=0)
Adds a target to the router's address map.
Definition router.h:1082
std::shared_ptr< target_info > decode_address(tlm::tlm_generic_payload &trans) override
Decodes the address from a TLM generic payload to find the target.
Definition router.h:889
router(const router &)=delete
Deleted copy constructor.
cci::cci_broker_handle m_broker
CCI broker handle for configuration parameters.
Definition router.h:595
void reset_cache_stats()
Reset cache statistics to zero.
Definition router.h:1133
~router()
Destructor for the router module. Cleans up any remaining PathIDExtension objects in the pool.
Definition router.h:1068
Tool which reads a Lua configuration file and sets parameters.
Definition biflow.cc:10
Definition router_if.h:49