61 : start(
txn.get_address()), end(start +
txn.get_data_length() - 1), id(
id)
69 bool is_exact_match(
const tlm::tlm_generic_payload&
txn)
71 return (start ==
txn.get_address()) && (end == start +
txn.get_data_length() - 1);
// Shared-ownership handle to an exclusive-reservation region; the same
// Region object is referenced from both lookup maps below.
75 using RegionPtr = std::shared_ptr<Region>;
// Active exclusive regions keyed by start address (ordered map: lookups
// use lower_bound/upper_bound to find overlapping ranges).
77 std::map<uint64_t, RegionPtr> m_regions;
// The same regions keyed by the reserving initiator's id — one live
// reservation per initiator (see the find/erase pairs in unlock paths).
78 std::map<InitiatorId, RegionPtr> m_regions_by_id;
80 const InitiatorId get_initiator_id(
const tlm::tlm_generic_payload&
txn)
87 RegionPtr find_region(
const tlm::tlm_generic_payload&
txn)
91 start =
txn.get_address();
92 end = start +
txn.get_data_length() - 1;
94 auto it = m_regions.lower_bound(start);
96 if (
it != m_regions.begin()) {
100 while (
it != m_regions.end()) {
101 RegionPtr r =
it->second;
103 if (r->end < start) {
108 if (r->start > end) {
118 void dmi_invalidate(RegionPtr
region) { front_socket->invalidate_direct_mem_ptr(
region->start,
region->end); }
120 void lock_region(
const tlm::tlm_generic_payload&
txn,
const InitiatorId&
id)
122 RegionPtr
region(std::make_shared<Region>(
txn,
id));
132 void unlock_region(RegionPtr
region)
134 assert(m_regions.find(
region->start) != m_regions.end());
135 assert(m_regions_by_id.find(
region->id) != m_regions_by_id.end());
137 m_regions.erase(
region->start);
138 m_regions_by_id.erase(
region->id);
143 if (m_regions_by_id.find(
id) == m_regions_by_id.end()) {
147 RegionPtr
region = m_regions_by_id.at(
id);
166 unlock_region_by_id(
id);
168 lock_region(
txn,
id);
177 ext.set_exclusive_store_failure();
181 if (
region->id != get_initiator_id(
txn)) {
183 ext.set_exclusive_store_failure();
189 ext.set_exclusive_store_failure();
193 ext.set_exclusive_store_success();
199 void handle_regular_store(
const tlm::tlm_generic_payload&
txn)
214 bool before_b_transport(
const tlm::tlm_generic_payload&
txn)
217 bool is_store =
txn.get_command() == tlm::TLM_WRITE_COMMAND;
228 return handle_exclusive_store(
txn, *
ext);
234 handle_regular_store(
txn);
243 bool after_b_transport(
const tlm::tlm_generic_payload&
txn)
246 bool is_store =
txn.get_command() == tlm::TLM_WRITE_COMMAND;
270 handle_exclusive_load(
txn, *
ext);
276 void b_transport(tlm::tlm_generic_payload&
txn, sc_core::sc_time& delay)
280 if (!before_b_transport(
txn)) {
282 txn.set_response_status(tlm::TLM_GENERIC_ERROR_RESPONSE);
292 back_socket->b_transport(
txn, delay);
294 if (
txn.get_response_status() != tlm::TLM_OK_RESPONSE) {
300 txn.set_dmi_allowed(
false);
304 unsigned int transport_dbg(tlm::tlm_generic_payload&
txn) {
return back_socket->transport_dbg(
txn); }
306 bool get_direct_mem_ptr(tlm::tlm_generic_payload&
txn, tlm::tlm_dmi& dmi_data)
311 bool ret = back_socket->get_direct_mem_ptr(
txn, dmi_data);
323 auto it = m_regions.upper_bound(
txn.get_address());
325 if (
it != m_regions.begin()) {
329 for (;
it != m_regions.end();
it++) {
330 RegionPtr r =
it->second;
// Initiator-facing target socket: the constructor registers b_transport,
// transport_dbg and get_direct_mem_ptr callbacks on it, so all inbound
// traffic is intercepted by this monitor before reaching memory.
373 tlm_utils::simple_target_socket<exclusive_monitor, DEFAULT_TLM_BUSWIDTH> front_socket;
// Memory-facing initiator socket: transactions are forwarded through it
// (b_transport/transport_dbg), and its invalidate_direct_mem_ptr callback
// is registered so downstream DMI invalidations can be propagated.
374 tlm_utils::simple_initiator_socket<exclusive_monitor, DEFAULT_TLM_BUSWIDTH> back_socket;
377 : sc_core::sc_module(name), front_socket(
"front-socket"), back_socket(
"back-socket")
379 front_socket.register_b_transport(
this, &exclusive_monitor::b_transport);
380 front_socket.register_transport_dbg(
this, &exclusive_monitor::transport_dbg);
381 front_socket.register_get_direct_mem_ptr(
this, &exclusive_monitor::get_direct_mem_ptr);
382 back_socket.register_invalidate_direct_mem_ptr(
this, &exclusive_monitor::invalidate_direct_mem_ptr);