/* File: exclusive-monitor.h (quic/qbox) — recovered from Doxygen HTML output. */
/*
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All Rights Reserved.
 * Author: GreenSocs 2022
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#ifndef GREENSOCS_BASE_COMPONENTS_MISC_EXCLUSIVE_MONITOR_H_
#define GREENSOCS_BASE_COMPONENTS_MISC_EXCLUSIVE_MONITOR_H_

#include <cassert>
#include <map>
#include <memory>

#include <systemc>
#include <tlm>
#include <tlm_utils/simple_initiator_socket.h>
#include <tlm_utils/simple_target_socket.h>

#include <tlm-extensions/exclusive-access.h>
#include <tlm-extensions/pathid_extension.h>
#include <tlm_sockets_buswidth.h>
#include <module_factory_registery.h>

48class exclusive_monitor : public sc_core::sc_module
49{
50private:
52
53 class Region
54 {
55 public:
56 uint64_t start;
57 uint64_t end;
58 InitiatorId id;
59
60 Region(const tlm::tlm_generic_payload& txn, const InitiatorId& id)
61 : start(txn.get_address()), end(start + txn.get_data_length() - 1), id(id)
62 {
63 }
64
69 bool is_exact_match(const tlm::tlm_generic_payload& txn)
70 {
71 return (start == txn.get_address()) && (end == start + txn.get_data_length() - 1);
72 }
73 };
74
75 using RegionPtr = std::shared_ptr<Region>;
76
77 std::map<uint64_t, RegionPtr> m_regions;
78 std::map<InitiatorId, RegionPtr> m_regions_by_id;
79
80 const InitiatorId get_initiator_id(const tlm::tlm_generic_payload& txn)
81 {
83 txn.get_extension(ext);
84 return *ext;
85 }
86
87 RegionPtr find_region(const tlm::tlm_generic_payload& txn)
88 {
89 uint64_t start, end;
90
91 start = txn.get_address();
92 end = start + txn.get_data_length() - 1;
93
94 auto it = m_regions.lower_bound(start);
95
96 if (it != m_regions.begin()) {
97 it--;
98 }
99
100 while (it != m_regions.end()) {
101 RegionPtr r = it->second;
102
103 if (r->end < start) {
104 it++;
105 continue;
106 }
107
108 if (r->start > end) {
109 break;
110 }
111
112 return r;
113 }
114
115 return nullptr;
116 }
117
118 void dmi_invalidate(RegionPtr region) { front_socket->invalidate_direct_mem_ptr(region->start, region->end); }
119
120 void lock_region(const tlm::tlm_generic_payload& txn, const InitiatorId& id)
121 {
122 RegionPtr region(std::make_shared<Region>(txn, id));
123
124 assert(!find_region(txn));
125
126 m_regions[region->start] = region;
127 m_regions_by_id[region->id] = region;
128
129 dmi_invalidate(region);
130 }
131
132 void unlock_region(RegionPtr region)
133 {
134 assert(m_regions.find(region->start) != m_regions.end());
135 assert(m_regions_by_id.find(region->id) != m_regions_by_id.end());
136
137 m_regions.erase(region->start);
138 m_regions_by_id.erase(region->id);
139 }
140
141 void unlock_region_by_id(const InitiatorId& id)
142 {
143 if (m_regions_by_id.find(id) == m_regions_by_id.end()) {
144 return;
145 }
146
147 RegionPtr region = m_regions_by_id.at(id);
148
149 unlock_region(region);
150 }
151
152 void handle_exclusive_load(const tlm::tlm_generic_payload& txn, const ExclusiveAccessTlmExtension& ext)
153 {
154 const InitiatorId& id = get_initiator_id(txn);
155 RegionPtr region = find_region(txn);
156
157 if (region) {
158 /* Region already locked, do nothing */
159 return;
160 }
161
162 /*
163 * An exclusive load will unlock a previously locked region by the
164 * same initiator.
165 */
166 unlock_region_by_id(id);
167
168 lock_region(txn, id);
169 }
170
171 bool handle_exclusive_store(const tlm::tlm_generic_payload& txn, ExclusiveAccessTlmExtension& ext)
172 {
173 RegionPtr region = find_region(txn);
174
175 if (!region) {
176 /* This region is not locked */
177 ext.set_exclusive_store_failure();
178 return false;
179 }
180
181 if (region->id != get_initiator_id(txn)) {
182 /* This region is locked by another initiator */
183 ext.set_exclusive_store_failure();
184 return false;
185 }
186
187 if (!region->is_exact_match(txn)) {
188 /* This store is not exactly aligned with the locked region */
189 ext.set_exclusive_store_failure();
190 return false;
191 }
192
193 ext.set_exclusive_store_success();
194 unlock_region(region);
195
196 return true;
197 }
198
199 void handle_regular_store(const tlm::tlm_generic_payload& txn)
200 {
201 RegionPtr region;
202
203 /* Unlock all regions intersecting with the store */
204 while ((region = find_region(txn))) {
205 unlock_region(region);
206 }
207 }
208
209 /*
210 * Called before the actual b_transport forwarding. Handle regular and
211 * exclusive stores and return true if the b_transport call must be skipped
212 * completely (because of a exclusive store failure).
213 */
214 bool before_b_transport(const tlm::tlm_generic_payload& txn)
215 {
217 bool is_store = txn.get_command() == tlm::TLM_WRITE_COMMAND;
218
219 txn.get_extension(ext);
220
221 if (!is_store) {
222 /* Carry on with b_transport */
223 return true;
224 }
225
226 if (ext) {
227 /* We have an exclusive access */
228 return handle_exclusive_store(txn, *ext);
229 } else {
230 /*
231 * This is not an exclusive access. We are still interested in
232 * regular stores as they will unlock a locked region.
233 */
234 handle_regular_store(txn);
235 return true;
236 }
237 }
238
239 /*
240 * Called after the actual b_transport forwarding. Handles exclusive loads
241 * and return true if the DMI hint must be cleared in the transaction.
242 */
243 bool after_b_transport(const tlm::tlm_generic_payload& txn)
244 {
246 bool is_store = txn.get_command() == tlm::TLM_WRITE_COMMAND;
247
248 txn.get_extension(ext);
249
250 if (is_store) {
251 /*
252 * Already handled in before_b_transport. If we didn't return early
253 * from b_transport, we know for sure we must not clear the DMI
254 * hint if present.
255 */
256 return false;
257 }
258
259 if (!ext) {
260 RegionPtr region = find_region(txn);
261
262 /*
263 * For a regular load, if the corresponding region is locked, clear
264 * the DMI hint if present.
265 */
266 return region != nullptr;
267 }
268
269 /* We have an exclusive load */
270 handle_exclusive_load(txn, *ext);
271
272 /* We know for sure the corresponding region is locked, so clear the hint. */
273 return true;
274 }
275
276 void b_transport(tlm::tlm_generic_payload& txn, sc_core::sc_time& delay)
277 {
278 tlm::tlm_generic_payload txn_copy;
279
280 if (!before_b_transport(txn)) {
281 /* Exclusive store failure */
282 txn.set_response_status(tlm::TLM_GENERIC_ERROR_RESPONSE);
283 return;
284 }
285
286 /*
287 * We keep a copy of the transaction in case the next modules in the
288 * call chain mess with it.
289 */
290 txn_copy.deep_copy_from(txn);
291
292 back_socket->b_transport(txn, delay);
293
294 if (txn.get_response_status() != tlm::TLM_OK_RESPONSE) {
295 /* Ignore the transaction in case the target reports a failure */
296 return;
297 }
298
299 if (after_b_transport(txn_copy)) {
300 txn.set_dmi_allowed(false);
301 }
302 }
303
304 unsigned int transport_dbg(tlm::tlm_generic_payload& txn) { return back_socket->transport_dbg(txn); }
305
306 bool get_direct_mem_ptr(tlm::tlm_generic_payload& txn, tlm::tlm_dmi& dmi_data)
307 {
310 unsigned char* fixed_ptr;
311 bool ret = back_socket->get_direct_mem_ptr(txn, dmi_data);
312
313 if (!ret) {
314 /* The underlying target said no, no need to do more on our side */
315 return ret;
316 }
317
318 txn_start = txn.get_address();
319
320 fixed_start = dmi_data.get_start_address();
321 fixed_end = dmi_data.get_end_address();
322
323 auto it = m_regions.upper_bound(txn.get_address());
324
325 if (it != m_regions.begin()) {
326 it--;
327 }
328
329 for (; it != m_regions.end(); it++) {
330 RegionPtr r = it->second;
331
332 if (r->end < fixed_start) {
333 /* not in the returned DMI region yet */
334 continue;
335 }
336
337 if (r->start > fixed_end) {
338 /* beyond the DMI region, we're done */
339 break;
340 }
341
342 if ((r->start <= txn_start) && (r->end >= txn_start)) {
343 /* The exclusive region intersects with the request */
344 return false;
345 }
346
347 if (r->end < txn_start) {
348 /* Fix the left side of the interval */
349 fixed_start = r->end + 1;
350 }
351
352 if (r->start > txn_start) {
353 /* Fix the right side and stop here */
354 fixed_end = r->start - 1;
355 break;
356 }
357 }
358
359 fixed_ptr = dmi_data.get_dmi_ptr() + (fixed_start - dmi_data.get_start_address());
360 dmi_data.set_dmi_ptr(fixed_ptr);
361 dmi_data.set_start_address(fixed_start);
362 dmi_data.set_end_address(fixed_end);
363
364 return true;
365 }
366
367 void invalidate_direct_mem_ptr(sc_dt::uint64 start_range, sc_dt::uint64 end_range)
368 {
369 front_socket->invalidate_direct_mem_ptr(start_range, end_range);
370 }
371
372public:
373 tlm_utils::simple_target_socket<exclusive_monitor, DEFAULT_TLM_BUSWIDTH> front_socket;
374 tlm_utils::simple_initiator_socket<exclusive_monitor, DEFAULT_TLM_BUSWIDTH> back_socket;
375
376 exclusive_monitor(const sc_core::sc_module_name& name)
377 : sc_core::sc_module(name), front_socket("front-socket"), back_socket("back-socket")
378 {
379 front_socket.register_b_transport(this, &exclusive_monitor::b_transport);
380 front_socket.register_transport_dbg(this, &exclusive_monitor::transport_dbg);
381 front_socket.register_get_direct_mem_ptr(this, &exclusive_monitor::get_direct_mem_ptr);
382 back_socket.register_invalidate_direct_mem_ptr(this, &exclusive_monitor::invalidate_direct_mem_ptr);
383 }
384
385 exclusive_monitor() = delete;
386 exclusive_monitor(const exclusive_monitor&) = delete;
387
388 virtual ~exclusive_monitor() {}
389};
390
/* Module-factory registration hook (defined in the matching .cc file). */
extern "C" void module_register();

#endif
/*
 * Doxygen cross-reference residue from the HTML extraction:
 * - ExclusiveAccessTlmExtension: exclusive load/store TLM extension,
 *   defined in exclusive-access.h:36
 * - PathIDExtension: defined in pathid_extension.h:26
 * - exclusive_monitor: ARM-like global exclusive monitor,
 *   defined in exclusive-monitor.h:49
 */