// sled/atomic_shim.rs

//! Inline of https://github.com/bltavares/atomic-shim
2
// Fast path: on every target except mips/powerpc (and unless the `mutex`
// feature forces the fallback), re-export the real std atomics under the
// same names; the lock-based shim module below is compiled out entirely.
#[cfg(not(any(
    target_arch = "mips",
    target_arch = "powerpc",
    feature = "mutex"
)))]
pub use std::sync::atomic::{AtomicI64, AtomicU64};
9#[cfg(any(target_arch = "mips", target_arch = "powerpc", feature = "mutex"))]
10mod shim {
11    use crossbeam_utils::sync::ShardedLock;
12    use std::sync::atomic::Ordering;
13
14    #[derive(Debug, Default)]
15    pub struct AtomicU64 {
16        value: ShardedLock<u64>,
17    }
18
19    impl AtomicU64 {
20        pub fn new(v: u64) -> Self {
21            Self { value: ShardedLock::new(v) }
22        }
23
24        #[allow(dead_code)]
25        pub fn get_mut(&mut self) -> &mut u64 {
26            self.value.get_mut().unwrap()
27        }
28
29        #[allow(dead_code)]
30        pub fn into_inner(self) -> u64 {
31            self.value.into_inner().unwrap()
32        }
33
34        #[allow(dead_code)]
35        pub fn load(&self, _: Ordering) -> u64 {
36            *self.value.read().unwrap()
37        }
38
39        #[allow(dead_code)]
40        pub fn store(&self, value: u64, _: Ordering) {
41            let mut lock = self.value.write().unwrap();
42            *lock = value;
43        }
44
45        #[allow(dead_code)]
46        pub fn swap(&self, value: u64, _: Ordering) -> u64 {
47            let mut lock = self.value.write().unwrap();
48            let prev = *lock;
49            *lock = value;
50            prev
51        }
52
53        #[allow(dead_code)]
54        pub fn compare_and_swap(
55            &self,
56            current: u64,
57            new: u64,
58            _: Ordering,
59        ) -> u64 {
60            let mut lock = self.value.write().unwrap();
61            let prev = *lock;
62            if prev == current {
63                *lock = new;
64            };
65            prev
66        }
67
68        #[allow(dead_code)]
69        pub fn compare_exchange(
70            &self,
71            current: u64,
72            new: u64,
73            _: Ordering,
74            _: Ordering,
75        ) -> Result<u64, u64> {
76            let mut lock = self.value.write().unwrap();
77            let prev = *lock;
78            if prev == current {
79                *lock = new;
80                Ok(current)
81            } else {
82                Err(prev)
83            }
84        }
85
86        #[allow(dead_code)]
87        pub fn compare_exchange_weak(
88            &self,
89            current: u64,
90            new: u64,
91            success: Ordering,
92            failure: Ordering,
93        ) -> Result<u64, u64> {
94            self.compare_exchange(current, new, success, failure)
95        }
96
97        #[allow(dead_code)]
98        pub fn fetch_add(&self, val: u64, _: Ordering) -> u64 {
99            let mut lock = self.value.write().unwrap();
100            let prev = *lock;
101            *lock = prev.wrapping_add(val);
102            prev
103        }
104
105        #[allow(dead_code)]
106        pub fn fetch_sub(&self, val: u64, _: Ordering) -> u64 {
107            let mut lock = self.value.write().unwrap();
108            let prev = *lock;
109            *lock = prev.wrapping_sub(val);
110            prev
111        }
112
113        #[allow(dead_code)]
114        pub fn fetch_and(&self, val: u64, _: Ordering) -> u64 {
115            let mut lock = self.value.write().unwrap();
116            let prev = *lock;
117            *lock = prev & val;
118            prev
119        }
120
121        #[allow(dead_code)]
122        pub fn fetch_nand(&self, val: u64, _: Ordering) -> u64 {
123            let mut lock = self.value.write().unwrap();
124            let prev = *lock;
125            *lock = !(prev & val);
126            prev
127        }
128
129        #[allow(dead_code)]
130        pub fn fetch_or(&self, val: u64, _: Ordering) -> u64 {
131            let mut lock = self.value.write().unwrap();
132            let prev = *lock;
133            *lock = prev | val;
134            prev
135        }
136
137        #[allow(dead_code)]
138        pub fn fetch_xor(&self, val: u64, _: Ordering) -> u64 {
139            let mut lock = self.value.write().unwrap();
140            let prev = *lock;
141            *lock = prev ^ val;
142            prev
143        }
144    }
145
146    impl From<u64> for AtomicU64 {
147        fn from(value: u64) -> Self {
148            AtomicU64::new(value)
149        }
150    }
151
152    #[derive(Debug, Default)]
153    pub struct AtomicI64 {
154        value: ShardedLock<i64>,
155    }
156
157    impl AtomicI64 {
158        pub fn new(v: i64) -> Self {
159            Self { value: ShardedLock::new(v) }
160        }
161
162        #[allow(dead_code)]
163        pub fn get_mut(&mut self) -> &mut i64 {
164            self.value.get_mut().unwrap()
165        }
166
167        #[allow(dead_code)]
168        pub fn into_inner(self) -> i64 {
169            self.value.into_inner().unwrap()
170        }
171
172        #[allow(dead_code)]
173        pub fn load(&self, _: Ordering) -> i64 {
174            *self.value.read().unwrap()
175        }
176
177        #[allow(dead_code)]
178        pub fn store(&self, value: i64, _: Ordering) {
179            let mut lock = self.value.write().unwrap();
180            *lock = value;
181        }
182
183        #[allow(dead_code)]
184        pub fn swap(&self, value: i64, _: Ordering) -> i64 {
185            let mut lock = self.value.write().unwrap();
186            let prev = *lock;
187            *lock = value;
188            prev
189        }
190
191        #[allow(dead_code)]
192        pub fn compare_and_swap(
193            &self,
194            current: i64,
195            new: i64,
196            _: Ordering,
197        ) -> i64 {
198            let mut lock = self.value.write().unwrap();
199            let prev = *lock;
200            if prev == current {
201                *lock = new;
202            };
203            prev
204        }
205
206        #[allow(dead_code)]
207        pub fn compare_exchange(
208            &self,
209            current: i64,
210            new: i64,
211            _: Ordering,
212            _: Ordering,
213        ) -> Result<i64, i64> {
214            let mut lock = self.value.write().unwrap();
215            let prev = *lock;
216            if prev == current {
217                *lock = new;
218                Ok(current)
219            } else {
220                Err(prev)
221            }
222        }
223
224        #[allow(dead_code)]
225        pub fn compare_exchange_weak(
226            &self,
227            current: i64,
228            new: i64,
229            success: Ordering,
230            failure: Ordering,
231        ) -> Result<i64, i64> {
232            self.compare_exchange(current, new, success, failure)
233        }
234
235        #[allow(dead_code)]
236        pub fn fetch_add(&self, val: i64, _: Ordering) -> i64 {
237            let mut lock = self.value.write().unwrap();
238            let prev = *lock;
239            *lock = prev.wrapping_add(val);
240            prev
241        }
242
243        #[allow(dead_code)]
244        pub fn fetch_sub(&self, val: i64, _: Ordering) -> i64 {
245            let mut lock = self.value.write().unwrap();
246            let prev = *lock;
247            *lock = prev.wrapping_sub(val);
248            prev
249        }
250
251        #[allow(dead_code)]
252        pub fn fetch_and(&self, val: i64, _: Ordering) -> i64 {
253            let mut lock = self.value.write().unwrap();
254            let prev = *lock;
255            *lock = prev & val;
256            prev
257        }
258
259        #[allow(dead_code)]
260        pub fn fetch_nand(&self, val: i64, _: Ordering) -> i64 {
261            let mut lock = self.value.write().unwrap();
262            let prev = *lock;
263            *lock = !(prev & val);
264            prev
265        }
266
267        #[allow(dead_code)]
268        pub fn fetch_or(&self, val: i64, _: Ordering) -> i64 {
269            let mut lock = self.value.write().unwrap();
270            let prev = *lock;
271            *lock = prev | val;
272            prev
273        }
274
275        #[allow(dead_code)]
276        pub fn fetch_xor(&self, val: i64, _: Ordering) -> i64 {
277            let mut lock = self.value.write().unwrap();
278            let prev = *lock;
279            *lock = prev ^ val;
280            prev
281        }
282    }
283
284    impl From<i64> for AtomicI64 {
285        fn from(value: i64) -> Self {
286            AtomicI64::new(value)
287        }
288    }
289}
290
// Counterpart of the cfg gate at the top of the file: the targets/feature
// routed away from the std atomics get the lock-based shim types instead,
// exported under the same names so callers are cfg-agnostic.
#[cfg(any(
    target_arch = "mips",
    target_arch = "powerpc",
    feature = "mutex"
))]
pub use shim::{AtomicI64, AtomicU64};