#![allow(unsafe_code)]
use std::{
    alloc::{alloc, dealloc, Layout},
    convert::TryFrom,
    fmt::{self, Debug},
    mem,
    ops::Deref,
    ptr,
    sync::atomic::{AtomicUsize, Ordering},
};

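// `ArcInner` is `repr(C)` so that the field order, and therefore the offset
// of `inner` relative to the start of the allocation, is guaranteed. The raw
// pointer arithmetic in `copy_from_slice` and `from_raw` below relies on this
// exact layout: the reference count first, then padding, then the payload.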
#[repr(C)]
struct ArcInner<T: ?Sized> {
    rc: AtomicUsize,
    inner: T,
}

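/// A minimal atomically reference-counted pointer, analogous to
/// `std::sync::Arc` but without weak references.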
pub struct Arc<T: ?Sized> {
    ptr: *mut ArcInner<T>,
}

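// Sound for the same reason as `std::sync::Arc`: a clone on another thread
// can both reference the `T` and, if it is the last owner, drop it, so both
// `Send` and `Sync` are required of `T`.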
unsafe impl<T: Send + Sync + ?Sized> Send for Arc<T> {}
unsafe impl<T: Send + Sync + ?Sized> Sync for Arc<T> {}

impl<T: Debug + ?Sized> Debug for Arc<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
        Debug::fmt(&**self, f)
    }
}

impl<T> Arc<T> {
    pub fn new(inner: T) -> Arc<T> {
        let bx = Box::new(ArcInner { inner, rc: AtomicUsize::new(1) });
        let ptr = Box::into_raw(bx);
        Arc { ptr }
    }

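    /// Allocates an `ArcInner<[T]>` and bitwise-copies the elements of `s`
    /// into it.
    ///
    /// # Safety
    ///
    /// This duplicates the elements without requiring `T: Copy`, so the
    /// caller must either have `T: Copy` (as in `From<&[T]>`) or give up
    /// ownership of the source elements afterwards (as `From<Vec<T>>` does
    /// via `set_len(0)`).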
    unsafe fn copy_from_slice(s: &[T]) -> Arc<[T]> {
        let align =
            std::cmp::max(mem::align_of::<T>(), mem::align_of::<AtomicUsize>());

        // Offset of the slice data: the counter's size, rounded up so the
        // payload is aligned. This matches the offset of `inner` in the
        // `repr(C)` `ArcInner<[T]>`.
        let rc_width = std::cmp::max(align, mem::size_of::<AtomicUsize>());
        let data_width = mem::size_of::<T>().checked_mul(s.len()).unwrap();

        // Round the total size up to a multiple of the alignment so that this
        // layout agrees with `Layout::for_value` on the finished value, which
        // is what `Drop` deallocates with via `Box::from_raw`.
        let size_unpadded = rc_width.checked_add(data_width).unwrap();
        let size_padded = (size_unpadded + align - 1) & !(align - 1);

        let layout = Layout::from_size_align(size_padded, align).unwrap();

        let ptr = alloc(layout);
        assert!(!ptr.is_null(), "failed to allocate Arc");

        #[allow(clippy::cast_ptr_alignment)]
        ptr::write(ptr as _, AtomicUsize::new(1));

        let data_ptr = ptr.add(rc_width);
        ptr::copy_nonoverlapping(s.as_ptr(), data_ptr as _, s.len());

        let fat_ptr: *const ArcInner<[T]> = Arc::fatten(ptr, s.len());

        Arc { ptr: fat_ptr as *mut _ }
    }

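    // Builds a fat pointer to a slice-tailed `ArcInner<[T]>` from a thin
    // pointer and a length: a `&[()]` of length `len` is conjured (its
    // elements are zero-sized, so no memory is read), and the cast to
    // `*const ArcInner<[T]>` carries the length metadata across.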
    #[allow(trivial_casts)]
    fn fatten(data: *const u8, len: usize) -> *const ArcInner<[T]> {
        // Requirements of `slice::from_raw_parts`: non-null, and a length
        // that fits in an `isize`.
        assert!(!data.is_null());
        assert!(isize::try_from(len).is_ok());

        let slice =
            unsafe { core::slice::from_raw_parts(data as *const (), len) };
        slice as *const [()] as *const _
    }

    pub fn into_raw(arc: Arc<T>) -> *const T {
        let ptr = unsafe { &(*arc.ptr).inner };
        // Leak the `Arc` so the reference it holds is transferred to the
        // raw pointer; `from_raw` reclaims it.
        #[allow(clippy::mem_forget)]
        mem::forget(arc);
        ptr
    }

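    /// # Safety
    ///
    /// `ptr` must have been produced by `Arc::into_raw` and must not be used
    /// again afterwards; this walks back from the payload to the start of
    /// the allocation using the same offset arithmetic as allocation.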
    pub unsafe fn from_raw(ptr: *const T) -> Arc<T> {
        let align =
            std::cmp::max(mem::align_of::<T>(), mem::align_of::<AtomicUsize>());

        let rc_width = std::cmp::max(align, mem::size_of::<AtomicUsize>());

        let sub_ptr = (ptr as *const u8).sub(rc_width) as *mut ArcInner<T>;

        Arc { ptr: sub_ptr }
    }
}

impl<T: ?Sized> Arc<T> {
    pub fn strong_count(arc: &Arc<T>) -> usize {
        unsafe { (*arc.ptr).rc.load(Ordering::Acquire) }
    }

    pub fn get_mut(arc: &mut Arc<T>) -> Option<&mut T> {
        // Handing out `&mut T` is only sound while this is the sole owner.
        if Arc::strong_count(arc) == 1 {
            Some(unsafe { &mut arc.ptr.as_mut().unwrap().inner })
        } else {
            None
        }
    }
}

impl<T: ?Sized + Clone> Arc<T> {
    pub fn make_mut(arc: &mut Arc<T>) -> &mut T {
        // Clone-on-write: if the value is shared, replace this handle with a
        // fresh, uniquely owned copy before handing out `&mut T`.
        if Arc::strong_count(arc) != 1 {
            *arc = Arc::new((**arc).clone());
            assert_eq!(Arc::strong_count(arc), 1);
        }
        Arc::get_mut(arc).unwrap()
    }
}

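// A small sanity check for `get_mut`/`make_mut`, added as an illustrative
// sketch (it is not part of the original test suite).
#[test]
fn get_mut_and_make_mut() {
    let mut a: Arc<usize> = Arc::new(5);
    let b = a.clone();
    // Shared: no unique access.
    assert!(Arc::get_mut(&mut a).is_none());
    drop(b);
    // Unique again: mutation is allowed.
    *Arc::get_mut(&mut a).unwrap() = 6;
    assert_eq!(*a, 6);

    let c = a.clone();
    // `make_mut` clones out from under the shared value, leaving `c` intact.
    *Arc::make_mut(&mut a) += 1;
    assert_eq!(*a, 7);
    assert_eq!(*c, 6);
}
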
impl<T: Default> Default for Arc<T> {
    fn default() -> Arc<T> {
        Arc::new(T::default())
    }
}

impl<T: ?Sized> Clone for Arc<T> {
    fn clone(&self) -> Arc<T> {
        // Relaxed is sufficient here: the caller already holds a reference,
        // so the count cannot concurrently reach zero.
        let last_count =
            unsafe { (*self.ptr).rc.fetch_add(1, Ordering::Relaxed) };

        // Guard against the count overflowing and wrapping to zero, which
        // would lead to a use-after-free; abort as `std::sync::Arc` does.
        if last_count == usize::MAX {
            std::process::abort();
        }

        Arc { ptr: self.ptr }
    }
}

impl<T: ?Sized> Drop for Arc<T> {
    fn drop(&mut self) {
        unsafe {
            let rc = (*self.ptr).rc.fetch_sub(1, Ordering::Release) - 1;
            if rc == 0 {
                // Pair the Release decrements with an Acquire fence so that
                // all accesses to the data happen-before it is freed.
                std::sync::atomic::fence(Ordering::Acquire);
                // Reconstitute the allocation as a `Box` so the payload is
                // dropped and the memory is freed with a matching layout.
                drop(Box::from_raw(self.ptr));
            }
        }
    }
}

impl<T: Copy> From<&[T]> for Arc<[T]> {
    #[inline]
    fn from(s: &[T]) -> Arc<[T]> {
        unsafe { Arc::copy_from_slice(s) }
    }
}

#[allow(clippy::fallible_impl_from)]
impl<T> From<Box<[T]>> for Arc<[T]> {
    #[inline]
    fn from(b: Box<[T]>) -> Arc<[T]> {
        let len = b.len();
        unsafe {
            let src = Box::into_raw(b);

            // Build the same header-then-payload layout that
            // `copy_from_slice` uses.
            let value_layout = Layout::for_value(&*src);
            let align = std::cmp::max(
                value_layout.align(),
                mem::align_of::<AtomicUsize>(),
            );
            let rc_width = std::cmp::max(align, mem::size_of::<AtomicUsize>());
            let unpadded_size =
                rc_width.checked_add(value_layout.size()).unwrap();
            let size = (unpadded_size + align - 1) & !(align - 1);
            let dst_layout = Layout::from_size_align(size, align).unwrap();
            let dst = alloc(dst_layout);
            assert!(!dst.is_null(), "failed to allocate Arc");

            #[allow(clippy::cast_ptr_alignment)]
            ptr::write(dst as _, AtomicUsize::new(1));

            // Move the elements bitwise out of the box...
            let data_ptr = dst.add(rc_width);
            ptr::copy_nonoverlapping(
                src as *const u8,
                data_ptr,
                value_layout.size(),
            );

            // ...then free the box's allocation without dropping the
            // elements, whose ownership now lives in the `Arc`. Empty boxed
            // slices own no allocation, so there is nothing to free.
            if value_layout.size() != 0 {
                dealloc(src as *mut u8, value_layout);
            }

            let fat_ptr: *const ArcInner<[T]> = Arc::fatten(dst, len);

            Arc { ptr: fat_ptr as *mut _ }
        }
    }
}

#[test]
fn boxed_slice_to_arc_slice() {
    let box1: Box<[u8]> = Box::new([1, 2, 3]);
    let arc1: Arc<[u8]> = box1.into();
    assert_eq!(&*arc1, &*vec![1, 2, 3]);
    let box2: Box<[u8]> = Box::new([]);
    let arc2: Arc<[u8]> = box2.into();
    assert_eq!(&*arc2, &*vec![]);
    let box3: Box<[u64]> = Box::new([1, 2, 3]);
    let arc3: Arc<[u64]> = box3.into();
    assert_eq!(&*arc3, &*vec![1, 2, 3]);
}

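// A round-trip sanity check for `into_raw`/`from_raw`, added as an
// illustrative sketch (it is not part of the original test suite).
#[test]
fn into_raw_round_trip() {
    let arc: Arc<String> = Arc::new("hello".to_string());
    let raw = Arc::into_raw(arc);
    // Safety: `raw` came from `Arc::into_raw` directly above.
    let arc2 = unsafe { Arc::from_raw(raw) };
    assert_eq!(arc2.as_str(), "hello");
    assert_eq!(Arc::strong_count(&arc2), 1);
}
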
impl<T> From<Vec<T>> for Arc<[T]> {
    #[inline]
    fn from(mut v: Vec<T>) -> Arc<[T]> {
        unsafe {
            let arc = Arc::copy_from_slice(&v);

            // The elements were moved bitwise into the `Arc`; clear the
            // length so that dropping `v` frees only its buffer and does
            // not drop them a second time.
            v.set_len(0);

            arc
        }
    }
}

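// Exercises `From<Vec<T>>` with a non-`Copy` element type to check that the
// elements are moved rather than cloned; an illustrative sketch that is not
// part of the original test suite.
#[test]
fn vec_to_arc_slice() {
    let v = vec!["a".to_string(), "b".to_string()];
    let arc: Arc<[String]> = v.into();
    assert_eq!(arc.len(), 2);
    assert_eq!(arc[0], "a");
    assert_eq!(arc[1], "b");
}
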
impl<T: ?Sized> Deref for Arc<T> {
    type Target = T;

    fn deref(&self) -> &T {
        unsafe { &(*self.ptr).inner }
    }
}

impl<T: ?Sized> std::borrow::Borrow<T> for Arc<T> {
    fn borrow(&self) -> &T {
        &**self
    }
}

impl<T: ?Sized> AsRef<T> for Arc<T> {
    fn as_ref(&self) -> &T {
        &**self
    }
}