# Final Code

Here's the final code, with some added comments and re-ordered imports:

```rust
use std::marker::PhantomData;
use std::ops::Deref;
use std::ptr::NonNull;
use std::sync::atomic::{self, AtomicUsize, Ordering};

pub struct Arc<T> {
    ptr: NonNull<ArcInner<T>>,
    phantom: PhantomData<ArcInner<T>>,
}

pub struct ArcInner<T> {
    rc: AtomicUsize,
    data: T,
}

impl<T> Arc<T> {
    pub fn new(data: T) -> Arc<T> {
        // We start the reference count at 1, as that first reference is the
        // current pointer.
        let boxed = Box::new(ArcInner {
            rc: AtomicUsize::new(1),
            data,
        });
        Arc {
            // It is okay to call `.unwrap()` here as we get a pointer from
            // `Box::into_raw` which is guaranteed to not be null.
            ptr: NonNull::new(Box::into_raw(boxed)).unwrap(),
            phantom: PhantomData,
        }
    }
}

unsafe impl<T: Sync + Send> Send for Arc<T> {}
unsafe impl<T: Sync + Send> Sync for Arc<T> {}

impl<T> Deref for Arc<T> {
    type Target = T;

    fn deref(&self) -> &T {
        let inner = unsafe { self.ptr.as_ref() };
        &inner.data
    }
}

impl<T> Clone for Arc<T> {
    fn clone(&self) -> Arc<T> {
        let inner = unsafe { self.ptr.as_ref() };
        // Using a relaxed ordering is alright here, as we don't need any
        // atomic synchronization: we're not modifying or accessing the inner
        // data.
        let old_rc = inner.rc.fetch_add(1, Ordering::Relaxed);

        // If the reference count gets this high, it is about to overflow and
        // cause a use-after-free; aborting is the only sound response.
        if old_rc >= isize::MAX as usize {
            std::process::abort();
        }

        Self {
            ptr: self.ptr,
            phantom: PhantomData,
        }
    }
}

impl<T> Drop for Arc<T> {
    fn drop(&mut self) {
        // Decrement the reference count; only the thread that observes the
        // old value 1 (i.e. drops the last handle) may free the allocation.
        if unsafe { self.ptr.as_ref() }.rc.fetch_sub(1, Ordering::Release) != 1 {
            return;
        }
        // This fence is needed to prevent reordering of the use and deletion
        // of the data.
        atomic::fence(Ordering::Acquire);
        // This is safe as we know we have the last pointer to the `ArcInner`
        // and that its pointer is valid.
        unsafe { Box::from_raw(self.ptr.as_ptr()); }
    }
}
```
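For context, here is a minimal usage sketch, assuming the `Arc` defined above is in scope. The `main` function, the thread count, and the `String` payload are illustrative choices, not part of the original code; it simply shows clones being sent to threads and the last drop freeing the allocation.

```rust
fn main() {
    // One shared, immutably accessible value.
    let shared = Arc::new(String::from("hello"));

    let handles: Vec<_> = (0..4)
        .map(|i| {
            // Each thread gets its own handle; cloning only bumps the count.
            let shared = shared.clone();
            std::thread::spawn(move || {
                // `Deref` gives shared, read-only access to the inner data.
                println!("thread {i} sees {}", *shared);
            })
        })
        .collect();

    for handle in handles {
        handle.join().unwrap();
    }
    // `shared` and all clones have been dropped by the end of `main`;
    // whichever drop brings the count to zero frees the `ArcInner`.
}
```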