# Final Code

Here's the final code, with some added comments and re-ordered imports:

```rust
use std::marker::PhantomData;
use std::ops::Deref;
use std::ptr::NonNull;
use std::sync::atomic::{self, AtomicUsize, Ordering};

pub struct Arc<T> {
    ptr: NonNull<ArcInner<T>>,
    _marker: PhantomData<ArcInner<T>>,
}

pub struct ArcInner<T> {
    rc: AtomicUsize,
    data: T,
}

impl<T> Arc<T> {
    pub fn new(data: T) -> Arc<T> {
        // We start the reference count at 1, as that first reference is the
        // current pointer.
        let boxed = Box::new(ArcInner {
            rc: AtomicUsize::new(1),
            data,
        });
        Arc {
            // It is okay to call `.unwrap()` here as we get a pointer from
            // `Box::into_raw` which is guaranteed to not be null.
            ptr: NonNull::new(Box::into_raw(boxed)).unwrap(),
            _marker: PhantomData,
        }
    }

    fn inner(&self) -> &ArcInner<T> {
        // This unsafety is okay because while this Arc is alive, we're
        // guaranteed that the inner pointer is valid. Also, ArcInner<T> is
        // Sync if T is Sync.
        unsafe { self.ptr.as_ref() }
    }
}

unsafe impl<T: Sync + Send> Send for Arc<T> {}
unsafe impl<T: Sync + Send> Sync for Arc<T> {}

impl<T> Clone for Arc<T> {
    fn clone(&self) -> Arc<T> {
        // Using a relaxed ordering is alright here as knowledge of the
        // original reference prevents other threads from wrongly deleting
        // the object.
        self.inner().rc.fetch_add(1, Ordering::Relaxed);
        Self {
            ptr: self.ptr,
            _marker: PhantomData,
        }
    }
}

impl<T> Drop for Arc<T> {
    fn drop(&mut self) {
        if self.inner().rc.fetch_sub(1, Ordering::Release) != 1 {
            return;
        }
        // This fence is needed to prevent reordering of the use of the data
        // and the deletion of the data.
        atomic::fence(Ordering::Acquire);
        // This is safe as we know we have the last pointer to the `ArcInner`
        // and that its pointer is valid.
        unsafe { drop(Box::from_raw(self.ptr.as_ptr())); }
    }
}

impl<T> Deref for Arc<T> {
    type Target = T;

    fn deref(&self) -> &T {
        &self.inner().data
    }
}
```
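
To see the pieces working together, here is a minimal usage sketch (not part of the listing above) that assumes it sits in the same module as this `Arc`; it clones the handle into another thread and relies on `Deref` to reach the inner data:

```rust
use std::thread;

fn main() {
    // One allocation; the reference count starts at 1.
    let shared = Arc::new(vec![1, 2, 3]);

    // `clone` bumps the count with a relaxed fetch_add, so the new handle
    // can be moved into another thread (Vec<i32> is Send + Sync, so our
    // Arc<Vec<i32>> is Send).
    let worker = {
        let shared = shared.clone();
        thread::spawn(move || {
            // Deref lets us call Vec methods directly on the Arc.
            assert_eq!(shared.len(), 3);
        })
    };
    worker.join().unwrap();

    // The original handle is still valid; the allocation is freed only
    // when the last clone is dropped.
    assert_eq!(*shared, vec![1, 2, 3]);
}
```

Each clone that goes out of scope runs `Drop`, decrementing the count; only the handle that observes the count hitting zero takes the acquire fence and frees the `ArcInner`.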