Update sample code with stable alloc crate

pull/161/head
Lzu Tao 6 years ago
parent 58e36e0e08
commit c7b7850e6f

@ -122,6 +122,7 @@ Nope.
Let's consider a simplified implementation of Rc:
```rust,ignore
# use std::alloc;
struct Rc<T> {
ptr: *mut RcBox<T>,
}
@ -134,8 +135,7 @@ struct RcBox<T> {
impl<T> Rc<T> {
fn new(data: T) -> Self {
unsafe {
// Wouldn't it be nice if heap::allocate worked like this?
let ptr = heap::allocate::<RcBox<T>>();
let ptr = alloc::alloc(alloc::Layout::new::<RcBox<T>>()) as *mut RcBox<T>;
ptr::write(ptr, RcBox {
data: data,
ref_count: 1,
@ -159,7 +159,7 @@ impl<T> Drop for Rc<T> {
if (*self.ptr).ref_count == 0 {
// drop the data and then free it
ptr::read(self.ptr);
heap::deallocate(self.ptr);
alloc::dealloc(self.ptr as *mut u8, alloc::Layout::new::<RcBox<T>>());
}
}
}

@ -15,15 +15,24 @@ want to use `empty` because there's no real allocation to talk about but
So:
```rust,ignore
#![feature(alloc, heap_api)]
```rust
# #![feature(ptr_internals)]
# use std::ptr::{self, Unique};
use std::mem;
# pub struct Vec<T> {
# ptr: Unique<T>,
# cap: usize,
# len: usize,
# }
impl<T> Vec<T> {
fn new() -> Self {
pub fn new() -> Self {
assert!(mem::size_of::<T>() != 0, "We're not ready to handle ZSTs");
Vec { ptr: Unique::empty(), len: 0, cap: 0 }
Vec {
ptr: Unique::empty(),
len: 0,
cap: 0,
}
}
}
```
@ -37,8 +46,8 @@ that, we'll need to use the rest of the heap APIs. These basically allow us to
talk directly to Rust's allocator (jemalloc by default).
We'll also need a way to handle out-of-memory (OOM) conditions. The standard
library calls `std::alloc::oom()`, which in turn calls the `oom` langitem,
which aborts the program in a platform-specific manner.
library calls `std::alloc::handle_alloc_error()`, which in turn calls the
`oom` langitem, which aborts the program in a platform-specific manner.
The reason we abort and don't panic is because unwinding can cause allocations
to happen, and that seems like a bad thing to do when your allocator just came
back with "hey I don't have any more memory".
@ -157,24 +166,22 @@ such we will guard against this case explicitly.
Ok with all the nonsense out of the way, let's actually allocate some memory:
```rust,ignore
use std::alloc::oom;
use std::alloc;
fn grow(&mut self) {
// this is all pretty delicate, so let's say it's all unsafe
unsafe {
// current API requires us to specify size and alignment manually.
let align = mem::align_of::<T>();
let elem_size = mem::size_of::<T>();
let layout = alloc::Layout::new::<T>();
let (new_cap, ptr) = if self.cap == 0 {
let ptr = heap::allocate(elem_size, align);
let ptr = alloc::alloc(layout);
(1, ptr)
} else {
// as an invariant, we can assume that `self.cap < isize::MAX`,
// so this doesn't need to be checked.
let new_cap = self.cap * 2;
// Similarly this can't overflow due to previously allocating this
let old_num_bytes = self.cap * elem_size;
let old_num_bytes = self.cap * mem::size_of::<T>();
// check that the new allocation doesn't exceed `isize::MAX` at all
// regardless of the actual size of the capacity. This combines the
@ -182,21 +189,22 @@ fn grow(&mut self) {
// we need to make. We lose the ability to allocate e.g. 2/3rds of
// the address space with a single Vec of i16's on 32-bit though.
// Alas, poor Yorick -- I knew him, Horatio.
assert!(old_num_bytes <= (::std::isize::MAX as usize) / 2,
"capacity overflow");
assert!(
old_num_bytes <= (std::isize::MAX as usize) / 2,
"capacity overflow"
);
let new_num_bytes = old_num_bytes * 2;
let ptr = heap::reallocate(self.ptr.as_ptr() as *mut _,
old_num_bytes,
new_num_bytes,
align);
// `realloc` must be given the layout of the *existing* allocation
let old_layout = alloc::Layout::from_size_align_unchecked(old_num_bytes, mem::align_of::<T>());
let ptr = alloc::realloc(self.ptr.as_ptr() as *mut u8, old_layout, new_num_bytes);
(new_cap, ptr)
};
// If allocate or reallocate fail, we'll get `null` back
if ptr.is_null() { oom(); }
if ptr.is_null() {
alloc::handle_alloc_error(layout);
}
self.ptr = Unique::new(ptr as *mut _);
self.ptr = Unique::new_unchecked(ptr as *mut T);
self.cap = new_cap;
}
}

@ -7,7 +7,7 @@ ask Rust if `T` `needs_drop` and omit the calls to `pop`. However in practice
LLVM is *really* good at removing simple side-effect free code like this, so I
wouldn't bother unless you notice it's not being stripped (in this case it is).
We must not call `heap::deallocate` when `self.cap == 0`, as in this case we
We must not call `alloc::dealloc` when `self.cap == 0`, as in this case we
haven't actually allocated any memory.
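For the curious, the `needs_drop` check mentioned above would look something
like this; a small standalone sketch using the standard `Vec` and a
hypothetical `drop_elements` helper:
```rust
use std::mem;

// Only pay for the pop loop when `T` actually has drop glue.
// In practice LLVM strips the loop for trivial types anyway.
fn drop_elements<T>(v: &mut Vec<T>) {
    if mem::needs_drop::<T>() {
        while let Some(_) = v.pop() {}
    }
}

fn main() {
    let mut v = vec![String::from("a"), String::from("b")];
    drop_elements(&mut v); // String needs drop, so the loop runs
    assert!(v.is_empty());
}
```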
@ -15,13 +15,9 @@ haven't actually allocated any memory.
impl<T> Drop for Vec<T> {
fn drop(&mut self) {
if self.cap != 0 {
while let Some(_) = self.pop() { }
let align = mem::align_of::<T>();
let elem_size = mem::size_of::<T>();
let num_bytes = elem_size * self.cap;
while let Some(_) = self.pop() {}
unsafe {
heap::deallocate(self.ptr.as_ptr() as *mut _, num_bytes, align);
// free the whole buffer: `cap` elements, not just one
let layout = alloc::Layout::from_size_align_unchecked(self.cap * mem::size_of::<T>(), mem::align_of::<T>());
alloc::dealloc(self.ptr.as_ptr() as *mut u8, layout);
}
}
}

@ -16,23 +16,23 @@ use std::ops::Deref;
impl<T> Deref for Vec<T> {
type Target = [T];
fn deref(&self) -> &[T] {
fn deref(&self) -> &Self::Target {
unsafe {
::std::slice::from_raw_parts(self.ptr.as_ptr(), self.len)
std::slice::from_raw_parts(self.ptr.as_ptr(), self.len)
}
}
}
```
And let's do DerefMut too:
And let's do `DerefMut` too:
```rust,ignore
use std::ops::DerefMut;
impl<T> DerefMut for Vec<T> {
fn deref_mut(&mut self) -> &mut [T] {
fn deref_mut(&mut self) -> &mut Self::Target {
unsafe {
::std::slice::from_raw_parts_mut(self.ptr.as_ptr(), self.len)
std::slice::from_raw_parts_mut(self.ptr.as_ptr(), self.len)
}
}
}

@ -16,17 +16,20 @@ using the old len.
pub fn insert(&mut self, index: usize, elem: T) {
// Note: `<=` because it's valid to insert after everything
// which would be equivalent to push.
assert!(index <= self.len, "index out of bounds");
if self.cap == self.len { self.grow(); }
assert!(self.len >= index, "index is out of bounds");
if self.cap == self.len {
self.grow();
}
unsafe {
if index < self.len {
// ptr::copy(src, dest, len): "copy from source to dest len elems"
ptr::copy(self.ptr.offset(index as isize),
self.ptr.offset(index as isize + 1),
self.len - index);
ptr::copy(
self.ptr.as_ptr().offset(index as isize),
self.ptr.as_ptr().offset(index as isize + 1),
self.len - index,
);
}
ptr::write(self.ptr.offset(index as isize), elem);
ptr::write(self.ptr.as_ptr().offset(index as isize), elem);
self.len += 1;
}
}
@ -38,13 +41,18 @@ Remove behaves in the opposite manner. We need to shift all the elements from
```rust,ignore
pub fn remove(&mut self, index: usize) -> T {
// Note: `<` because it's *not* valid to remove after everything
assert!(index < self.len, "index out of bounds");
assert!(self.len > index, "index is out of bounds");
unsafe {
self.len -= 1;
let result = ptr::read(self.ptr.offset(index as isize));
ptr::copy(self.ptr.offset(index as isize + 1),
self.ptr.offset(index as isize),
self.len - index);
let result = ptr::read(self.ptr.as_ptr().offset(index as isize));
ptr::copy(
self.ptr.as_ptr().offset(index as isize + 1),
self.ptr.as_ptr().offset(index as isize),
self.len - index,
);
result
}
}

@ -11,7 +11,7 @@ allocation.
IntoIter needs to be DoubleEnded as well, to enable reading from both ends.
Reading from the back could just be implemented as calling `pop`, but reading
from the front is harder. We could call `remove(0)` but that would be insanely
expensive. Instead we're going to just use ptr::read to copy values out of
expensive. Instead we're going to just use `ptr::read` to copy values out of
either end of the Vec without mutating the buffer at all.
To do this we're going to use a very common C idiom for array iteration. We'll
@ -20,7 +20,7 @@ points to one-element past the end. When we want an element from one end, we'll
read out the value pointed to at that end and move the pointer over by one. When
the two pointers are equal, we know we're done.
Note that the order of read and offset are reversed for `next` and `next_back`
Note that the order of read and offset is reversed for `next` and `next_back`.
For `next_back` the pointer is always after the element it wants to read next,
while for `next` the pointer is always at the element it wants to read next.
To see why this is, consider the case where every element but one has been
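To make that asymmetry concrete, here's a standalone sketch of the idiom over a
plain array, using borrowed reads (`*ptr`) where the real IntoIter would use
the owning `ptr::read`:
```rust
fn main() {
    let data = [1, 2, 3, 4];
    let mut start = data.as_ptr();
    // `end` points one past the last element: fine to point at,
    // never fine to dereference.
    let mut end = unsafe { start.offset(data.len() as isize) };
    unsafe {
        // `next`: read the element the pointer is at, *then* advance.
        let front = *start;
        start = start.offset(1);
        // `next_back`: retreat *first*, then read, because `end` sits
        // one past the element it will yield.
        end = end.offset(-1);
        let back = *end;
        assert_eq!((front, back), (1, 4));
    }
    // The pointers haven't met yet: 2 and 3 are still unread.
    assert!(start != end);
}
```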
@ -55,7 +55,7 @@ And this is what we end up with for initialization:
```rust,ignore
impl<T> Vec<T> {
fn into_iter(self) -> IntoIter<T> {
pub fn into_iter(self) -> IntoIter<T> {
// Can't destructure Vec since it's Drop
let ptr = self.ptr;
let cap = self.cap;
@ -67,14 +67,14 @@ impl<T> Vec<T> {
unsafe {
IntoIter {
buf: ptr,
cap: cap,
start: *ptr,
cap,
start: ptr.as_ptr(),
end: if cap == 0 {
// can't offset off this pointer, it's not allocated!
*ptr
ptr.as_ptr()
} else {
ptr.offset(len as isize)
}
ptr.as_ptr().offset(len as isize)
},
}
}
}
@ -86,7 +86,7 @@ Here's iterating forward:
```rust,ignore
impl<T> Iterator for IntoIter<T> {
type Item = T;
fn next(&mut self) -> Option<T> {
fn next(&mut self) -> Option<Self::Item> {
if self.start == self.end {
None
} else {
@ -99,8 +99,7 @@ impl<T> Iterator for IntoIter<T> {
}
fn size_hint(&self) -> (usize, Option<usize>) {
let len = (self.end as usize - self.start as usize)
/ mem::size_of::<T>();
let len = (self.end as usize - self.start as usize) / mem::size_of::<T>();
(len, Some(len))
}
}
@ -110,7 +109,7 @@ And here's iterating backwards.
```rust,ignore
impl<T> DoubleEndedIterator for IntoIter<T> {
fn next_back(&mut self) -> Option<T> {
fn next_back(&mut self) -> Option<Self::Item> {
if self.start == self.end {
None
} else {
@ -134,12 +133,8 @@ impl<T> Drop for IntoIter<T> {
if self.cap != 0 {
// drop any remaining elements
for _ in &mut *self {}
let align = mem::align_of::<T>();
let elem_size = mem::size_of::<T>();
let num_bytes = elem_size * self.cap;
unsafe {
heap::deallocate(self.buf.as_ptr() as *mut _, num_bytes, align);
// free the whole buffer: `cap` elements, not just one
let layout = alloc::Layout::from_size_align_unchecked(self.cap * mem::size_of::<T>(), mem::align_of::<T>());
alloc::dealloc(self.buf.as_ptr() as *mut u8, layout);
}
}
}

@ -18,11 +18,13 @@ For `push`, if the old len (before push was called) is 0, then we want to write
to the 0th index. So we should offset by the old len.
```rust,ignore
pub fn push(&mut self, elem: T) {
if self.len == self.cap { self.grow(); }
pub fn push(&mut self, elem: T) {
if self.len == self.cap {
self.grow();
}
unsafe {
ptr::write(self.ptr.offset(self.len as isize), elem);
ptr::write(self.ptr.as_ptr().offset(self.len as isize), elem);
}
// Can't fail, we'll OOM first.
@ -47,9 +49,7 @@ pub fn pop(&mut self) -> Option<T> {
None
} else {
self.len -= 1;
unsafe {
Some(ptr::read(self.ptr.offset(self.len as isize)))
}
unsafe { Some(ptr::read(self.ptr.as_ptr().offset(self.len as isize))) }
}
}
```

@ -18,45 +18,47 @@ struct RawVec<T> {
impl<T> RawVec<T> {
fn new() -> Self {
assert!(mem::size_of::<T>() != 0, "TODO: implement ZST support");
RawVec { ptr: Unique::empty(), cap: 0 }
Self {
ptr: Unique::empty(),
cap: 0,
}
}
// unchanged from Vec
fn grow(&mut self) {
// this is all pretty delicate, so let's say it's all unsafe
unsafe {
let align = mem::align_of::<T>();
let elem_size = mem::size_of::<T>();
let layout = alloc::Layout::new::<T>();
let (new_cap, ptr) = if self.cap == 0 {
let ptr = heap::allocate(elem_size, align);
let ptr = alloc::alloc(layout);
(1, ptr)
} else {
let new_cap = 2 * self.cap;
let ptr = heap::reallocate(self.ptr.as_ptr() as *mut _,
self.cap * elem_size,
new_cap * elem_size,
align);
let new_cap = self.cap * 2;
// `realloc` must be given the layout of the *existing* allocation
let old_layout = alloc::Layout::from_size_align_unchecked(
self.cap * mem::size_of::<T>(),
mem::align_of::<T>(),
);
let ptr = alloc::realloc(
self.ptr.as_ptr() as *mut u8,
old_layout,
new_cap * mem::size_of::<T>(),
);
(new_cap, ptr)
};
// If allocate or reallocate fail, we'll get `null` back
if ptr.is_null() { oom() }
if ptr.is_null() {
alloc::handle_alloc_error(layout);
}
self.ptr = Unique::new(ptr as *mut _);
self.ptr = Unique::new_unchecked(ptr as *mut T);
self.cap = new_cap;
}
}
}
impl<T> Drop for RawVec<T> {
fn drop(&mut self) {
if self.cap != 0 {
let align = mem::align_of::<T>();
let elem_size = mem::size_of::<T>();
let num_bytes = elem_size * self.cap;
unsafe {
heap::deallocate(self.ptr.as_mut() as *mut _, num_bytes, align);
// free the whole buffer: `cap` elements, not just one
let layout = alloc::Layout::from_size_align_unchecked(self.cap * mem::size_of::<T>(), mem::align_of::<T>());
alloc::dealloc(self.ptr.as_ptr() as *mut u8, layout);
}
}
}
@ -116,15 +118,20 @@ impl<T> Drop for IntoIter<T> {
impl<T> Vec<T> {
pub fn into_iter(self) -> IntoIter<T> {
unsafe {
// need to use ptr::read to unsafely move the buf out since it's
// need to use `ptr::read` to unsafely move the buf out since it's
// not Copy, and Vec implements Drop (so we can't destructure it).
let buf = ptr::read(&self.buf);
let len = self.len;
mem::forget(self);
IntoIter {
start: *buf.ptr,
end: buf.ptr.offset(len as isize),
start: buf.ptr.as_ptr(),
end: if buf.cap == 0 {
// can't offset off this pointer, it's not allocated!
buf.ptr.as_ptr()
} else {
buf.ptr.as_ptr().offset(len as isize)
},
_buf: buf,
}
}

@ -35,39 +35,46 @@ method of RawVec.
```rust,ignore
impl<T> RawVec<T> {
fn new() -> Self {
// !0 is usize::MAX. This branch should be stripped at compile time.
let cap = if mem::size_of::<T>() == 0 { !0 } else { 0 };
// This branch should be stripped at compile time.
let cap = if mem::size_of::<T>() == 0 {
std::usize::MAX
} else {
0
};
// Unique::empty() doubles as "unallocated" and "zero-sized allocation"
RawVec { ptr: Unique::empty(), cap: cap }
Self {
ptr: Unique::empty(),
cap,
}
}
fn grow(&mut self) {
unsafe {
let elem_size = mem::size_of::<T>();
// since we set the capacity to usize::MAX when elem_size is
// Since we set the capacity to `usize::MAX` when the element size is
// 0, getting here necessarily means the Vec is overfull.
assert!(elem_size != 0, "capacity overflow");
assert!(mem::size_of::<T>() != 0, "capacity overflow");
let align = mem::align_of::<T>();
let layout = alloc::Layout::new::<T>();
let (new_cap, ptr) = if self.cap == 0 {
let ptr = heap::allocate(elem_size, align);
let ptr = alloc::alloc(layout);
(1, ptr)
} else {
let new_cap = 2 * self.cap;
let ptr = heap::reallocate(self.ptr.as_ptr() as *mut _,
self.cap * elem_size,
new_cap * elem_size,
align);
let new_cap = self.cap * 2;
// `realloc` must be given the layout of the *existing* allocation
let old_layout = alloc::Layout::from_size_align_unchecked(
self.cap * mem::size_of::<T>(),
mem::align_of::<T>(),
);
let ptr = alloc::realloc(
self.ptr.as_ptr() as *mut u8,
old_layout,
new_cap * mem::size_of::<T>(),
);
(new_cap, ptr)
};
// If allocate or reallocate fail, we'll get `null` back
if ptr.is_null() { oom() }
if ptr.is_null() {
alloc::handle_alloc_error(layout);
}
self.ptr = Unique::new(ptr as *mut _);
self.ptr = Unique::new_unchecked(ptr as *mut T);
self.cap = new_cap;
}
}
@ -75,15 +82,10 @@ impl<T> RawVec<T> {
impl<T> Drop for RawVec<T> {
fn drop(&mut self) {
let elem_size = mem::size_of::<T>();
// don't free zero-sized allocations, as they were never allocated.
if self.cap != 0 && elem_size != 0 {
let align = mem::align_of::<T>();
let num_bytes = elem_size * self.cap;
// Do not free zero-sized allocations, as they were never allocated.
if self.cap != 0 && mem::size_of::<T>() != 0 {
unsafe {
heap::deallocate(self.ptr.as_ptr() as *mut _, num_bytes, align);
// free the whole buffer: `cap` elements, not just one
let layout = alloc::Layout::from_size_align_unchecked(self.cap * mem::size_of::<T>(), mem::align_of::<T>());
alloc::dealloc(self.ptr.as_ptr() as *mut u8, layout);
}
}
}
@ -106,15 +108,15 @@ increment, and then cast them back:
```rust,ignore
impl<T> RawValIter<T> {
unsafe fn new(slice: &[T]) -> Self {
RawValIter {
Self {
start: slice.as_ptr(),
end: if mem::size_of::<T>() == 0 {
((slice.as_ptr() as usize) + slice.len()) as *const _
((slice.as_ptr() as usize) + slice.len()) as *const T
} else if slice.len() == 0 {
slice.as_ptr()
} else {
slice.as_ptr().offset(slice.len() as isize)
}
},
}
}
}
@ -129,14 +131,14 @@ map size 0 to divide by 1.
```rust,ignore
impl<T> Iterator for RawValIter<T> {
type Item = T;
fn next(&mut self) -> Option<T> {
fn next(&mut self) -> Option<Self::Item> {
if self.start == self.end {
None
} else {
unsafe {
let result = ptr::read(self.start);
self.start = if mem::size_of::<T>() == 0 {
(self.start as usize + 1) as *const _
(self.start as usize + 1) as *const T
} else {
self.start.offset(1)
};
@ -146,21 +148,24 @@ impl<T> Iterator for RawValIter<T> {
}
fn size_hint(&self) -> (usize, Option<usize>) {
let elem_size = mem::size_of::<T>();
let len = (self.end as usize - self.start as usize)
/ if elem_size == 0 { 1 } else { elem_size };
let elem_size = if mem::size_of::<T>() == 0 {
1
} else {
mem::size_of::<T>()
};
let len = (self.end as usize - self.start as usize) / elem_size;
(len, Some(len))
}
}
impl<T> DoubleEndedIterator for RawValIter<T> {
fn next_back(&mut self) -> Option<T> {
fn next_back(&mut self) -> Option<Self::Item> {
if self.start == self.end {
None
} else {
unsafe {
self.end = if mem::size_of::<T>() == 0 {
(self.end as usize - 1) as *const _
(self.end as usize - 1) as *const T
} else {
self.end.offset(-1)
};
