Update vector code examples

The code samples differed from the final code and included
deprecated API calls.
pull/255/head
Lukas Hettwer 4 years ago committed by Alexis Beingessner
parent a3180eb461
commit a8584998ea
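For context on the API migration in the hunks below: the old unstable `heap::allocate` / `heap::reallocate` / `heap::deallocate` functions (and `oom()`) are gone; the updated samples use the unstable `Allocator` trait on `Global`, with sizes and alignments described by `Layout`. A minimal sketch of the same `Layout`-driven pattern using only the stable `std::alloc` free functions (not code from this commit):

```rust
use std::alloc::{alloc, dealloc, handle_alloc_error, Layout};

fn main() {
    // `Layout::array` computes size and alignment for 4 u32s, replacing the
    // manual `elem_size`/`align` bookkeeping of the old `heap::*` API.
    let layout = Layout::array::<u32>(4).unwrap();
    unsafe {
        let ptr = alloc(layout);
        // The stable functions signal failure with a null pointer.
        if ptr.is_null() {
            handle_alloc_error(layout);
        }
        // ... use the memory ...
        dealloc(ptr, layout);
    }
}
```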

@ -16,10 +16,6 @@ want to use `dangling` because there's no real allocation to talk about but
So:
```rust,ignore
-#![feature(alloc, heap_api)]
-use std::mem;
impl<T> Vec<T> {
fn new() -> Self {
assert!(mem::size_of::<T>() != 0, "We're not ready to handle ZSTs");
@ -76,9 +72,7 @@ compiler to be able to reason about data dependencies and aliasing.
As a simple example, consider the following fragment of code:
-```rust
-# let x = &mut 0;
-# let y = &mut 0;
+```rust,ignore
*x *= 7;
*y *= 3;
```
@ -158,22 +152,18 @@ such we will guard against this case explicitly.
Ok with all the nonsense out of the way, let's actually allocate some memory:
```rust,ignore
-use std::alloc::oom;
fn grow(&mut self) {
// this is all pretty delicate, so let's say it's all unsafe
unsafe {
-// current API requires us to specify size and alignment manually.
-let align = mem::align_of::<T>();
let elem_size = mem::size_of::<T>();
let (new_cap, ptr) = if self.cap == 0 {
-let ptr = heap::allocate(elem_size, align);
+let ptr = Global.allocate(Layout::array::<T>(1).unwrap());
(1, ptr)
} else {
// as an invariant, we can assume that `self.cap < isize::MAX`,
// so this doesn't need to be checked.
-let new_cap = self.cap * 2;
+let new_cap = 2 * self.cap;
// Similarly this can't overflow due to previously allocating this
let old_num_bytes = self.cap * elem_size;
@ -186,18 +176,24 @@ fn grow(&mut self) {
assert!(old_num_bytes <= (isize::MAX as usize) / 2,
"capacity overflow");
let new_num_bytes = old_num_bytes * 2;
-let ptr = heap::reallocate(self.ptr.as_ptr() as *mut _,
-old_num_bytes,
-new_num_bytes,
-align);
+let c: NonNull<T> = self.ptr.into();
+let ptr = Global.grow(c.cast(),
+Layout::array::<T>(self.cap).unwrap(),
+Layout::array::<T>(new_cap).unwrap());
(new_cap, ptr)
};
-// If allocate or reallocate fail, we'll get `null` back
-if ptr.is_null() { oom(); }
+// If allocate or reallocate fail, oom
+if ptr.is_err() {
+handle_alloc_error(Layout::from_size_align_unchecked(
+new_cap * elem_size,
+mem::align_of::<T>(),
+))
+}
+let ptr = ptr.unwrap();
-self.ptr = Unique::new(ptr as *mut _);
+self.ptr = Unique::new_unchecked(ptr.as_ptr() as *mut _);
self.cap = new_cap;
}
}
@ -205,4 +201,3 @@ fn grow(&mut self) {
Nothing particularly tricky here. Just computing sizes and alignments and doing
some careful multiplication checks.
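The `Layout::array::<T>(cap).unwrap()` calls above are what replace most of that careful multiplication: `Layout::array` fails cleanly if the total size would overflow. A quick illustration (stable Rust, not from this commit):

```rust
use std::alloc::Layout;

fn main() {
    // Overflowing `n * size_of::<T>()` yields an Err instead of wrapping.
    assert!(Layout::array::<u64>(usize::MAX).is_err());

    let layout = Layout::array::<u64>(8).unwrap();
    assert_eq!(layout.size(), 64);
    assert_eq!(layout.align(), std::mem::align_of::<u64>());
}
```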

@ -7,7 +7,7 @@ ask Rust if `T` `needs_drop` and omit the calls to `pop`. However in practice
LLVM is *really* good at removing simple side-effect free code like this, so I
wouldn't bother unless you notice it's not being stripped (in this case it is).
-We must not call `heap::deallocate` when `self.cap == 0`, as in this case we
+We must not call `Global.deallocate` when `self.cap == 0`, as in this case we
haven't actually allocated any memory.
@ -17,11 +17,10 @@ impl<T> Drop for Vec<T> {
if self.cap != 0 {
while let Some(_) = self.pop() { }
-let align = mem::align_of::<T>();
-let elem_size = mem::size_of::<T>();
-let num_bytes = elem_size * self.cap;
unsafe {
-heap::deallocate(self.ptr.as_ptr() as *mut _, num_bytes, align);
+let c: NonNull<T> = self.ptr.into();
+Global.deallocate(c.cast(),
+Layout::array::<T>(self.cap).unwrap());
}
}
}
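The `let c: NonNull<T> = self.ptr.into(); c.cast()` two-step appears throughout: the allocator methods take an untyped `NonNull<u8>`, so the typed pointer is cast first. `cast` only changes the pointee type, never the address; a standalone check (not from this commit):

```rust
use std::ptr::NonNull;

fn main() {
    let mut x = 7u32;
    let typed: NonNull<u32> = NonNull::from(&mut x);
    // Erase the type for the allocator; the address is unchanged.
    let untyped: NonNull<u8> = typed.cast();
    assert_eq!(untyped.as_ptr() as usize, typed.as_ptr() as usize);
}
```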

@ -26,7 +26,7 @@ impl<'a, T> Iterator for Drain<'a, T> {
-- wait, this is seeming familiar. Let's do some more compression. Both
IntoIter and Drain have the exact same structure, let's just factor it out.
-```rust
+```rust,ignore
struct RawValIter<T> {
start: *const T,
end: *const T,
@ -75,7 +75,7 @@ impl<T> DoubleEndedIterator for IntoIter<T> {
impl<T> Drop for IntoIter<T> {
fn drop(&mut self) {
-for _ in &mut self.iter {}
+for _ in &mut *self {}
}
}
@ -123,7 +123,7 @@ impl<'a, T> DoubleEndedIterator for Drain<'a, T> {
impl<'a, T> Drop for Drain<'a, T> {
fn drop(&mut self) {
-for _ in &mut self.iter {}
+for _ in &mut *self {}
}
}
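Both spellings drain the remaining elements; `&mut *self` just goes through `IntoIter`'s (or `Drain`'s) own `Iterator` impl instead of reaching into the private `iter` field. The by-reference draining trick relies on `&mut I` being an `Iterator` whenever `I` is:

```rust
fn main() {
    let mut iter = vec![1, 2, 3].into_iter();
    assert_eq!(iter.next(), Some(1));

    // `&mut iter` implements Iterator, so the loop drains the remaining
    // items without consuming `iter` itself -- the same pattern as the
    // `for _ in &mut *self {}` lines in the Drop impls above.
    for _ in &mut iter {}

    assert_eq!(iter.next(), None);
}
```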

@ -174,7 +174,7 @@ impl<T> Vec<T> {
impl<T> Drop for Vec<T> {
fn drop(&mut self) {
while let Some(_) = self.pop() {}
-// allocation is handled by RawVec
+// deallocation is handled by RawVec
}
}
@ -214,7 +214,7 @@ impl<T> RawValIter<T> {
slice.as_ptr()
} else {
slice.as_ptr().offset(slice.len() as isize)
-}
+},
}
}
}
@ -307,10 +307,10 @@ impl<'a, T> DoubleEndedIterator for Drain<'a, T> {
impl<'a, T> Drop for Drain<'a, T> {
fn drop(&mut self) {
// pre-drain the iter
-for _ in &mut self.iter {}
+for _ in &mut *self {}
}
}
#
# fn main() {
# tests::create_push_pop();
# tests::iter_test();
@ -318,7 +318,7 @@ impl<'a, T> Drop for Drain<'a, T> {
# tests::test_zst();
# println!("All tests finished OK");
# }
#
# mod tests {
# use super::*;
# pub fn create_push_pop() {

@ -22,11 +22,11 @@ pub fn insert(&mut self, index: usize, elem: T) {
unsafe {
if index < self.len {
// ptr::copy(src, dest, len): "copy from source to dest len elems"
-ptr::copy(self.ptr.offset(index as isize),
-self.ptr.offset(index as isize + 1),
+ptr::copy(self.ptr.as_ptr().offset(index as isize),
+self.ptr.as_ptr().offset(index as isize + 1),
self.len - index);
}
-ptr::write(self.ptr.offset(index as isize), elem);
+ptr::write(self.ptr.as_ptr().offset(index as isize), elem);
self.len += 1;
}
}
@ -41,9 +41,9 @@ pub fn remove(&mut self, index: usize) -> T {
assert!(index < self.len, "index out of bounds");
unsafe {
self.len -= 1;
-let result = ptr::read(self.ptr.offset(index as isize));
-ptr::copy(self.ptr.offset(index as isize + 1),
-self.ptr.offset(index as isize),
+let result = ptr::read(self.ptr.as_ptr().offset(index as isize));
+ptr::copy(self.ptr.as_ptr().offset(index as isize + 1),
+self.ptr.as_ptr().offset(index as isize),
self.len - index);
result
}
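Only `as_ptr()` changes here; the `ptr::copy` semantics are untouched. As the comment says, `ptr::copy(src, dest, len)` is the memmove-style overlapping copy, which is what lets `insert` shift the tail right over itself. A self-contained sketch of that shift (using `add` in place of `offset(x as isize)`; not from this commit):

```rust
use std::ptr;

fn main() {
    let mut buf = [1, 2, 3, 4, 0];
    unsafe {
        let p = buf.as_mut_ptr();
        // Shift elements 1..4 one slot right to open a hole at index 1;
        // the source and destination ranges overlap, which memmove allows.
        ptr::copy(p.add(1), p.add(2), 3);
        // Fill the hole, as `insert` does.
        ptr::write(p.add(1), 9);
    }
    assert_eq!(buf, [1, 9, 2, 3, 4]);
}
```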

@ -43,7 +43,7 @@ dropped.
So we're going to use the following struct:
```rust,ignore
-struct IntoIter<T> {
+pub struct IntoIter<T> {
buf: Unique<T>,
cap: usize,
start: *const T,
@ -55,7 +55,7 @@ And this is what we end up with for initialization:
```rust,ignore
impl<T> Vec<T> {
-fn into_iter(self) -> IntoIter<T> {
+pub fn into_iter(self) -> IntoIter<T> {
// Can't destructure Vec since it's Drop
let ptr = self.ptr;
let cap = self.cap;
@ -68,13 +68,13 @@ impl<T> Vec<T> {
IntoIter {
buf: ptr,
cap: cap,
-start: *ptr,
+start: ptr.as_ptr(),
end: if cap == 0 {
// can't offset off this pointer, it's not allocated!
-*ptr
+ptr.as_ptr()
} else {
-ptr.offset(len as isize)
-}
+ptr.as_ptr().offset(len as isize)
+},
}
}
}
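The `cap == 0` special case exists because `end` is normally a one-past-the-end pointer, and you can't offset off a pointer with no allocation behind it. One-past-the-end is fine to compute and compare, just never to dereference (standalone sketch, not from this commit):

```rust
fn main() {
    let v = [10u32, 20, 30];
    let start = v.as_ptr();

    // One-past-the-end: legal to compute and compare, never to dereference.
    let end = unsafe { start.add(v.len()) };

    assert_eq!(end as usize - start as usize, 3 * std::mem::size_of::<u32>());
}
```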
@ -135,11 +135,10 @@ impl<T> Drop for IntoIter<T> {
// drop any remaining elements
for _ in &mut *self {}
-let align = mem::align_of::<T>();
-let elem_size = mem::size_of::<T>();
-let num_bytes = elem_size * self.cap;
unsafe {
-heap::deallocate(self.buf.as_ptr() as *mut _, num_bytes, align);
+let c: NonNull<T> = self.buf.into();
+Global.deallocate(c.cast(),
+Layout::array::<T>(self.cap).unwrap());
}
}
}

@ -6,13 +6,12 @@ elements that have been initialized.
Naively, this means we just want this design:
-```rust
+```rust,ignore
pub struct Vec<T> {
ptr: *mut T,
cap: usize,
len: usize,
}
-# fn main() {}
```
And indeed this would compile. Unfortunately, it would be incorrect. First, the
@ -37,7 +36,7 @@ As a recap, Unique is a wrapper around a raw pointer that declares that:
We can implement all of the above requirements except for the last
one in stable Rust:
-```rust
+```rust,ignore
use std::marker::PhantomData;
use std::ops::Deref;
use std::mem;
@ -61,8 +60,6 @@ impl<T> Unique<T> {
self.ptr as *mut T
}
}
-# fn main() {}
```
Unfortunately the mechanism for stating that your value is non-zero is
@ -70,18 +67,12 @@ unstable and unlikely to be stabilized soon. As such we're just going to
take the hit and use std's Unique:
-```rust
-#![feature(ptr_internals)]
-use std::ptr::{Unique, self};
+```rust,ignore
pub struct Vec<T> {
ptr: Unique<T>,
cap: usize,
len: usize,
}
-# fn main() {}
```
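If you want the same shape without unstable features, `NonNull<T>` gives you non-null plus covariance on stable; the rest of what `Unique` provides can be recovered by hand. A sketch of the usual stable stand-in (my assumption, not code from this commit):

```rust
#![allow(dead_code)]
use std::marker::PhantomData;
use std::ptr::NonNull;

// `NonNull` is non-null and covariant over `T`. `PhantomData<T>` recovers
// the "we may own a `T`" signal for drop check, and the manual Send/Sync
// impls restore what `Unique` would have provided.
pub struct Vec<T> {
    ptr: NonNull<T>,
    cap: usize,
    len: usize,
    _marker: PhantomData<T>,
}

unsafe impl<T: Send> Send for Vec<T> {}
unsafe impl<T: Sync> Sync for Vec<T> {}

fn main() {}
```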
If you don't care about the null-pointer optimization, then you can use the

@ -22,7 +22,7 @@ pub fn push(&mut self, elem: T) {
if self.len == self.cap { self.grow(); }
unsafe {
-ptr::write(self.ptr.offset(self.len as isize), elem);
+ptr::write(self.ptr.as_ptr().offset(self.len as isize), elem);
}
// Can't fail, we'll OOM first.
@ -48,7 +48,7 @@ pub fn pop(&mut self) -> Option<T> {
} else {
self.len -= 1;
unsafe {
-Some(ptr::read(self.ptr.offset(self.len as isize)))
+Some(ptr::read(self.ptr.as_ptr().offset(self.len as isize)))
}
}
}
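Only `as_ptr()` changes here too, but `ptr::write` and `ptr::read` are the heart of `push`/`pop`: `write` moves a value in without dropping whatever bytes were there, and `read` moves it back out. A standalone reminder (not from this commit):

```rust
use std::mem::MaybeUninit;
use std::ptr;

fn main() {
    let mut slot = MaybeUninit::<String>::uninit();
    unsafe {
        // Move a value into uninitialized memory without reading or
        // dropping the old (garbage) contents -- what `push` relies on.
        ptr::write(slot.as_mut_ptr(), String::from("hi"));
        // Move it back out, leaving the slot logically uninitialized
        // again -- the `pop` side of the pair.
        let s: String = ptr::read(slot.as_mut_ptr());
        assert_eq!(s, "hi");
    }
}
```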

@ -1,4 +1,3 @@
# RawVec
We've actually reached an interesting situation here: we've duplicated the logic
@ -17,46 +16,50 @@ struct RawVec<T> {
impl<T> RawVec<T> {
fn new() -> Self {
assert!(mem::size_of::<T>() != 0, "TODO: implement ZST support");
assert!(mem::size_of::<T>() != 0, "We're not ready to handle ZSTs");
RawVec { ptr: Unique::dangling(), cap: 0 }
}
// unchanged from Vec
fn grow(&mut self) {
unsafe {
-let align = mem::align_of::<T>();
let elem_size = mem::size_of::<T>();
let (new_cap, ptr) = if self.cap == 0 {
-let ptr = heap::allocate(elem_size, align);
+let ptr = Global.allocate(Layout::array::<T>(1).unwrap());
(1, ptr)
} else {
let new_cap = 2 * self.cap;
-let ptr = heap::reallocate(self.ptr.as_ptr() as *mut _,
-self.cap * elem_size,
-new_cap * elem_size,
-align);
+let c: NonNull<T> = self.ptr.into();
+let ptr = Global.grow(c.cast(),
+Layout::array::<T>(self.cap).unwrap(),
+Layout::array::<T>(new_cap).unwrap());
(new_cap, ptr)
};
-// If allocate or reallocate fail, we'll get `null` back
-if ptr.is_null() { oom() }
+// If allocate or reallocate fail, oom
+if ptr.is_err() {
+handle_alloc_error(Layout::from_size_align_unchecked(
+new_cap * elem_size,
+mem::align_of::<T>(),
+))
+}
-self.ptr = Unique::new(ptr as *mut _);
+let ptr = ptr.unwrap();
+self.ptr = Unique::new_unchecked(ptr.as_ptr() as *mut _);
self.cap = new_cap;
}
}
}
impl<T> Drop for RawVec<T> {
fn drop(&mut self) {
if self.cap != 0 {
-let align = mem::align_of::<T>();
-let elem_size = mem::size_of::<T>();
-let num_bytes = elem_size * self.cap;
unsafe {
-heap::deallocate(self.ptr.as_mut() as *mut _, num_bytes, align);
+let c: NonNull<T> = self.ptr.into();
+Global.deallocate(c.cast(),
+Layout::array::<T>(self.cap).unwrap());
}
}
}
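The updated `grow` above still needs nightly for `Global` and the `Allocator` trait. For comparison, the same doubling logic is expressible on stable with the `std::alloc` free functions; a sketch under the same no-ZST assumption (not this commit's code):

```rust
use std::alloc::{alloc, handle_alloc_error, realloc, Layout};
use std::ptr::NonNull;

struct RawVec<T> {
    ptr: NonNull<T>,
    cap: usize,
}

impl<T> RawVec<T> {
    fn grow(&mut self) {
        // Same doubling policy as the diff; Layout::array checks the
        // size arithmetic for us.
        let (new_cap, new_layout) = if self.cap == 0 {
            (1, Layout::array::<T>(1).unwrap())
        } else {
            let new_cap = 2 * self.cap;
            (new_cap, Layout::array::<T>(new_cap).unwrap())
        };

        let new_ptr = if self.cap == 0 {
            unsafe { alloc(new_layout) }
        } else {
            let old_layout = Layout::array::<T>(self.cap).unwrap();
            unsafe { realloc(self.ptr.as_ptr() as *mut u8, old_layout, new_layout.size()) }
        };

        // The stable API reports failure as null rather than a Result.
        self.ptr = match NonNull::new(new_ptr as *mut T) {
            Some(p) => p,
            None => handle_alloc_error(new_layout),
        };
        self.cap = new_cap;
    }
}

fn main() {
    let mut buf = RawVec::<u32> { ptr: NonNull::dangling(), cap: 0 };
    buf.grow();
    assert_eq!(buf.cap, 1);
    // (Leaks the small allocation; a real RawVec frees it in Drop.)
}
```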
@ -81,7 +84,7 @@ impl<T> Vec<T> {
}
// push/pop/insert/remove largely unchanged:
-// * `self.ptr -> self.ptr()`
+// * `self.ptr.as_ptr() -> self.ptr()`
// * `self.cap -> self.cap()`
// * `self.grow -> self.buf.grow()`
}
@ -97,7 +100,7 @@ impl<T> Drop for Vec<T> {
And finally we can really simplify IntoIter:
```rust,ignore
-struct IntoIter<T> {
+pub struct IntoIter<T> {
_buf: RawVec<T>, // we don't actually care about this. Just need it to live.
start: *const T,
end: *const T,
@ -123,8 +126,8 @@ impl<T> Vec<T> {
mem::forget(self);
IntoIter {
-start: *buf.ptr,
-end: buf.ptr.offset(len as isize),
+start: buf.ptr.as_ptr(),
+end: buf.ptr.as_ptr().offset(len as isize),
_buf: buf,
}
}

@ -11,7 +11,7 @@ zero-sized types. We need to be careful of two things:
C-style pointer iterator.
Thankfully we abstracted out pointer-iterators and allocating handling into
-RawValIter and RawVec respectively. How mysteriously convenient.
+`RawValIter` and `RawVec` respectively. How mysteriously convenient.
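A concrete way to see why the C-style pointer iterator breaks for zero-sized types: offsetting by one zero-sized element moves the pointer zero bytes, so stepping `start` toward `end` goes nowhere (standalone sketch, not from this commit):

```rust
use std::ptr::NonNull;

fn main() {
    struct Zst;

    // Offsetting by one ZST element advances the address by 0 bytes, so a
    // `start != end` loop that steps the pointer would never terminate.
    let p = NonNull::<Zst>::dangling().as_ptr();
    let q = unsafe { p.add(1) };
    assert_eq!(p as usize, q as usize);
}
```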
@ -30,7 +30,7 @@ no longer valid with zero-sized types. We must explicitly guard against capacity
overflow for zero-sized types.
Due to our current architecture, all this means is writing 3 guards, one in each
-method of RawVec.
+method of `RawVec`.
```rust,ignore
impl<T> RawVec<T> {
@ -50,24 +50,29 @@ impl<T> RawVec<T> {
// 0, getting to here necessarily means the Vec is overfull.
assert!(elem_size != 0, "capacity overflow");
-let align = mem::align_of::<T>();
let (new_cap, ptr) = if self.cap == 0 {
-let ptr = heap::allocate(elem_size, align);
+let ptr = Global.allocate(Layout::array::<T>(1).unwrap());
(1, ptr)
} else {
let new_cap = 2 * self.cap;
-let ptr = heap::reallocate(self.ptr.as_ptr() as *mut _,
-self.cap * elem_size,
-new_cap * elem_size,
-align);
+let c: NonNull<T> = self.ptr.into();
+let ptr = Global.grow(c.cast(),
+Layout::array::<T>(self.cap).unwrap(),
+Layout::array::<T>(new_cap).unwrap());
(new_cap, ptr)
};
-// If allocate or reallocate fail, we'll get `null` back
-if ptr.is_null() { oom() }
+// If allocate or reallocate fail, oom
+if ptr.is_err() {
+handle_alloc_error(Layout::from_size_align_unchecked(
+new_cap * elem_size,
+mem::align_of::<T>(),
+))
+}
-self.ptr = Unique::new(ptr as *mut _);
+let ptr = ptr.unwrap();
+self.ptr = Unique::new_unchecked(ptr.as_ptr() as *mut _);
self.cap = new_cap;
}
}
@ -79,11 +84,10 @@ impl<T> Drop for RawVec<T> {
// don't free zero-sized allocations, as they were never allocated.
if self.cap != 0 && elem_size != 0 {
-let align = mem::align_of::<T>();
-let num_bytes = elem_size * self.cap;
unsafe {
-heap::deallocate(self.ptr.as_ptr() as *mut _, num_bytes, align);
+let c: NonNull<T> = self.ptr.into();
+Global.deallocate(c.cast(),
+Layout::array::<T>(self.cap).unwrap());
}
}
}
@ -114,7 +118,7 @@ impl<T> RawValIter<T> {
slice.as_ptr()
} else {
slice.as_ptr().offset(slice.len() as isize)
-}
+},
}
}
}
