 extern crate alloc;
 extern crate rustc_data_structures;
 
-use rustc_data_structures::sync::MTLock;
+use rustc_data_structures::defer_deallocs::DeferDeallocs;
+use rustc_data_structures::sync::{MTLock, WorkerLocal};
 
 use std::cell::{Cell, RefCell};
 use std::cmp;
@@ -44,7 +45,6 @@ use std::marker::{PhantomData, Send};
 use std::mem;
 use std::ptr;
 use std::slice;
-
 use alloc::raw_vec::RawVec;
 
 /// An arena that can hold objects of only one type.
@@ -132,30 +132,54 @@ impl<T> TypedArena<T> {
     /// Allocates an object in the `TypedArena`, returning a reference to it.
     #[inline]
     pub fn alloc(&self, object: T) -> &mut T {
-        if self.ptr == self.end {
-            self.grow(1)
-        }
+        // Zero-sized path
+        if mem::size_of::<T>() == 0 {
+            if self.ptr == self.end {
+                self.grow(1)
+            }
 
-        unsafe {
-            if mem::size_of::<T>() == 0 {
+            unsafe {
                 self.ptr
                     .set(intrinsics::arith_offset(self.ptr.get() as *mut u8, 1)
                         as *mut T);
                 let ptr = mem::align_of::<T>() as *mut T;
                 // Don't drop the object. This `write` is equivalent to `forget`.
                 ptr::write(ptr, object);
-                &mut *ptr
+                return &mut *ptr;
+            }
+        }
+
+        let ptr = self.ptr.get();
+
+        unsafe {
+            if std::intrinsics::unlikely(ptr == self.end.get()) {
+                self.grow_and_alloc(object)
             } else {
-                let ptr = self.ptr.get();
-                // Advance the pointer.
-                self.ptr.set(self.ptr.get().offset(1));
-                // Write into uninitialized memory.
-                ptr::write(ptr, object);
-                &mut *ptr
+                self.alloc_unchecked(ptr, object)
             }
         }
     }
 
+    #[inline(always)]
+    unsafe fn alloc_unchecked(&self, ptr: *mut T, object: T) -> &mut T {
+        // Advance the pointer.
+        self.ptr.set(ptr.offset(1));
+        // Write into uninitialized memory.
+        ptr::write(ptr, object);
+        &mut *ptr
+    }
+
+    #[inline(never)]
+    #[cold]
+    fn grow_and_alloc(&self, object: T) -> &mut T {
+        // We move the object into this function so that, if it has a destructor,
+        // the fast path above does not need an unwind handler to destroy it.
+        self.grow(1);
+        unsafe {
+            self.alloc_unchecked(self.ptr.get(), object)
+        }
+    }
+
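Aside (not part of the patch): a minimal caller sketch showing the intent of the split above. The hypothetical `fill` helper takes the inline bump path in `alloc` on most iterations; only when the current chunk is exhausted does control reach the out-of-line `#[cold]` `grow_and_alloc`.

fn fill<'a>(arena: &'a TypedArena<u64>, n: u64) -> Vec<&'a u64> {
    let mut refs = Vec::new();
    for i in 0..n {
        // `alloc` returns `&mut u64`; reborrow as shared to collect the references.
        refs.push(&*arena.alloc(i));
    }
    refs
}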
     /// Allocates a slice of objects that are copied into the `TypedArena`, returning a mutable
     /// reference to it. Will panic if passed a zero-sized type.
     ///
@@ -174,7 +198,7 @@ impl<T> TypedArena<T> {
         let available_capacity_bytes = self.end.get() as usize - self.ptr.get() as usize;
         let at_least_bytes = slice.len() * mem::size_of::<T>();
         if available_capacity_bytes < at_least_bytes {
-            self.grow(slice.len());
+            self.grow_slice(slice.len());
         }
 
         unsafe {
@@ -186,9 +210,14 @@ impl<T> TypedArena<T> {
         }
     }
 
-    /// Grows the arena.
     #[inline(never)]
     #[cold]
+    fn grow_slice(&self, n: usize) {
+        self.grow(n)
+    }
+
+    /// Grows the arena.
+    #[inline(always)]
     fn grow(&self, n: usize) {
         unsafe {
             let mut chunks = self.chunks.borrow_mut();
@@ -283,6 +312,22 @@ unsafe impl<#[may_dangle] T> Drop for TypedArena<T> {
 
 unsafe impl<T: Send> Send for TypedArena<T> {}
 
+type BackingType = usize;
+const BLOCK_SIZE: usize = std::mem::size_of::<BackingType>();
+
+#[inline(always)]
+fn required_backing_types(bytes: usize) -> usize {
+    assert!(BLOCK_SIZE.is_power_of_two());
+    // FIXME: This addition could overflow
+    (bytes + BLOCK_SIZE - 1) / BLOCK_SIZE
+}
+
+#[inline(always)]
+fn align(val: usize, align: usize) -> usize {
+    assert!(align.is_power_of_two());
+    (val + align - 1) & !(align - 1)
+}
+
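Aside (not part of the patch): a worked example of the two helpers, assuming a 64-bit target where `BLOCK_SIZE == 8`; the test name is invented.

#[test]
fn backing_block_rounding() {
    // `required_backing_types` rounds a byte count up to whole `usize` blocks.
    assert_eq!(required_backing_types(1), 1);
    assert_eq!(required_backing_types(8), 1);
    assert_eq!(required_backing_types(9), 2);
    // `align` rounds a value up to the next multiple of a power-of-two alignment.
    assert_eq!(align(13, 8), 16);
    assert_eq!(align(16, 8), 16);
}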
 pub struct DroplessArena {
     /// A pointer to the next object to be allocated.
     ptr: Cell<*mut u8>,
@@ -292,7 +337,42 @@ pub struct DroplessArena {
     end: Cell<*mut u8>,
 
     /// A vector of arena chunks.
-    chunks: RefCell<Vec<TypedArenaChunk<u8>>>,
+    chunks: RefCell<Vec<TypedArenaChunk<BackingType>>>,
+}
+
+#[no_mangle]
+pub fn tatest1(a: &TypedArena<usize>) -> &usize {
+    a.alloc(64usize)
+}
+
+#[no_mangle]
+pub fn atest1(a: &DroplessArena) -> &usize {
+    a.alloc(64usize)
+}
+
+#[no_mangle]
+pub fn atest2(a: &SyncDroplessArena, b: Box<usize>) -> &Box<usize> {
+    a.promote(b)
+}
+
+#[no_mangle]
+pub fn atest6(a: &SyncDroplessArena, b: usize) -> &usize {
+    a.promote(b)
+}
+
+#[no_mangle]
+pub fn atest3(a: &DroplessArena) {
+    a.align(8);
+}
+
+#[no_mangle]
+pub fn atest4(a: &DroplessArena) {
+    a.align(16);
+}
+
+#[no_mangle]
+pub fn atest5(a: &DroplessArena) {
+    a.align(4);
 }
 
 unsafe impl Send for DroplessArena {}
@@ -310,7 +390,7 @@ impl Default for DroplessArena {
 
 impl DroplessArena {
     pub fn in_arena<T: ?Sized>(&self, ptr: *const T) -> bool {
-        let ptr = ptr as *const u8 as *mut u8;
+        let ptr = ptr as *const u8 as *mut BackingType;
         for chunk in &*self.chunks.borrow() {
             if chunk.start() <= ptr && ptr < chunk.end() {
                 return true;
@@ -322,62 +402,93 @@ impl DroplessArena {
 
     #[inline]
     fn align(&self, align: usize) {
+        // FIXME: The addition of `align` could overflow, in which case final_address
+        // will be 0. Do we have any guarantee that our chunk won't end up as the final
+        // bytes in our memory space?
        let final_address = ((self.ptr.get() as usize) + align - 1) & !(align - 1);
        self.ptr.set(final_address as *mut u8);
-        assert!(self.ptr <= self.end);
+
+        // Aligning to BLOCK_SIZE cannot go outside our current chunk, only up to its end.
+        if align > BLOCK_SIZE {
+            // For larger alignments we have to check that we didn't go out of bounds.
+            assert!(self.ptr <= self.end);
+        }
     }
 
-    #[inline(never)]
-    #[cold]
     fn grow(&self, needed_bytes: usize) {
         unsafe {
+            let needed_vals = required_backing_types(needed_bytes);
             let mut chunks = self.chunks.borrow_mut();
             let (chunk, mut new_capacity);
             if let Some(last_chunk) = chunks.last_mut() {
                 let used_bytes = self.ptr.get() as usize - last_chunk.start() as usize;
+                let used_vals = required_backing_types(used_bytes);
                 if last_chunk
                     .storage
-                    .reserve_in_place(used_bytes, needed_bytes)
+                    .reserve_in_place(used_vals, needed_vals)
                 {
-                    self.end.set(last_chunk.end());
+                    self.end.set(last_chunk.end() as *mut u8);
                     return;
                 } else {
                     new_capacity = last_chunk.storage.cap();
                     loop {
                         new_capacity = new_capacity.checked_mul(2).unwrap();
-                        if new_capacity >= used_bytes + needed_bytes {
+                        if new_capacity >= used_vals + needed_vals {
                             break;
                         }
                     }
                 }
             } else {
-                new_capacity = cmp::max(needed_bytes, PAGE);
+                new_capacity = cmp::max(needed_vals, required_backing_types(PAGE));
             }
-            chunk = TypedArenaChunk::<u8>::new(new_capacity);
-            self.ptr.set(chunk.start());
-            self.end.set(chunk.end());
+            chunk = TypedArenaChunk::<BackingType>::new(new_capacity);
+            self.ptr.set(chunk.start() as *mut u8);
+            self.end.set(chunk.end() as *mut u8);
             chunks.push(chunk);
         }
     }
 
+    #[inline(never)]
+    #[cold]
+    fn grow_and_alloc_raw(&self, bytes: usize) -> &mut [u8] {
+        self.grow(bytes);
+        unsafe {
+            self.alloc_raw_unchecked(self.ptr.get(), bytes)
+        }
+    }
+
+    #[inline(always)]
+    unsafe fn alloc_raw_unchecked(&self, start: *mut u8, bytes: usize) -> &mut [u8] {
+        // Tell LLVM that `start` is aligned to BLOCK_SIZE
+        std::intrinsics::assume(start as usize == align(start as usize, BLOCK_SIZE));
+
+        // Set the pointer past ourselves and align it
+        let end = start.offset(bytes as isize) as usize;
+        let end = align(end, BLOCK_SIZE) as *mut u8;
+        self.ptr.set(end);
+
+        // Return the result
+        slice::from_raw_parts_mut(start, bytes)
+    }
+
     #[inline]
     pub fn alloc_raw(&self, bytes: usize, align: usize) -> &mut [u8] {
+        // FIXME: Always align to 8 bytes here? Or to usize alignment?
         unsafe {
             assert!(bytes != 0);
+            assert!(align <= BLOCK_SIZE);
+            assert!(std::mem::align_of::<BackingType>() == std::mem::size_of::<BackingType>());
+            // FIXME: Check that `bytes` fits in an isize
 
-            self.align(align);
-
-            let future_end = intrinsics::arith_offset(self.ptr.get(), bytes as isize);
-            if (future_end as *mut u8) >= self.end.get() {
-                self.grow(bytes);
-            }
-
+            // FIXME: arith_offset could overflow here.
+            // Find some way to guarantee this doesn't happen for small fixed-size types.
             let ptr = self.ptr.get();
-            // Set the pointer past ourselves
-            self.ptr.set(
-                intrinsics::arith_offset(self.ptr.get(), bytes as isize) as *mut u8,
-            );
-            slice::from_raw_parts_mut(ptr, bytes)
+            let future_end = intrinsics::arith_offset(ptr, bytes as isize);
+            if std::intrinsics::unlikely((future_end as *mut u8) >= self.end.get()) {
+                self.grow_and_alloc_raw(bytes)
+            } else {
+                self.alloc_raw_unchecked(ptr, bytes)
+            }
         }
     }
 
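Aside (not part of the patch): a sketch of how a caller could sit on top of `alloc_raw` after this change. `alloc_u32` is a hypothetical helper; it relies only on what is visible above, namely that `align <= BLOCK_SIZE` is asserted and that every returned block starts at a `BLOCK_SIZE`-aligned address. The existing typed wrappers (such as the `alloc` used by `atest1` above) remain the normal entry points.

fn alloc_u32(arena: &DroplessArena, value: u32) -> &mut u32 {
    let bytes = arena.alloc_raw(std::mem::size_of::<u32>(), std::mem::align_of::<u32>());
    unsafe {
        // The block is BLOCK_SIZE-aligned, so it is sufficiently aligned for u32.
        let ptr = bytes.as_mut_ptr() as *mut u32;
        std::ptr::write(ptr, value);
        &mut *ptr
    }
}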
@@ -452,12 +563,39 @@ impl<T> SyncTypedArena<T> {
     }
 }
 
-#[derive(Default)]
+struct DropType {
+    drop_fn: unsafe fn(*mut u8),
+    obj: *mut u8,
+}
+
+unsafe fn drop_for_type<T>(to_drop: *mut u8) {
+    std::ptr::drop_in_place(to_drop as *mut T)
+}
+
+impl Drop for DropType {
+    fn drop(&mut self) {
+        unsafe {
+            (self.drop_fn)(self.obj)
+        }
+    }
+}
+
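Aside (not part of the patch): `DropType` works by type erasure; the monomorphized `drop_for_type::<T>` is stored as a plain `unsafe fn(*mut u8)`, so a single arena of `DropType` can hold destructors for arbitrarily many types. A standalone sketch of the same trick, with invented names:

use std::mem::ManuallyDrop;

unsafe fn drop_erased<T>(p: *mut u8) {
    std::ptr::drop_in_place(p as *mut T)
}

fn erased_drop_demo() {
    let mut value = ManuallyDrop::new(vec![1u32, 2, 3]);
    // Store the monomorphized instance as a type-erased function pointer.
    let drop_fn: unsafe fn(*mut u8) = drop_erased::<Vec<u32>>;
    let obj = &mut *value as *mut Vec<u32> as *mut u8;
    // Runs the Vec's destructor through the erased pointer; `ManuallyDrop`
    // prevents a second drop when `value` goes out of scope.
    unsafe { drop_fn(obj) }
}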
 pub struct SyncDroplessArena {
+    // Ordered so `deferred` gets dropped before the arena,
+    // since the destructors it runs can reference memory in the arena.
+    deferred: WorkerLocal<TypedArena<DropType>>,
     lock: MTLock<DroplessArena>,
 }
 
 impl SyncDroplessArena {
+    #[inline]
+    pub fn new() -> Self {
+        SyncDroplessArena {
+            lock: Default::default(),
+            deferred: WorkerLocal::new(|_| Default::default()),
+        }
+    }
+
     #[inline(always)]
     pub fn in_arena<T: ?Sized>(&self, ptr: *const T) -> bool {
         self.lock.lock().in_arena(ptr)
@@ -483,6 +621,28 @@ impl SyncDroplessArena {
         // Extend the lifetime of the result since it's limited to the lock guard
         unsafe { &mut *(self.lock.lock().alloc_slice(slice) as *mut [T]) }
     }
+
+    #[inline]
+    pub fn promote<T: DeferDeallocs>(&self, object: T) -> &T {
+        let mem = self.alloc_raw(mem::size_of::<T>(), mem::align_of::<T>()) as *mut _ as *mut T;
+        let result = unsafe {
+            // Write into uninitialized memory.
+            ptr::write(mem, object);
+            &mut *mem
+        };
+        // Record the destructor after doing the allocation, as the allocation may panic,
+        // which would cause `object`'s destructor to run twice if it were recorded beforehand.
+        self.deferred.alloc(DropType {
+            drop_fn: drop_for_type::<T>,
+            obj: result as *mut T as *mut u8,
+        });
+        result
+    }
+
+    #[inline(always)]
+    pub fn promote_vec<T: DeferDeallocs>(&self, vec: Vec<T>) -> &[T] {
+        &self.promote(vec)[..]
+    }
 }
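Aside (not part of the patch): a usage sketch for `promote`/`promote_vec`. `intern_ids` is a hypothetical caller; it assumes `usize: DeferDeallocs`, which the `atest6` function above already relies on.

fn intern_ids(arena: &SyncDroplessArena, ids: Vec<usize>) -> &[usize] {
    // The Vec is moved into the arena; its heap buffer is freed only when the
    // arena's `deferred` drop list runs, i.e. when the arena itself is dropped.
    arena.promote_vec(ids)
}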
 
 #[cfg(test)]