Unverified commit c6407a0a authored by Yorick Peterse

Remove async finalisation of objects

Finalising objects was handled in one of two ways:

1. When reclaiming blocks we would schedule blocks that needed
   finalisation. These blocks were processed in a separate thread.
2. When allocating into a block that needed finalisation, all objects in
   this block in need of finalisation would be finalised.

The idea of this approach was to move the expensive finalising of
objects out of the garbage collection phase, at the cost of delaying
finalisation a bit.

Unfortunately, this approach was anything but simple. It also required
various data structures that took up memory. Finally, as part of the
finalisation process we had to scan over all objects, instead of just
lines.

In this commit we change the approach to a much simpler one. All
async-related code and data structures are removed. When allocating an
Immix block, we zero out its memory.

When allocating a new object into a previously used slot that needs
finalising, we first finalise the old object. This delays finalisation
until the memory is reused. In a typical program this should not pose a
problem, as memory is reused frequently. Indeed, this is something we
have observed with Inko's own test suite and a few other test programs:
memory usage actually goes down, due to fewer data structures being
needed, instead of going up.
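
The new flow is easy to demonstrate with a small standalone sketch. The
`Slot` type and its methods below are illustrative stand-ins (not Inko's
actual `Block`/`ObjectPointer` API), assuming a simplified model where a
zeroed slot means "empty":

```rust
// Minimal sketch of finalise-on-reuse: a zeroed slot holds no value, so
// the first write has nothing to finalise; later writes drop the old
// value before storing the new one, mirroring how write_to() now calls
// pointer.finalize() before ptr::write().
struct Slot {
    value: Option<Box<String>>, // stand-in for a heap value with a finaliser
}

impl Slot {
    // Freshly allocated blocks are zeroed, so their slots start out empty.
    fn new_zeroed() -> Self {
        Slot { value: None }
    }

    // Finalise whatever occupies the slot, then write the new value.
    fn write(&mut self, new_value: &str) {
        drop(self.value.take()); // a no-op while the slot is still zeroed
        self.value = Some(Box::new(new_value.to_string()));
    }
}

fn main() {
    let mut slot = Slot::new_zeroed();

    slot.write("first"); // nothing to finalise yet
    slot.write("second"); // finalises "first" as its memory is reused

    drop(slot); // dropping the slot finalises "second", as Block::drop does
}
```

The trade-off is that a dead object's finaliser may not run until its
slot is reused or its block is dropped, which is acceptable here because
memory is reused frequently.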
parent bde4250c
@@ -43,9 +43,6 @@ pub struct Config {
     /// The number of garbage collector threads to run.
     pub gc_threads: usize,
 
-    /// The number of finalizer threads to run.
-    pub finalizer_threads: usize,
-
     /// The number of threads to use for various generic parallel tasks such as
     /// scanning stack frames during garbage collection.
     pub generic_parallel_threads: usize,
@@ -89,7 +86,6 @@ impl Config {
             directories: Vec::new(),
             primary_threads: cpu_count,
             gc_threads: cpu_count,
-            finalizer_threads: cpu_count,
             blocking_threads: cpu_count,
             generic_parallel_threads: cpu_count,
             reductions: DEFAULT_REDUCTIONS,
@@ -112,7 +108,6 @@ impl Config {
         set_from_env!(self, primary_threads, "CONCURRENCY", usize);
         set_from_env!(self, blocking_threads, "CONCURRENCY", usize);
         set_from_env!(self, gc_threads, "CONCURRENCY", usize);
-        set_from_env!(self, finalizer_threads, "CONCURRENCY", usize);
         set_from_env!(self, generic_parallel_threads, "CONCURRENCY", usize);
 
         set_from_env!(self, reductions, "REDUCTIONS", usize);
@@ -16,8 +16,6 @@ macro_rules! can_skip_pointer {
 ///
 /// The pointer to promote is updated to point to the new location.
 pub fn promote_mature(process: &RcProcess, pointer: &mut ObjectPointer) {
-    pointer.unmark_for_finalization();
-
     {
         let local_data = process.local_data_mut();
         let old_obj = pointer.get_mut();
@@ -33,8 +31,6 @@ pub fn promote_mature(process: &RcProcess, pointer: &mut ObjectPointer) {
 //
 // The pointer to evacuate is updated to point to the new location.
 pub fn evacuate(process: &RcProcess, pointer: &mut ObjectPointer) {
-    pointer.unmark_for_finalization();
-
     {
         // When evacuating an object we must ensure we evacuate the object into
         // the same bucket.
@@ -8,13 +8,11 @@ use crate::immix::block_list::BlockIteratorMut;
 use crate::immix::bucket::Bucket;
 use crate::immix::bytemap::{Bytemap, LineMap, ObjectMap};
 use crate::object::Object;
-use crate::object_pointer::RawObjectPointer;
-use parking_lot::Mutex;
+use crate::object_pointer::{ObjectPointer, RawObjectPointer};
 use std::alloc::{self, Layout};
 use std::mem;
 use std::ops::Drop;
 use std::ptr;
-use std::sync::atomic::{AtomicBool, Ordering};
 use std::thread;
 
 /// The number of bytes in a block.
@@ -154,27 +152,11 @@ pub struct Block {
     /// Memory is aligned to the block size.
     pub lines: RawObjectPointer,
 
-    /// A flag that is set to true when this block is being finalized. This flag
-    /// is separate from the lock so we can efficiently check if we need to
-    /// finalize, without first having to acquire a lock.
-    pub finalizing: AtomicBool,
-
     /// Bitmap used to track which lines contain one or more reachable objects.
     pub used_lines_bitmap: LineMap,
 
     /// Bitmap used for tracking which object slots are live.
     pub marked_objects_bitmap: ObjectMap,
-
-    /// Bitmap used to track which objects need to be finalized when they are
-    /// garbage collected.
-    pub finalize_bitmap: ObjectMap,
-
-    /// Bitmap used to store which objects need to be finalized right now. This
-    /// bitmap will only contain entries for unmarked objects.
-    ///
-    /// While an ObjectMap can be modified concurrently we wrap it in a mutex so
-    /// we can also synchronise any corresponding drop operations.
-    pub pending_finalization_bitmap: Mutex<ObjectMap>,
 }
 
 unsafe impl Send for Block {}
@@ -184,7 +166,7 @@ impl Block {
     #[cfg_attr(feature = "cargo-clippy", allow(cast_ptr_alignment))]
     pub fn boxed() -> Box<Block> {
         let layout = unsafe { heap_layout_for_block() };
-        let lines = unsafe { alloc::alloc(layout) as RawObjectPointer };
+        let lines = unsafe { alloc::alloc_zeroed(layout) as RawObjectPointer };
 
         if lines.is_null() {
             alloc::handle_alloc_error(layout);
@@ -194,11 +176,8 @@ impl Block {
             lines,
             marked_objects_bitmap: ObjectMap::new(),
             used_lines_bitmap: LineMap::new(),
-            finalize_bitmap: ObjectMap::new(),
             free_pointer: DerefPointer::null(),
             end_pointer: DerefPointer::null(),
-            finalizing: AtomicBool::new(false),
-            pending_finalization_bitmap: Mutex::new(ObjectMap::new()),
         });
 
         block.free_pointer = DerefPointer::from_pointer(block.start_address());
@@ -318,14 +297,6 @@ impl Block {
     /// This method will return a None if no space is available in the current
     /// block.
     pub fn request_pointer(&mut self) -> Option<RawObjectPointer> {
-        // If the block is supposed to be finalized we'll finalize the entire
-        // block right away. This is much simpler to implement and removes the
-        // need for additional checks in future allocations into the current
-        // block.
-        if self.is_finalizing() {
-            self.finalize_pending();
-        }
-
         loop {
             let current = self.free_pointer();
             let end = self.end_pointer();
@@ -384,10 +355,6 @@ impl Block {
     pub unsafe fn request_pointer_for_mutator(
         &mut self,
     ) -> Option<RawObjectPointer> {
-        if self.is_finalizing() {
-            self.finalize_pending();
-        }
-
         loop {
             let current = self.free_pointer.pointer;
             let end = self.end_pointer.pointer;
@@ -450,10 +417,6 @@ impl Block {
         self.set_end_pointer(end_addr);
 
         self.reset_mark_bitmaps();
-
-        // We do not reset the "pending_finalization_bitmap" bitmap because this
-        // bitmap is cleared automatically during finalization / allocation.
-        self.finalize_bitmap.reset();
     }
 
     pub fn reset_mark_bitmaps(&mut self) {
@@ -463,65 +426,10 @@ impl Block {
 
     /// Finalizes all unmarked objects right away.
     pub fn finalize(&mut self) {
-        self.prepare_finalization();
-        self.finalize_pending();
-    }
-
-    /// Finalizes any pending objects. This may happen while the mutator is
-    /// running, thus extra synchronisation is required.
-    pub fn finalize_pending(&mut self) {
-        // We acquire the lock once for all pointers so we don't have to
-        // constantly lock and unlock it for every object that we need to
-        // finalize.
-        let mut bitmap = self.pending_finalization_bitmap.lock();
-
-        // It's possible another thread already finalized this block. To save us
-        // from doing redundant work we'll just bail out if this is the case.
-        if !self.is_finalizing() {
-            return;
-        }
-
-        for index in OBJECT_START_SLOT..OBJECTS_PER_BLOCK {
-            if bitmap.is_set(index) {
-                unsafe {
-                    ptr::drop_in_place(self.lines.add(index));
-                }
-
-                bitmap.unset(index);
-            }
-        }
-
-        self.finalizing.store(false, Ordering::Release);
-    }
-
-    /// Prepares this block for concurrent finalization.
-    ///
-    /// Returns true if this block should be finalized.
-    pub fn prepare_finalization(&mut self) -> bool {
-        // With blocks being scheduled in separate threads it's possible for us
-        // to collect a block that is still being finalized. In this case we'll
-        // try to complete the work before updating the pending bitmap with new
-        // entries.
-        if self.is_finalizing() {
-            self.finalize_pending();
-        }
-
-        let mut pending_bitmap = self.pending_finalization_bitmap.lock();
-
-        for index in OBJECT_START_SLOT..OBJECTS_PER_BLOCK {
-            if !self.marked_objects_bitmap.is_set(index)
-                && self.finalize_bitmap.is_set(index)
-            {
-                pending_bitmap.set(index);
-                self.finalize_bitmap.unset(index);
-            }
-        }
-
-        if pending_bitmap.is_empty() {
-            false
-        } else {
-            self.finalizing.store(true, Ordering::Release);
-            true
-        }
-    }
+        for index in OBJECT_START_SLOT..OBJECTS_PER_BLOCK {
+            let raw_ptr = unsafe { self.lines.add(index) };
+
+            ObjectPointer::new(raw_ptr).finalize();
+        }
+    }
 
@@ -563,11 +471,6 @@ impl Block {
         BlockIteratorMut::starting_at(self)
     }
 
-    #[inline(always)]
-    pub fn is_finalizing(&self) -> bool {
-        self.finalizing.load(Ordering::Acquire)
-    }
-
     fn find_available_hole(
         &mut self,
         old_free: RawObjectPointer,
@@ -620,12 +523,7 @@ impl Block {
 
 impl Drop for Block {
     fn drop(&mut self) {
-        // Because we schedule block _pointers_ for finalization (and not owned
-        // blocks) it's possible we're about to drop a block that is still being
-        // finalized.
-        if self.is_finalizing() {
-            self.finalize_pending();
-        }
+        self.finalize();
 
         unsafe {
             alloc::dealloc(self.lines as *mut u8, heap_layout_for_block());
@@ -656,6 +554,11 @@ mod tests {
         assert!(mem::size_of::<BlockHeader>() <= LINE_SIZE);
     }
 
+    #[test]
+    fn test_block_type_size() {
+        assert_eq!(mem::size_of::<Block>(), 352);
+    }
+
     #[test]
     fn test_block_header_new() {
         let mut block = Block::boxed();
@@ -1018,63 +921,20 @@ mod tests {
         assert!(block.bucket().is_none());
         assert!(block.used_lines_bitmap.is_empty());
         assert!(block.marked_objects_bitmap.is_empty());
-        assert!(block.finalize_bitmap.is_empty());
     }
 
     #[test]
     fn test_block_finalize() {
         let mut block = Block::boxed();
+        let raw_pointer = block.request_pointer().unwrap();
+        let pointer = ObjectPointer::new(raw_pointer);
 
-        Object::new(ObjectValue::Float(10.0))
-            .write_to(block.request_pointer().unwrap());
-
-        block.finalize();
-
-        assert!(block.finalize_bitmap.is_empty());
-    }
-
-    #[test]
-    fn test_block_finalize_pending() {
-        let mut block = Block::boxed();
-
-        Object::new(ObjectValue::Float(10.0))
-            .write_to(block.request_pointer().unwrap());
-
-        block.prepare_finalization();
-        block.finalize_pending();
-
-        assert_eq!(block.is_finalizing(), false);
-        assert!(block.finalize_bitmap.is_empty());
-        assert!(block.pending_finalization_bitmap.lock().is_empty());
-    }
-
-    #[test]
-    fn test_block_prepare_finalization() {
-        let mut block = Block::boxed();
-
-        Object::new(ObjectValue::Float(10.0))
-            .write_to(block.request_pointer().unwrap());
-
-        block.prepare_finalization();
-
-        assert!(block.is_finalizing());
-        assert!(block.finalize_bitmap.is_empty());
-        assert_eq!(block.pending_finalization_bitmap.lock().is_empty(), false);
-    }
-
-    #[test]
-    fn test_block_prepare_finalization_twice() {
-        let mut block = Block::boxed();
-
-        Object::new(ObjectValue::Float(10.5))
-            .write_to(block.request_pointer().unwrap());
+        Object::new(ObjectValue::Float(10.0)).write_to(raw_pointer);
 
-        block.prepare_finalization();
-        block.prepare_finalization();
+        pointer.finalize();
 
-        assert_eq!(block.is_finalizing(), false);
-        assert!(block.finalize_bitmap.is_empty());
-        assert!(block.pending_finalization_bitmap.lock().is_empty());
+        assert!(pointer.get().value.is_none());
+        assert_eq!(pointer.is_finalizable(), false);
     }
 
     #[test]
@@ -9,7 +9,6 @@ use crate::immix::global_allocator::RcGlobalAllocator;
 use crate::immix::histograms::Histograms;
 use crate::object::Object;
 use crate::object_pointer::ObjectPointer;
-use crate::scheduler::pool::Pool;
 use crate::vm::state::RcState;
 use parking_lot::Mutex;
 use rayon::prelude::*;
@@ -234,15 +233,12 @@ impl Bucket {
     /// Recyclable blocks are scheduled for re-use by the allocator, empty
     /// blocks are to be returned to the global pool, and full blocks are kept.
     pub fn reclaim_blocks(&mut self, state: &RcState, histograms: &Histograms) {
-        let to_finalize = self
-            .blocks
+        self.blocks
             .pointers()
             .into_par_iter()
-            .filter_map(|mut block| {
+            .for_each(|mut block| {
                 block.update_line_map();
 
-                let finalize = block.prepare_finalization();
-
                 if block.is_empty() {
                     block.reset();
                 } else {
@@ -257,18 +253,7 @@ impl Bucket {
                         block.recycle();
                     }
                 }
-
-                if finalize {
-                    Some(block)
-                } else {
-                    None
-                }
-            })
-            .collect::<Vec<_>>();
-
-        if !to_finalize.is_empty() {
-            state.finalizer_pool.schedule(to_finalize);
-        }
+            });
 
         // We partition the blocks in sequence so we don't need to synchronise
         // access to the destination lists.
@@ -339,11 +324,9 @@ use std::ops::Drop;
 #[cfg(test)]
 impl Drop for Bucket {
     fn drop(&mut self) {
-        // To prevent memory leaks in the tests we automatically finalize any
-        // data, removing the need for doing this manually in every test.
-        for mut block in self.blocks.drain() {
-            block.reset_mark_bitmaps();
-            block.finalize();
+        for block in self.blocks.drain() {
+            // Dropping the block also finalises it right away.
+            drop(block);
         }
     }
 }
@@ -163,7 +163,6 @@ pub trait CopyObject: Sized {
         }
 
         to_copy.drop_attributes();
-        to_copy_ptr.unmark_for_finalization();
 
         self.allocate_copy(copy)
     }
@@ -177,7 +176,6 @@ mod tests {
     use crate::config::Config;
     use crate::deref_pointer::DerefPointer;
     use crate::global_scope::{GlobalScope, GlobalScopePointer};
-    use crate::immix::bytemap::Bytemap;
     use crate::immix::global_allocator::GlobalAllocator;
     use crate::immix::local_allocator::LocalAllocator;
     use crate::object::Object;
@@ -239,15 +237,11 @@ mod tests {
         let name = dummy.allocator.allocate_empty();
 
         ptr1.get_mut().add_attribute(name, ptr2);
-        ptr1.mark_for_finalization();
 
         let copy = dummy.copy_object(ptr1);
-        let copy_index =
-            copy.block().object_index_of_pointer(copy.raw.untagged());
 
         assert!(copy.is_finalizable());
         assert!(copy.get().attributes_map().is_some());
-        assert!(copy.block().finalize_bitmap.is_set(copy_index));
     }
 
     #[test]
@@ -411,19 +405,14 @@ mod tests {
         let name = dummy.allocator.allocate_empty();
 
         ptr1.get_mut().add_attribute(name, ptr2);
-        ptr1.mark_for_finalization();
 
         let copy = dummy.move_object(ptr1);
 
-        let copy_index =
-            copy.block().object_index_of_pointer(copy.raw.untagged());
-
         assert_eq!(ptr1.is_finalizable(), false);
         assert!(ptr1.get().attributes_map().is_none());
 
         assert!(copy.is_finalizable());
         assert!(copy.get().attributes_map().is_some());
-        assert!(copy.block().finalize_bitmap.is_set(copy_index));
     }
 
     #[test]
 //! Permanent Object Allocator
 //!
 //! This allocator allocates objects that are never garbage collected.
-use std::ops::Drop;
-
 use crate::immix::bucket::{Bucket, PERMANENT};
 use crate::immix::copy_object::CopyObject;
 use crate::immix::global_allocator::RcGlobalAllocator;
 use crate::object::Object;
 use crate::object_pointer::ObjectPointer;
 use crate::object_value;
 use crate::object_value::ObjectValue;
+use std::ops::Drop;
 
 pub struct PermanentAllocator {
     global_allocator: RcGlobalAllocator,
@@ -66,18 +63,10 @@ impl CopyObject for PermanentAllocator {
 
 impl Drop for PermanentAllocator {
     fn drop(&mut self) {
-        let blocks = &mut self.bucket.blocks;
-
-        for block in blocks.iter_mut() {
-            block.reset_mark_bitmaps();
-
-            // When dropping the permanent allocator there's no separate thread
-            // to push our work to, thus we finalize pointers right away.
-            block.finalize();
-            block.reset();
+        for block in self.bucket.blocks.drain() {
+            // Dropping the block also finalises it right away.
+            drop(block);
         }
-
-        self.global_allocator.add_blocks(blocks);
     }
 }
 
@@ -135,13 +124,11 @@ mod tests {
     #[test]
     fn test_drop() {
         let mut alloc = permanent_allocator();
-        let global_alloc = alloc.global_allocator.clone();
-        let pointer = alloc.allocate_empty();
 
-        drop(alloc);
-
-        let block = global_alloc.request_block();
+        alloc.allocate_empty();
 
-        assert!(&*block as *const _ == pointer.block() as *const _);
+        // This is just a smoke test to make sure the dropping doesn't crash in
+        // any way.
+        drop(alloc);
     }
 }
@@ -342,14 +342,15 @@ impl Object {
     }
 
     pub fn write_to(self, raw_pointer: RawObjectPointer) -> ObjectPointer {
-        unsafe {
-            ptr::write(raw_pointer, self);
-        }
-
         let pointer = ObjectPointer::new(raw_pointer);
 
-        if pointer.is_finalizable() {
-            pointer.mark_for_finalization();
-        }
+        // Finalize the existing object, if needed. This must be done before we
+        // allocate the new object, otherwise we will leak memory.
+        pointer.finalize();
+
+        // Write the new data to the pointer.
+        unsafe {
+            ptr::write(raw_pointer, self);
+        }
 
         pointer
@@ -34,22 +34,6 @@ use crate::socket::Socket;
 use crate::tagged_pointer::TaggedPointer;
 use crate::vm::state::RcState;
 
-/// Performs a write to an object and tracks it in the write barrier.
-macro_rules! write_object {
-    ($receiver:expr, $process:expr, $action:expr, $value:expr) => {{
-        let track = !$receiver.get().has_attributes();
-        let pointer = *$receiver;
-
-        $action;
-
-        $process.write_barrier(pointer, $value);
-
-        if track && $receiver.is_finalizable() {
-            $receiver.mark_for_finalization();
-        }
-    }};
-}
-
 /// Defines a method for getting the value of an object as a given type.
 macro_rules! def_value_getter {
     ($name: ident, $getter: ident, $as_type: ident, $ok_type: ty) => (
@@ -269,20 +253,6 @@ impl ObjectPointer {
             && self.block().bucket().unwrap().age <= YOUNG_MAX_AGE
     }
 
-    pub fn mark_for_finalization(&self) {
-        let block = self.block_mut();
-        let index = block.object_index_of_pointer(self.raw.untagged());
-
-        block.finalize_bitmap.set(index);
-    }
-
-    pub fn unmark_for_finalization(&self) {
-        let block = self.block_mut();
-        let index = block.object_index_of_pointer(self.raw.untagged());
-
-        block.finalize_bitmap.unset(index);
-    }
-
     /// Marks the current object and its line.
     ///
     /// As this method is called often during collection, this method refers to
@@ -339,6 +309,15 @@ impl ObjectPointer {
         !self.is_tagged_integer() && self.get().is_finalizable()
     }
 
+    /// Finalizes the underlying object, if needed.
+    pub fn finalize(&self) {
+        if !self.is_finalizable() {
+            return;
+        }
+
+        drop(self.get_mut().take());
+    }
+
     /// Adds an attribute to the object this pointer points to.
     pub fn add_attribute(
         &self,
@@ -346,12 +325,9 @@ impl ObjectPointer {
         name: ObjectPointer,
         attr: ObjectPointer,
     ) {
-        write_object!(
-            self,
-            process,
-            self.get_mut().add_attribute(name, attr),
-            attr
-        );
+        self.get_mut().add_attribute(name, attr);
+
+        process.write_barrier(*self, attr);
     }
 
     /// Looks up an attribute.
@@ -1109,8 +1085,6 @@ mod tests {
             .get_mut()
             .add_attribute(name, method);
 
-        state.integer_prototype.mark_for_finalization();
-
         assert!(ptr.lookup_attribute(&state, name).unwrap() == method);
     }
 
@@ -1126,8 +1100,6 @@ mod tests {
             .get_mut()
             .add_attribute(name, method);
 
-        state.integer_prototype.mark_for_finalization();
-
         assert!(ptr.lookup_attribute_in_self(&state, name).unwrap() == method);
     }
 
@@ -1148,7 +1120,6 @@ mod tests {
         let value = state.permanent_allocator.lock().allocate_empty();
 
         ptr.get_mut().add_attribute(name, value);
-        ptr.mark_for_finalization();
 
         assert!(ptr.lookup_attribute(&state, name).unwrap() == value);
     }
@@ -3,7 +3,6 @@ use crate::binding::RcBinding;
 use crate::block::Block;
 use crate::compiled_code::CompiledCodePointer;
 use crate::config::Config;
-use crate::deref_pointer::DerefPointer;
 use crate::execution_context::ExecutionContext;
 use crate::gc::work_list::WorkList;
 use crate::global_scope::GlobalScopePointer;
@@ -14,7 +13,6 @@ use crate::immix::local_allocator::LocalAllocator;
 use crate::mailbox::Mailbox;
 use crate::object_pointer::ObjectPointer;
 use crate::object_value;
-use crate::scheduler::pool::Pool;
 use crate::scheduler::timeouts::Timeout;
 use crate::tagged_pointer::{self, TaggedPointer};
 use crate::vm::state::RcState;
@@ -508,19 +506,9 @@ impl Process {
     pub fn reclaim_and_finalize(&self, state: &RcState) {
         let mut blocks = self.reclaim_all_blocks();
 
-        let to_finalize = blocks
-            .iter_mut()
-            .map(|block| {
-                block.reset_mark_bitmaps();
-                block.prepare_finalization();
-                block.reset();
-
-                DerefPointer::new(block)
-            })
-            .collect::<Vec<_>>();
-
-        if !to_finalize.is_empty() {
-            state.finalizer_pool.schedule(to_finalize);
+        for block in blocks.iter_mut() {
+            block.reset();
+            block.finalize();
         }
 
         state.global_allocator.add_blocks(&mut blocks);
@@ -182,7 +182,6 @@ impl Machine {
         self.schedule_main_process(file);
 
         let gc_pool_guard = self.start_gc_threads();
-        let finalizer_pool_guard = self.start_finalizer_threads();
         let secondary_guard = self.start_blocking_threads();
         let timeout_guard = self.start_timeout_worker_thread();
 
@@ -201,7 +200,6 @@ impl Machine {
         if primary_guard.join().is_err()
             || secondary_guard.join().is_err()
             || gc_pool_guard.join().is_err()
-            || finalizer_pool_guard.join().is_err()
             || timeout_guard.join().is_err()
         {
             self.state.set_exit_status(1);
@@ -245,14 +243,6 @@ impl Machine {
             .start(move |_, mut request| request.perform())
     }
 
-    fn start_finalizer_threads(&self) -> JoinList<()> {
-        self.state.finalizer_pool.start(move |_, blocks| {
-            for mut block in blocks {
-                block.finalize_pending();
-            }
-        })
-    }
-
     fn start_timeout_worker_thread(&self) -> thread::JoinHandle<()> {
         let state = self.state.clone();
 
@@ -278,7 +268,6 @@ impl Machine {
     fn terminate(&self) {
         self.state.scheduler.terminate();
         self.state.gc_pool.terminate();
-        self.state.finalizer_pool.terminate();
         self.state.timeout_worker.terminate();
     }
 
@@ -251,10 +251,6 @@ pub fn drop_value(pointer: ObjectPointer) {
 
     if object.value.is_some() {
         drop(object.value.take());
-
-        if !object.has_attributes() {
-            pointer.unmark_for_finalization();
-        }
     }
 }
 
@@ -5,9 +5,7 @@
 //! etc.
 use crate::arc_without_weak::ArcWithoutWeak;
 use crate::config::Config;
-use crate::deref_pointer::DerefPointer;
 use crate::gc::request::Request;
-use crate::immix::block::Block;
 use crate::immix::copy_object::CopyObject;
 use crate::immix::global_allocator::{GlobalAllocator, RcGlobalAllocator};
 use crate::immix::permanent_allocator::PermanentAllocator;
@@ -42,10 +40,6 @@ macro_rules! intern_string {
             alloc.allocate_with_prototype(value, $state.string_prototype)
         };
 
-        if ptr.is_finalizable() {
-            ptr.mark_for_finalization();
-        }
-
         pool.add(ptr);
 
         ptr
@@ -63,9 +57,6 @@ pub struct State {
     /// The pool to use for garbage collection.
     pub gc_pool: GenericPool<Request>,
 
-    /// The pool to use for finalizing objects.
-    pub finalizer_pool: GenericPool<Vec<DerefPointer<Block>>>,
-
     /// The permanent memory allocator, used for global data.
     pub permanent_allocator: Mutex<Box<PermanentAllocator>>,
 
@@ -176,9 +167,6 @@ impl State {
 
         let gc_pool = GenericPool::new("GC".to_string(), config.gc_threads);
 
-        let finalizer_pool =
-            GenericPool::new("finalizer".to_string(), config.finalizer_threads);
-
         let mut state = State {
             scheduler: ProcessScheduler::new(
                 config.primary_threads,
@@ -186,7 +174,6 @@ impl State {
             ),
             config,
             gc_pool,
-            finalizer_pool,
             permanent_allocator: Mutex::new(perm_alloc),
             global_allocator: global_alloc,
             string_pool: Mutex::new(StringPool::new()),