diff --git a/api/sixtyfps-cpp/include/sixtyfps_sharedarray.h b/api/sixtyfps-cpp/include/sixtyfps_sharedarray.h
index af0d4f356..dfa06dd37 100644
--- a/api/sixtyfps-cpp/include/sixtyfps_sharedarray.h
+++ b/api/sixtyfps-cpp/include/sixtyfps_sharedarray.h
@@ -9,6 +9,7 @@ LICENSE END */
 #pragma once
 #include "sixtyfps_sharedarray_internal.h"
+#include <atomic>
 
 namespace sixtyfps {
 
@@ -16,26 +17,29 @@ template <typename T>
 struct SharedArray
 {
     SharedArray()
-    {
-        cbindgen_private::sixtyfps_shared_array_new_null(reinterpret_cast<SharedArray<uint8_t> *>(this));
-    }
+        : inner(const_cast<SharedArrayHeader *>(reinterpret_cast<const SharedArrayHeader *>(
+                cbindgen_private::sixtyfps_shared_array_empty())))
+    { }
 
     SharedArray(const SharedArray &other)
+        : inner(other.inner)
     {
-        cbindgen_private::sixtyfps_shared_array_clone(
-                reinterpret_cast<SharedArray<uint8_t> *>(this),
-                reinterpret_cast<const SharedArray<uint8_t> *>(&other));
+        if (inner->refcount > 0) {
+            ++inner->refcount;
+        }
     }
 
     ~SharedArray()
     {
-        cbindgen_private::sixtyfps_shared_array_drop(reinterpret_cast<SharedArray<uint8_t> *>(this));
+        drop();
     }
 
     SharedArray &operator=(const SharedArray &other)
     {
-        cbindgen_private::sixtyfps_shared_array_drop(reinterpret_cast<SharedArray<uint8_t> *>(this));
-        cbindgen_private::sixtyfps_shared_array_clone(
-                reinterpret_cast<SharedArray<uint8_t> *>(this),
-                reinterpret_cast<const SharedArray<uint8_t> *>(&other));
+        if (other.inner == inner) {
+            return *this;
+        }
+        drop();
+        inner = other.inner;
+        if (inner->refcount > 0) {
+            ++inner->refcount;
+        }
         return *this;
     }
     SharedArray &operator=(SharedArray &&other)
@@ -44,7 +48,36 @@ struct SharedArray
         return *this;
    }
 
+    const T *begin() const {
+        return reinterpret_cast<const T *>(inner + 1);
+    }
+
+    const T *end() const {
+        return begin() + inner->size;
+    }
+
 private:
-    void *inner; // opaque
+    void drop() {
+        if (inner->refcount > 0 && (--inner->refcount) == 0) {
+            auto b = begin(), e = end();
+            for (auto it = b; it < e; ++it) {
+                it->~T();
+            }
+            cbindgen_private::sixtyfps_shared_array_free(
+                    reinterpret_cast<uint8_t *>(inner),
+                    sizeof(SharedArrayHeader) + inner->capacity * sizeof(T),
+                    alignof(SharedArrayHeader));
+        }
+    }
+
+    // Unfortunately, this cannot be generated by cbindgen because std::atomic is not understood
+    struct SharedArrayHeader {
+        std::atomic<std::intptr_t> refcount;
+        std::size_t size;
+        std::size_t capacity;
+    };
+    static_assert(alignof(T) <= alignof(SharedArrayHeader), "Not yet supported because we would need to add padding");
+    SharedArrayHeader *inner;
 };
 }
diff --git a/sixtyfps_runtime/corelib/lib.rs b/sixtyfps_runtime/corelib/lib.rs
index 164788931..282552eb6 100644
--- a/sixtyfps_runtime/corelib/lib.rs
+++ b/sixtyfps_runtime/corelib/lib.rs
@@ -73,7 +73,7 @@ pub mod tests;
 pub fn use_modules() -> usize {
     tests::sixtyfps_mock_elapsed_time as usize
         + signals::ffi::sixtyfps_signal_init as usize
-        + sharedarray::ffi::sixtyfps_shared_array_drop as usize
+        + sharedarray::ffi::sixtyfps_shared_array_empty as usize
         + layout::solve_grid_layout as usize
         + item_tree::ffi::sixtyfps_visit_item_tree as usize
         + graphics::ffi::sixtyfps_new_path_elements as usize
diff --git a/sixtyfps_runtime/corelib/sharedarray.rs b/sixtyfps_runtime/corelib/sharedarray.rs
index 9689e394d..509af4288 100644
--- a/sixtyfps_runtime/corelib/sharedarray.rs
+++ b/sixtyfps_runtime/corelib/sharedarray.rs
@@ -10,74 +10,95 @@ LICENSE END */
 //! module for the SharedArray and related things
 #![allow(unsafe_code)]
 #![warn(missing_docs)]
+use core::fmt::Debug;
 use core::mem::MaybeUninit;
-use std::{fmt::Debug, fmt::Display, ops::Deref};
-use triomphe::{Arc, HeaderWithLength, ThinArc};
+use core::ops::Deref;
+use core::ptr::NonNull;
+use core::sync::atomic;
+use std::alloc;
+
+#[repr(C)]
+struct SharedArrayHeader {
+    refcount: atomic::AtomicIsize,
+    size: usize,
+    capacity: usize,
+}
+
+#[repr(C)]
+struct SharedArrayInner<T> {
+    header: SharedArrayHeader,
+    data: MaybeUninit<T>,
+}
+
+fn compute_inner_layout<T>(capacity: usize) -> alloc::Layout {
+    alloc::Layout::new::<SharedArrayHeader>()
+        .extend(alloc::Layout::array::<T>(capacity).unwrap())
+        .unwrap()
+        .0
+}
+
+unsafe fn drop_inner<T>(inner: NonNull<SharedArrayInner<T>>) {
+    debug_assert_eq!(inner.as_ref().header.refcount.load(core::sync::atomic::Ordering::Relaxed), 0);
+    let data_ptr = inner.as_ref().data.as_ptr();
+    for x in 0..inner.as_ref().header.size {
+        drop(core::ptr::read(data_ptr.add(x)));
+    }
+    alloc::dealloc(
+        inner.as_ptr() as *mut u8,
+        compute_inner_layout::<T>(inner.as_ref().header.capacity),
+    )
+}
+
+/// Allocate the memory for the SharedArray with the given capacity. Return the inner with size 0 and refcount set to 1
+fn alloc_with_capacity<T>(capacity: usize) -> NonNull<SharedArrayInner<T>> {
+    let ptr = unsafe { alloc::alloc(compute_inner_layout::<T>(capacity)) };
+    unsafe {
+        core::ptr::write(
+            ptr as *mut SharedArrayHeader,
+            SharedArrayHeader { refcount: 1.into(), size: 0, capacity },
+        );
+    }
+    NonNull::new(ptr).unwrap().cast()
+}
 
-#[derive(Clone)]
 #[repr(C)]
 /// SharedArray holds a reference-counted read-only copy of `[T]`.
-pub struct SharedArray<T: Clone> {
-    /// Invariant: The usize header is the `len` of the vector, the contained buffer is `[T]`
-    inner: ThinArc<usize, MaybeUninit<T>>,
+pub struct SharedArray<T> {
+    inner: NonNull<SharedArrayInner<T>>,
 }
 
-struct PaddingFillingIter<'a, U> {
-    iter: &'a mut dyn Iterator<Item = MaybeUninit<U>>,
-    pos: usize,
-    len: usize,
-    padding_elements: usize,
-}
-
-impl<'a, U> PaddingFillingIter<'a, U> {
-    fn new(len: usize, iter: &'a mut dyn Iterator<Item = MaybeUninit<U>>) -> Self {
-        let alignment = core::mem::align_of::<usize>();
-        let mut padding_elements = if len == 0 { 1 } else { 0 }; // ThinArc can't deal with empty arrays, so add padding for empty arrays.
-
-        // Add padding to ensure that the size in bytes is a multiple of the pointer alignment. This can mean different
-        // increments depending on whether sizeof(U) is less or greater than align_of(usize).
-        loop {
-            let size_in_bytes = (len + padding_elements) * core::mem::size_of::<U>();
-            let byte_aligned_size = (size_in_bytes + alignment - 1) & !(alignment - 1);
-            let padding_bytes = byte_aligned_size - size_in_bytes;
-            if padding_bytes == 0 {
-                break;
+impl<T> Drop for SharedArray<T> {
+    fn drop(&mut self) {
+        unsafe {
+            if self.inner.as_ref().header.refcount.load(atomic::Ordering::Relaxed) < 0 {
+                return;
+            }
+            if self.inner.as_ref().header.refcount.fetch_sub(1, atomic::Ordering::SeqCst) == 1 {
+                drop_inner(self.inner)
             }
-            padding_elements += 1;
         }
-
-        Self { iter, pos: 0, len, padding_elements }
     }
 }
 
-impl<'a, U: Clone> Iterator for PaddingFillingIter<'a, U> {
-    type Item = MaybeUninit<U>;
-    fn next(&mut self) -> Option<MaybeUninit<U>> {
-        let pos = self.pos;
-        self.pos += 1;
-        if pos < self.len {
-            self.iter.next()
-        } else if pos < self.len + self.padding_elements {
-            Some(MaybeUninit::uninit())
-        } else {
-            None
+impl<T> Clone for SharedArray<T> {
+    fn clone(&self) -> Self {
+        unsafe {
+            if self.inner.as_ref().header.refcount.load(atomic::Ordering::Relaxed) > 0 {
+                self.inner.as_ref().header.refcount.fetch_add(1, atomic::Ordering::SeqCst);
+            }
+            return SharedArray { inner: self.inner };
         }
     }
-    fn size_hint(&self) -> (usize, Option<usize>) {
-        let l = self.len + self.padding_elements;
-        (l, Some(l))
-    }
 }
-impl<'a, U: Clone> core::iter::ExactSizeIterator for PaddingFillingIter<'a, U> {}
 
-impl<T: Clone> SharedArray<T> {
+impl<T> SharedArray<T> {
     fn as_ptr(&self) -> *const T {
-        self.inner.slice.as_ptr() as *const T
+        unsafe { self.inner.as_ref().data.as_ptr() }
     }
 
     /// Number of elements in the array
     pub fn len(&self) -> usize {
-        self.inner.header.header
+        unsafe { self.inner.as_ref().header.size }
     }
 
     /// Return a slice to the array
@@ -86,67 +107,54 @@ impl<T: Clone> SharedArray<T> {
     }
 
     /// Constructs a new SharedArray from the given iterator.
-    pub fn from_iter(iter: impl Iterator<Item = T> + ExactSizeIterator) -> Self {
-        let len = iter.len();
-        let item_iter = &mut iter.map(|item| MaybeUninit::new(item));
-        let iter = PaddingFillingIter::new(len, item_iter);
-
-        SharedArray {
-            inner: Arc::into_thin(Arc::from_header_and_iter(
-                HeaderWithLength::new(len, iter.size_hint().0),
-                iter,
-            )),
+    pub fn from_iter(mut iter: impl Iterator<Item = T> + ExactSizeIterator) -> Self {
+        let capacity = iter.len();
+        let inner = alloc_with_capacity::<T>(capacity);
+        let mut result = SharedArray { inner };
+        let mut size = 0;
+        while let Some(x) = iter.next() {
+            assert_ne!(size, capacity);
+            unsafe {
+                core::ptr::write(result.inner.as_mut().data.as_mut_ptr().add(size), x);
+                size += 1;
+                result.inner.as_mut().header.size = size;
+            }
         }
+        result
     }
 
     /// Constructs a new SharedArray from the given slice.
-    pub fn from(slice: &[T]) -> Self {
+    pub fn from(slice: &[T]) -> Self
+    where
+        T: Clone,
+    {
         SharedArray::from_iter(slice.iter().cloned())
     }
 }
 
-impl<T: Clone> Deref for SharedArray<T> {
+impl<T> Deref for SharedArray<T> {
     type Target = [T];
     fn deref(&self) -> &Self::Target {
         self.as_slice()
     }
 }
 
-trait StaticNull: Sized + 'static {
-    const NULL: once_cell::sync::Lazy<ThinArc<usize, MaybeUninit<Self>>>;
-}
-impl<T: Clone + 'static> StaticNull for T {
-    const NULL: once_cell::sync::Lazy<ThinArc<usize, MaybeUninit<T>>> =
-        once_cell::sync::Lazy::new(|| {
-            let len = 0;
-            let null_iter = &mut std::iter::empty();
-            let iter = PaddingFillingIter::new(len, null_iter);
-            Arc::into_thin(Arc::from_header_and_iter(
-                HeaderWithLength::new(len, iter.size_hint().0),
-                iter,
-            ))
-        });
-}
+static SHARED_NULL: SharedArrayHeader =
+    SharedArrayHeader { refcount: std::sync::atomic::AtomicIsize::new(-1), size: 0, capacity: 0 };
 
-impl<T: Clone> Default for SharedArray<T> {
+impl<T> Default for SharedArray<T> {
     fn default() -> Self {
-        SharedArray { inner: StaticNull::NULL.clone() }
+        SharedArray { inner: NonNull::from(&SHARED_NULL).cast() }
     }
 }
 
-impl<T: Clone + Debug> Debug for SharedArray<T> {
+impl<T: Debug> Debug for SharedArray<T> {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
         self.as_slice().fmt(f)
     }
 }
 
-impl<T: Clone + Display> Display for SharedArray<T> {
-    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        self.as_slice().fmt(f)
-    }
-}
-
-impl<T: Clone> AsRef<[T]> for SharedArray<T> {
+impl<T> AsRef<[T]> for SharedArray<T> {
     #[inline]
     fn as_ref(&self) -> &[T] {
         self.as_slice()
@@ -156,14 +164,85 @@ impl<T: Clone> AsRef<[T]> for SharedArray<T> {
 
 impl<T, U> PartialEq<U> for SharedArray<T>
 where
     U: ?Sized + AsRef<[T]>,
-    T: Clone + PartialEq,
+    T: PartialEq,
 {
     fn eq(&self, other: &U) -> bool {
         self.as_slice() == other.as_ref()
     }
 }
 
-impl<T: Clone + Eq> Eq for SharedArray<T> {}
+impl<T: Eq> Eq for SharedArray<T> {}
+
+impl<T: Clone> IntoIterator for SharedArray<T> {
+    type Item = T;
+    type IntoIter = IntoIter<T>;
+    fn into_iter(self) -> Self::IntoIter {
+        IntoIter(unsafe {
+            if self.inner.as_ref().header.refcount.load(atomic::Ordering::Relaxed) == 1 {
+                let inner = self.inner;
+                std::mem::forget(self);
+                inner.as_ref().header.refcount.store(0, atomic::Ordering::Relaxed);
+                IntoIterInner::UnShared(inner, 0)
+            } else {
+                IntoIterInner::Shared(self, 0)
+            }
+        })
+    }
+}
+
+enum IntoIterInner<T> {
+    Shared(SharedArray<T>, usize),
+    // Elements up to the usize member are already moved out
+    UnShared(NonNull<SharedArrayInner<T>>, usize),
+}
+
+impl<T> Drop for IntoIterInner<T> {
+    fn drop(&mut self) {
+        match self {
+            IntoIterInner::Shared(..) => { /* drop of SharedArray takes care of it */ }
+            IntoIterInner::UnShared(inner, begin) => unsafe {
+                debug_assert_eq!(inner.as_ref().header.refcount.load(atomic::Ordering::Relaxed), 0);
+                let data_ptr = inner.as_ref().data.as_ptr();
+                for x in (*begin)..inner.as_ref().header.size {
+                    drop(core::ptr::read(data_ptr.add(x)));
+                }
+                alloc::dealloc(
+                    inner.as_ptr() as *mut u8,
+                    compute_inner_layout::<T>(inner.as_ref().header.capacity),
+                )
+            },
+        }
+    }
+}
+
+/// An iterator that moves out of a SharedArray.
+///
+/// This `struct` is created by the `into_iter` method on [`SharedArray`] (provided
+/// by the [`IntoIterator`] trait).
+pub struct IntoIter<T>(IntoIterInner<T>);
+
+impl<T: Clone> Iterator for IntoIter<T> {
+    type Item = T;
+
+    fn next(&mut self) -> Option<T> {
+        match &mut self.0 {
+            IntoIterInner::Shared(array, moved) => {
+                let result = array.as_slice().get(*moved).cloned();
+                *moved += 1;
+                result
+            }
+            IntoIterInner::UnShared(inner, begin) => unsafe {
+                if *begin < inner.as_ref().header.size {
+                    let r = core::ptr::read(inner.as_ref().data.as_ptr().add(*begin));
+                    *begin += 1;
+                    Some(r)
+                } else {
+                    None
+                }
+            },
+        }
+    }
+}
 
 #[test]
 fn simple_test() {
@@ -184,24 +263,20 @@ pub(crate) mod ffi {
     use super::*;
 
     #[no_mangle]
-    /// This function is used for the low-level C++ interface to allocate the backing vector for an empty shared array.
-    pub unsafe extern "C" fn sixtyfps_shared_array_new_null(out: *mut SharedArray<u8>) {
-        core::ptr::write(out, SharedArray::<u8>::default());
+    /// This function is used for the low-level C++ interface to allocate the backing vector of a SharedArray.
+    pub unsafe extern "C" fn sixtyfps_shared_array_allocate(size: usize, align: usize) -> *mut u8 {
+        std::alloc::alloc(std::alloc::Layout::from_size_align(size, align).unwrap())
     }
 
     #[no_mangle]
-    /// This function is used for the low-level C++ interface to clone a shared array by increasing its reference count.
-    pub unsafe extern "C" fn sixtyfps_shared_array_clone(
-        out: *mut SharedArray<u8>,
-        source: &SharedArray<u8>,
-    ) {
-        core::ptr::write(out, source.clone());
+    /// This function is used for the low-level C++ interface to deallocate the backing vector of a SharedArray.
+    pub unsafe extern "C" fn sixtyfps_shared_array_free(ptr: *mut u8, size: usize, align: usize) {
+        std::alloc::dealloc(ptr, std::alloc::Layout::from_size_align(size, align).unwrap())
     }
 
     #[no_mangle]
-    /// This function is used for the low-level C++ interface to decrease the reference count of a shared array.
-    pub unsafe extern "C" fn sixtyfps_shared_array_drop(out: *mut SharedArray<u8>) {
-        // ?? This won't call drop on the right type...
-        core::ptr::read(out);
+    /// This function is used for the low-level C++ interface to initialize the empty SharedArray.
+    pub unsafe extern "C" fn sixtyfps_shared_array_empty() -> *const u8 {
+        &SHARED_NULL as *const _ as *const u8
    }
 }
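
For illustration, a minimal sketch of how the reference-counting semantics introduced by this patch behave from the Rust side. This is not part of the patch; the `use` path is an assumption (the patch references the `sharedarray` module of the `sixtyfps_corelib` crate but does not show its visibility or re-exports):

use sixtyfps_corelib::sharedarray::SharedArray; // assumed path, see note above

fn main() {
    // Default points at the static SHARED_NULL header (refcount -1):
    // no allocation happens, and Drop returns early without freeing it.
    let empty: SharedArray<i32> = SharedArray::default();
    assert_eq!(empty.len(), 0);

    // `from` copies the slice into a single allocation holding the
    // SharedArrayHeader followed by the elements (refcount == 1).
    let a = SharedArray::from(&[1, 2, 3][..]);

    // Cloning is O(1): it only bumps the atomic refcount, so both
    // handles share the same SharedArrayInner.
    let b = a.clone();
    assert_eq!(a.as_slice(), b.as_slice());

    // The refcount is 2 here, so into_iter takes the `Shared` path and
    // clones each element out while `a` keeps the buffer alive.
    let doubled: Vec<i32> = b.into_iter().map(|x| x * 2).collect();
    assert_eq!(doubled, vec![2, 4, 6]);

    // `a` is now the last handle (refcount 1), so into_iter takes the
    // `UnShared` path: elements are moved out without cloning and the
    // allocation is freed when the iterator is dropped.
    let moved: Vec<i32> = a.into_iter().collect();
    assert_eq!(moved, vec![1, 2, 3]);
}

The C++ SharedArray dereferences the same header layout (refcount, size, capacity) through its `inner` pointer, so the refcount < 0 convention for the static empty array applies identically on both sides of the FFI boundary.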