Change the implementation of SharedArray so that it can destruct its contents

Olivier Goffart 2020-09-04 14:04:06 +02:00
parent e19ad3006a
commit c8fa3354be
3 changed files with 221 additions and 113 deletions
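
The old implementation kept the elements behind a triomphe::ThinArc and never ran their destructors when the array was dropped (the previous FFI even carried the comment "This won't call drop on the right type..."). The new implementation hand-rolls the allocation and reference counting so that releasing the last reference destructs every element. A minimal sketch of the behavior this commit guarantees, assuming the SharedArray API below (hypothetical test, not part of the commit):

    #[test]
    fn destructs_contents() {
        use std::rc::Rc;
        // Rc's strong count acts as a drop probe for the elements.
        let probe = Rc::new(());
        let array = SharedArray::from_iter(vec![probe.clone(), probe.clone()].into_iter());
        assert_eq!(Rc::strong_count(&probe), 3); // the probe plus two elements
        drop(array);
        assert_eq!(Rc::strong_count(&probe), 1); // both elements were destructed
    }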

View file

@@ -9,6 +9,7 @@
 LICENSE END */
 #pragma once
 #include "sixtyfps_sharedarray_internal.h"
+#include <atomic>
 
 namespace sixtyfps {
@@ -16,26 +17,29 @@ template<typename T>
 struct SharedArray
 {
     SharedArray()
-    {
-        cbindgen_private::sixtyfps_shared_array_new_null(
-                reinterpret_cast<SharedArray<uint8_t> *>(this));
-    }
+        : inner(const_cast<SharedArrayHeader *>(reinterpret_cast<const SharedArrayHeader *>(
+                cbindgen_private::sixtyfps_shared_array_empty())))
+    { }
     SharedArray(const SharedArray &other)
+        : inner(other.inner)
     {
-        cbindgen_private::sixtyfps_shared_array_clone(
-                reinterpret_cast<SharedArray<uint8_t> *>(this),
-                reinterpret_cast<const SharedArray<uint8_t> *>(&other));
+        if (inner->refcount > 0) {
+            ++inner->refcount;
+        }
     }
     ~SharedArray()
     {
-        cbindgen_private::sixtyfps_shared_array_drop(reinterpret_cast<SharedArray<uint8_t> *>(this));
+        drop();
     }
     SharedArray &operator=(const SharedArray &other)
     {
-        cbindgen_private::sixtyfps_shared_array_drop(reinterpret_cast<SharedArray<uint8_t> *>(this));
-        cbindgen_private::sixtyfps_shared_array_clone(
-                reinterpret_cast<SharedArray<uint8_t> *>(this),
-                reinterpret_cast<const SharedArray<uint8_t> *>(&other));
+        if (other.inner == inner) {
+            return *this;
+        }
+        drop();
+        inner = other.inner;
+        if (inner->refcount > 0) {
+            ++inner->refcount;
+        }
         return *this;
     }
     SharedArray &operator=(SharedArray &&other)
@@ -44,7 +48,36 @@ struct SharedArray
         return *this;
     }
 
+    const T *begin() const
+    {
+        return reinterpret_cast<const T *>(inner + 1);
+    }
+
+    const T *end() const
+    {
+        return begin() + inner->size;
+    }
+
 private:
-    void *inner; // opaque
+    void drop()
+    {
+        // Destruct and free only when we hold the last reference; a negative
+        // refcount marks the static empty array, which must never be freed.
+        if (inner->refcount > 0 && (--inner->refcount) == 0) {
+            auto b = begin(), e = end();
+            for (auto it = b; it < e; ++it) {
+                it->~T();
+            }
+            cbindgen_private::sixtyfps_shared_array_free(
+                    reinterpret_cast<uint8_t *>(inner),
+                    sizeof(SharedArrayHeader) + inner->capacity * sizeof(T),
+                    alignof(SharedArrayHeader));
+        }
+    }
+
+    // Unfortunately, this cannot be generated by cbindgen because std::atomic is not understood
+    struct SharedArrayHeader
+    {
+        std::atomic<std::intptr_t> refcount;
+        std::size_t size;
+        std::size_t capacity;
+    };
+    static_assert(alignof(T) <= alignof(SharedArrayHeader),
+                  "Not yet supported because we would need to add padding");
+    SharedArrayHeader *inner;
 };
 }

View file

@@ -73,7 +73,7 @@ pub mod tests;
 pub fn use_modules() -> usize {
     tests::sixtyfps_mock_elapsed_time as usize
         + signals::ffi::sixtyfps_signal_init as usize
-        + sharedarray::ffi::sixtyfps_shared_array_drop as usize
+        + sharedarray::ffi::sixtyfps_shared_array_empty as usize
         + layout::solve_grid_layout as usize
         + item_tree::ffi::sixtyfps_visit_item_tree as usize
         + graphics::ffi::sixtyfps_new_path_elements as usize

View file

@@ -10,74 +10,95 @@ LICENSE END */
 //! module for the SharedArray and related things
 #![allow(unsafe_code)]
 #![warn(missing_docs)]
+use core::fmt::Debug;
 use core::mem::MaybeUninit;
-use std::{fmt::Debug, fmt::Display, ops::Deref};
-use triomphe::{Arc, HeaderWithLength, ThinArc};
+use core::ops::Deref;
+use core::ptr::NonNull;
+use core::sync::atomic;
+use std::alloc;
+
+#[repr(C)]
+struct SharedArrayHeader {
+    refcount: atomic::AtomicIsize,
+    size: usize,
+    capacity: usize,
+}
+
+#[repr(C)]
+struct SharedArrayInner<T> {
+    header: SharedArrayHeader,
+    data: MaybeUninit<T>,
+}
+
+fn compute_inner_layout<T>(capacity: usize) -> alloc::Layout {
+    alloc::Layout::new::<SharedArrayHeader>()
+        .extend(alloc::Layout::array::<T>(capacity).unwrap())
+        .unwrap()
+        .0
+}
+
+/// Safety: the refcount must be zero and the elements must not already be dropped
+unsafe fn drop_inner<T>(inner: NonNull<SharedArrayInner<T>>) {
+    debug_assert_eq!(inner.as_ref().header.refcount.load(core::sync::atomic::Ordering::Relaxed), 0);
+    let data_ptr = inner.as_ref().data.as_ptr();
+    for x in 0..inner.as_ref().header.size {
+        drop(core::ptr::read(data_ptr.add(x)));
+    }
+    alloc::dealloc(
+        inner.as_ptr() as *mut u8,
+        compute_inner_layout::<T>(inner.as_ref().header.capacity),
+    )
+}
+
+/// Allocate the memory for the SharedArray with the given capacity. Return the inner with size 0
+/// and refcount set to 1
+fn alloc_with_capacity<T>(capacity: usize) -> NonNull<SharedArrayInner<T>> {
+    let ptr = unsafe { alloc::alloc(compute_inner_layout::<T>(capacity)) };
+    unsafe {
+        core::ptr::write(
+            ptr as *mut SharedArrayHeader,
+            SharedArrayHeader { refcount: 1.into(), size: 0, capacity },
+        );
+    }
+    NonNull::new(ptr).unwrap().cast()
+}
 
-#[derive(Clone)]
 #[repr(C)]
 /// SharedArray holds a reference-counted read-only copy of `[T]`.
-pub struct SharedArray<T: 'static> {
-    /// Invariant: The usize header is the `len` of the vector, the contained buffer is `[T]`
-    inner: ThinArc<usize, MaybeUninit<T>>,
+pub struct SharedArray<T> {
+    inner: NonNull<SharedArrayInner<T>>,
 }
 
-struct PaddingFillingIter<'a, U> {
-    iter: &'a mut dyn Iterator<Item = MaybeUninit<U>>,
-    pos: usize,
-    len: usize,
-    padding_elements: usize,
-}
-
-impl<'a, U> PaddingFillingIter<'a, U> {
-    fn new(len: usize, iter: &'a mut dyn Iterator<Item = MaybeUninit<U>>) -> Self {
-        let alignment = core::mem::align_of::<usize>();
-        // ThinArc can't deal with empty arrays, so add padding for empty arrays.
-        let mut padding_elements = if len == 0 { 1 } else { 0 };
-        // Add padding to ensure that the size in bytes is a multiple of the pointer alignment.
-        // This can mean different increments depending on whether sizeof(U) is less or greater
-        // than align_of(usize).
-        loop {
-            let size_in_bytes = (len + padding_elements) * core::mem::size_of::<U>();
-            let byte_aligned_size = (size_in_bytes + alignment - 1) & !(alignment - 1);
-            let padding_bytes = byte_aligned_size - size_in_bytes;
-            if padding_bytes == 0 {
-                break;
-            }
-            padding_elements += 1;
-        }
-        Self { iter, pos: 0, len, padding_elements }
-    }
-}
-
-impl<'a, U: Clone> Iterator for PaddingFillingIter<'a, U> {
-    type Item = MaybeUninit<U>;
-    fn next(&mut self) -> Option<MaybeUninit<U>> {
-        let pos = self.pos;
-        self.pos += 1;
-        if pos < self.len {
-            self.iter.next()
-        } else if pos < self.len + self.padding_elements {
-            Some(MaybeUninit::uninit())
-        } else {
-            None
-        }
-    }
-    fn size_hint(&self) -> (usize, Option<usize>) {
-        let l = self.len + self.padding_elements;
-        (l, Some(l))
-    }
-}
-impl<'a, U: Clone> core::iter::ExactSizeIterator for PaddingFillingIter<'a, U> {}
+impl<T> Drop for SharedArray<T> {
+    fn drop(&mut self) {
+        unsafe {
+            // A negative refcount marks the static empty array, which is never freed.
+            if self.inner.as_ref().header.refcount.load(atomic::Ordering::Relaxed) < 0 {
+                return;
+            }
+            if self.inner.as_ref().header.refcount.fetch_sub(1, atomic::Ordering::SeqCst) == 1 {
+                drop_inner(self.inner)
+            }
+        }
+    }
+}
+
+impl<T> Clone for SharedArray<T> {
+    fn clone(&self) -> Self {
+        unsafe {
+            if self.inner.as_ref().header.refcount.load(atomic::Ordering::Relaxed) > 0 {
+                self.inner.as_ref().header.refcount.fetch_add(1, atomic::Ordering::SeqCst);
+            }
+            return SharedArray { inner: self.inner };
+        }
+    }
+}
 
-impl<T: Clone> SharedArray<T> {
+impl<T> SharedArray<T> {
     fn as_ptr(&self) -> *const T {
-        self.inner.slice.as_ptr() as *const T
+        unsafe { self.inner.as_ref().data.as_ptr() }
     }
 
-    /// Size of the string, in bytes
+    /// The number of elements in the array
     pub fn len(&self) -> usize {
-        self.inner.header.header
+        unsafe { self.inner.as_ref().header.size }
     }
 
     /// Return a slice to the array
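
As a worked example of compute_inner_layout (a hypothetical check, not in the commit): on a typical 64-bit target SharedArrayHeader occupies 24 bytes with 8-byte alignment, so capacity 4 of u32 yields a single 24 + 4 * 4 = 40 byte allocation.

    #[test]
    fn layout_sketch() {
        let layout = compute_inner_layout::<u32>(4);
        // Alignment comes from the header, since align_of::<u32>() <= align_of::<SharedArrayHeader>().
        assert_eq!(layout.align(), core::mem::align_of::<SharedArrayHeader>());
        // Header plus elements; Layout::extend would insert padding only for over-aligned T.
        assert_eq!(
            layout.size(),
            core::mem::size_of::<SharedArrayHeader>() + 4 * core::mem::size_of::<u32>()
        );
    }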
@@ -86,67 +107,54 @@ impl<T: Clone> SharedArray<T> {
     }
 
     /// Constructs a new SharedArray from the given iterator.
-    pub fn from_iter(iter: impl Iterator<Item = T> + ExactSizeIterator) -> Self {
-        let len = iter.len();
-        let item_iter = &mut iter.map(|item| MaybeUninit::new(item));
-        let iter = PaddingFillingIter::new(len, item_iter);
-
-        SharedArray {
-            inner: Arc::into_thin(Arc::from_header_and_iter(
-                HeaderWithLength::new(len, iter.size_hint().0),
-                iter,
-            )),
-        }
-    }
+    pub fn from_iter(mut iter: impl Iterator<Item = T> + ExactSizeIterator) -> Self {
+        let capacity = iter.len();
+        let inner = alloc_with_capacity::<T>(capacity);
+        let mut result = SharedArray { inner };
+        let mut size = 0;
+        while let Some(x) = iter.next() {
+            assert_ne!(size, capacity);
+            unsafe {
+                core::ptr::write(result.inner.as_mut().data.as_mut_ptr().add(size), x);
+                size += 1;
+                // Update the size after every write so that a panicking iterator
+                // only drops the elements that were actually produced.
+                result.inner.as_mut().header.size = size;
+            }
+        }
+        result
+    }
 
     /// Constructs a new SharedArray from the given slice.
-    pub fn from(slice: &[T]) -> Self {
+    pub fn from(slice: &[T]) -> Self
+    where
+        T: Clone,
+    {
         SharedArray::from_iter(slice.iter().cloned())
     }
 }
 
-impl<T: Clone> Deref for SharedArray<T> {
+impl<T> Deref for SharedArray<T> {
     type Target = [T];
     fn deref(&self) -> &Self::Target {
         self.as_slice()
     }
 }
 
-trait StaticNull: Sized + 'static {
-    const NULL: once_cell::sync::Lazy<ThinArc<usize, MaybeUninit<Self>>>;
-}
-impl<T: Clone + Default + Sized + 'static> StaticNull for T {
-    const NULL: once_cell::sync::Lazy<ThinArc<usize, MaybeUninit<T>>> =
-        once_cell::sync::Lazy::new(|| {
-            let len = 0;
-            let null_iter = &mut std::iter::empty();
-            let iter = PaddingFillingIter::new(len, null_iter);
-            Arc::into_thin(Arc::from_header_and_iter(
-                HeaderWithLength::new(len, iter.size_hint().0),
-                iter,
-            ))
-        });
-}
+static SHARED_NULL: SharedArrayHeader =
+    SharedArrayHeader { refcount: std::sync::atomic::AtomicIsize::new(-1), size: 0, capacity: 0 };
 
-impl<T: Clone + Default> Default for SharedArray<T> {
+impl<T> Default for SharedArray<T> {
     fn default() -> Self {
-        SharedArray { inner: StaticNull::NULL.clone() }
+        SharedArray { inner: NonNull::from(&SHARED_NULL).cast() }
     }
 }
 
-impl<T: Clone + Debug> Debug for SharedArray<T> {
+impl<T: Debug> Debug for SharedArray<T> {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
         self.as_slice().fmt(f)
     }
 }
 
-impl<T: Clone + Debug> Display for SharedArray<T> {
-    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        self.as_slice().fmt(f)
-    }
-}
-
-impl<T: Clone> AsRef<[T]> for SharedArray<T> {
+impl<T> AsRef<[T]> for SharedArray<T> {
     #[inline]
     fn as_ref(&self) -> &[T] {
         self.as_slice()
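
A hypothetical test sketching the sharing semantics of from, clone, and default (not part of the commit):

    #[test]
    fn sharing_sketch() {
        let slice: &[i32] = &[1, 2, 3];
        let a = SharedArray::from(slice);
        let b = a.clone(); // same allocation; only the refcount is incremented
        assert_eq!(b.as_slice(), slice);
        drop(a); // the elements stay alive through `b`
        assert_eq!(b.len(), 3);
        let empty = SharedArray::<i32>::default(); // points at SHARED_NULL, never freed
        assert_eq!(empty.len(), 0);
    }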
@@ -156,14 +164,85 @@ impl<T: Clone> AsRef<[T]> for SharedArray<T> {
 impl<T, U> PartialEq<U> for SharedArray<T>
 where
     U: ?Sized + AsRef<[T]>,
-    T: Clone + PartialEq,
+    T: PartialEq,
 {
     fn eq(&self, other: &U) -> bool {
         self.as_slice() == other.as_ref()
     }
 }
 
-impl<T: Clone + PartialEq> Eq for SharedArray<T> {}
+impl<T: Eq> Eq for SharedArray<T> {}
+
+impl<T: Clone> IntoIterator for SharedArray<T> {
+    type Item = T;
+    type IntoIter = IntoIter<T>;
+    fn into_iter(self) -> Self::IntoIter {
+        IntoIter(unsafe {
+            if self.inner.as_ref().header.refcount.load(atomic::Ordering::Relaxed) == 1 {
+                // We are the only owner, so move the elements out instead of cloning them.
+                let inner = self.inner;
+                std::mem::forget(self);
+                inner.as_ref().header.refcount.store(0, atomic::Ordering::Relaxed);
+                IntoIterInner::UnShared(inner, 0)
+            } else {
+                IntoIterInner::Shared(self, 0)
+            }
+        })
+    }
+}
+
+enum IntoIterInner<T> {
+    Shared(SharedArray<T>, usize),
+    // Elements up to the usize member are already moved out
+    UnShared(NonNull<SharedArrayInner<T>>, usize),
+}
+
+impl<T> Drop for IntoIterInner<T> {
+    fn drop(&mut self) {
+        match self {
+            IntoIterInner::Shared(..) => { /* drop of the SharedArray takes care of it */ }
+            IntoIterInner::UnShared(inner, begin) => unsafe {
+                debug_assert_eq!(inner.as_ref().header.refcount.load(atomic::Ordering::Relaxed), 0);
+                let data_ptr = inner.as_ref().data.as_ptr();
+                for x in (*begin)..inner.as_ref().header.size {
+                    drop(core::ptr::read(data_ptr.add(x)));
+                }
+                alloc::dealloc(
+                    inner.as_ptr() as *mut u8,
+                    compute_inner_layout::<T>(inner.as_ref().header.capacity),
+                )
+            },
+        }
+    }
+}
+
+/// An iterator that moves out of a SharedArray.
+///
+/// This `struct` is created by the `into_iter` method on [`SharedArray`] (provided
+/// by the [`IntoIterator`] trait).
+pub struct IntoIter<T>(IntoIterInner<T>);
+
+impl<T: Clone> Iterator for IntoIter<T> {
+    type Item = T;
+    fn next(&mut self) -> Option<Self::Item> {
+        match &mut self.0 {
+            IntoIterInner::Shared(array, moved) => {
+                let result = array.as_slice().get(*moved).cloned();
+                *moved += 1;
+                result
+            }
+            IntoIterInner::UnShared(inner, begin) => unsafe {
+                if *begin < inner.as_ref().header.size {
+                    let r = core::ptr::read(inner.as_ref().data.as_ptr().add(*begin));
+                    *begin += 1;
+                    Some(r)
+                } else {
+                    None
+                }
+            },
+        }
+    }
+}
 
 #[test]
 fn simple_test() {
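
The two IntoIter paths can be sketched with a hypothetical test (not part of the commit): a shared array clones its elements out, while a uniquely owned one moves them without cloning.

    #[test]
    fn into_iter_sketch() {
        let a = SharedArray::from_iter(vec!["x".to_string(), "y".to_string()].into_iter());
        let b = a.clone();
        // `a` is shared (refcount 2), so its iterator clones each element...
        let cloned: Vec<String> = a.into_iter().collect();
        // ...which also releases `a`, so `b` is now the sole owner and moves them out.
        let moved: Vec<String> = b.into_iter().collect();
        assert_eq!(cloned, moved);
    }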
@@ -184,24 +263,20 @@ pub(crate) mod ffi {
     use super::*;
 
     #[no_mangle]
-    /// This function is used for the low-level C++ interface to allocate the backing vector for an empty shared array.
-    pub unsafe extern "C" fn sixtyfps_shared_array_new_null(out: *mut SharedArray<u8>) {
-        core::ptr::write(out, SharedArray::<u8>::default());
+    /// This function is used for the low-level C++ interface to allocate the backing vector of a SharedArray.
+    pub unsafe extern "C" fn sixtyfps_shared_array_allocate(size: usize, align: usize) -> *mut u8 {
+        std::alloc::alloc(std::alloc::Layout::from_size_align(size, align).unwrap())
     }
 
     #[no_mangle]
-    /// This function is used for the low-level C++ interface to clone a shared array by increasing its reference count.
-    pub unsafe extern "C" fn sixtyfps_shared_array_clone(
-        out: *mut SharedArray<u8>,
-        source: &SharedArray<u8>,
-    ) {
-        core::ptr::write(out, source.clone());
+    /// This function is used for the low-level C++ interface to deallocate the backing vector of a SharedArray.
+    pub unsafe extern "C" fn sixtyfps_shared_array_free(ptr: *mut u8, size: usize, align: usize) {
+        std::alloc::dealloc(ptr, std::alloc::Layout::from_size_align(size, align).unwrap())
     }
 
     #[no_mangle]
-    /// This function is used for the low-level C++ interface to decrease the reference count of a shared array.
-    pub unsafe extern "C" fn sixtyfps_shared_array_drop(out: *mut SharedArray<u8>) {
-        // ?? This won't call drop on the right type...
-        core::ptr::read(out);
+    /// This function is used for the low-level C++ interface to initialize the empty SharedArray.
+    pub unsafe extern "C" fn sixtyfps_shared_array_empty() -> *const u8 {
+        &SHARED_NULL as *const _ as *const u8
     }
 }
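
One consistency note on the FFI: the C++ drop() passes sizeof(SharedArrayHeader) + capacity * sizeof(T) to sixtyfps_shared_array_free, while the Rust side allocates with Layout::extend. The static_assert in the header (alignof(T) <= alignof(SharedArrayHeader)) is what makes the two agree, since it rules out padding between the header and the data. A hypothetical check of that reasoning (not part of the commit):

    #[test]
    fn ffi_size_agreement_sketch() {
        let capacity = 5;
        let rust_side = compute_inner_layout::<u16>(capacity).size();
        // What the C++ drop() would pass to sixtyfps_shared_array_free for T = u16:
        let cpp_side =
            core::mem::size_of::<SharedArrayHeader>() + capacity * core::mem::size_of::<u16>();
        assert_eq!(rust_side, cpp_side);
    }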