Change the implementation of SharedArray so that it can destruct its contents
This commit is contained in:
parent e19ad3006a
commit c8fa3354be
3 changed files with 221 additions and 113 deletions
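The point of the change: the old storage, `ThinArc<usize, MaybeUninit<T>>`, dropped its `MaybeUninit<T>` slots without ever running `T`'s destructor; the new manual allocation can. A hypothetical check of the behavior this commit enables, assuming `SharedArray` from this corelib is in scope:

use std::rc::Rc;

fn main() {
    let witness = Rc::new(());
    let array = SharedArray::from_iter((0..10).map(|_| witness.clone()));
    assert_eq!(Rc::strong_count(&witness), 11);
    drop(array); // last handle: every stored Rc must be destructed here
    assert_eq!(Rc::strong_count(&witness), 1);
}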
@@ -10,74 +10,95 @@ LICENSE END */
 //! module for the SharedArray and related things
 #![allow(unsafe_code)]
 #![warn(missing_docs)]
+use core::fmt::Debug;
 use core::mem::MaybeUninit;
-use std::{fmt::Debug, fmt::Display, ops::Deref};
-use triomphe::{Arc, HeaderWithLength, ThinArc};
+use core::ops::Deref;
+use core::ptr::NonNull;
+use core::sync::atomic;
+use std::alloc;

+#[repr(C)]
+struct SharedArrayHeader {
+    refcount: atomic::AtomicIsize,
+    size: usize,
+    capacity: usize,
+}
+
+#[repr(C)]
+struct SharedArrayInner<T> {
+    header: SharedArrayHeader,
+    data: MaybeUninit<T>,
+}
+
+fn compute_inner_layout<T>(capacity: usize) -> alloc::Layout {
+    alloc::Layout::new::<SharedArrayHeader>()
+        .extend(alloc::Layout::array::<T>(capacity).unwrap())
+        .unwrap()
+        .0
+}
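Here `Layout::extend` places the element array directly after the header, padding the header's size up to the array's alignment; the trailing `.0` keeps the combined layout and discards the field offset. A minimal sketch of the same computation with a stand-in `Header` type (names here are illustrative, not from the commit):

use std::alloc::Layout;
use std::sync::atomic::AtomicIsize;

#[repr(C)]
struct Header {
    refcount: AtomicIsize,
    size: usize,
    capacity: usize,
}

fn main() {
    // Header followed by [u32; 5], as in compute_inner_layout::<u32>(5).
    let (layout, offset) = Layout::new::<Header>()
        .extend(Layout::array::<u32>(5).unwrap())
        .unwrap();
    // On a 64-bit target: the header is 24 bytes and 8-aligned; the array
    // starts at offset 24, so the combined size is 24 + 5 * 4 = 44, align 8.
    assert_eq!(offset, std::mem::size_of::<Header>());
    println!("size = {}, align = {}", layout.size(), layout.align());
}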
+unsafe fn drop_inner<T>(inner: NonNull<SharedArrayInner<T>>) {
+    debug_assert_eq!(inner.as_ref().header.refcount.load(core::sync::atomic::Ordering::Relaxed), 0);
+    let data_ptr = inner.as_ref().data.as_ptr();
+    for x in 0..inner.as_ref().header.size {
+        drop(core::ptr::read(data_ptr.add(x)));
+    }
+    alloc::dealloc(
+        inner.as_ptr() as *mut u8,
+        compute_inner_layout::<T>(inner.as_ref().header.capacity),
+    )
+}
+
+/// Allocate the memory for the SharedArray with the given capacity. Return the inner with size and refcount set to 1
+fn alloc_with_capacity<T>(capacity: usize) -> NonNull<SharedArrayInner<T>> {
+    let ptr = unsafe { alloc::alloc(compute_inner_layout::<T>(capacity)) };
+    unsafe {
+        core::ptr::write(
+            ptr as *mut SharedArrayHeader,
+            SharedArrayHeader { refcount: 1.into(), size: 0, capacity },
+        );
+    }
+    NonNull::new(ptr).unwrap().cast()
+}
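The invariant these two functions share is that `alloc` and `dealloc` are always called with the same computed layout, and only the first `size` element slots are ever treated as initialized. A self-contained round-trip of that pattern, reusing the illustrative `Header` stand-in:

use std::alloc::{alloc, dealloc, Layout};
use std::ptr::NonNull;
use std::sync::atomic::AtomicIsize;

#[repr(C)]
struct Header {
    refcount: AtomicIsize,
    size: usize,
    capacity: usize,
}

fn main() {
    let layout = Layout::new::<Header>()
        .extend(Layout::array::<u32>(4).unwrap())
        .unwrap()
        .0;
    unsafe {
        let raw = alloc(layout);
        // Check for allocation failure before writing the header.
        let inner = NonNull::new(raw).expect("allocation failed").cast::<Header>();
        inner.as_ptr().write(Header { refcount: AtomicIsize::new(1), size: 0, capacity: 4 });
        assert_eq!(inner.as_ref().capacity, 4);
        // The element slots were never initialized, so there is nothing to
        // drop; free with the same layout that was used to allocate.
        dealloc(inner.as_ptr() as *mut u8, layout);
    }
}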
-#[derive(Clone)]
 #[repr(C)]
 /// SharedArray holds a reference-counted read-only copy of `[T]`.
-pub struct SharedArray<T: 'static> {
-    /// Invariant: The usize header is the `len` of the vector, the contained buffer is `[T]`
-    inner: ThinArc<usize, MaybeUninit<T>>,
+pub struct SharedArray<T> {
+    inner: NonNull<SharedArrayInner<T>>,
 }

-struct PaddingFillingIter<'a, U> {
-    iter: &'a mut dyn Iterator<Item = MaybeUninit<U>>,
-    pos: usize,
-    len: usize,
-    padding_elements: usize,
-}
-
-impl<'a, U> PaddingFillingIter<'a, U> {
-    fn new(len: usize, iter: &'a mut dyn Iterator<Item = MaybeUninit<U>>) -> Self {
-        let alignment = core::mem::align_of::<usize>();
-        let mut padding_elements = if len == 0 { 1 } else { 0 }; // ThinArc can't deal with empty arrays, so add padding for empty arrays.
-
-        // Add padding to ensure that the size in bytes is a multiple of the pointer alignment. This can mean different
-        // increments depending on whether sizeof(U) is less or greater than align_of(usize).
-        loop {
-            let size_in_bytes = (len + padding_elements) * core::mem::size_of::<U>();
-            let byte_aligned_size = (size_in_bytes + alignment - 1) & !(alignment - 1);
-            let padding_bytes = byte_aligned_size - size_in_bytes;
-            if padding_bytes == 0 {
-                break;
-            }
-            padding_elements += 1;
-        }
-
-        Self { iter, pos: 0, len, padding_elements }
-    }
-}
-
-impl<'a, U: Clone> Iterator for PaddingFillingIter<'a, U> {
-    type Item = MaybeUninit<U>;
-    fn next(&mut self) -> Option<MaybeUninit<U>> {
-        let pos = self.pos;
-        self.pos += 1;
-        if pos < self.len {
-            self.iter.next()
-        } else if pos < self.len + self.padding_elements {
-            Some(MaybeUninit::uninit())
-        } else {
-            None
-        }
-    }
-    fn size_hint(&self) -> (usize, Option<usize>) {
-        let l = self.len + self.padding_elements;
-        (l, Some(l))
-    }
-}
-impl<'a, U: Clone> core::iter::ExactSizeIterator for PaddingFillingIter<'a, U> {}
+impl<T> Drop for SharedArray<T> {
+    fn drop(&mut self) {
+        unsafe {
+            if self.inner.as_ref().header.refcount.load(atomic::Ordering::Relaxed) < 0 {
+                return;
+            }
+            if self.inner.as_ref().header.refcount.fetch_sub(1, atomic::Ordering::SeqCst) == 1 {
+                drop_inner(self.inner)
+            }
+        }
+    }
+}
+
+impl<T> Clone for SharedArray<T> {
+    fn clone(&self) -> Self {
+        unsafe {
+            if self.inner.as_ref().header.refcount.load(atomic::Ordering::Relaxed) > 0 {
+                self.inner.as_ref().header.refcount.fetch_add(1, atomic::Ordering::SeqCst);
+            }
+            return SharedArray { inner: self.inner };
+        }
+    }
+}
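The refcount protocol behind these two impls: a positive count is a live heap allocation, while a negative count marks the static empty array (`SHARED_NULL`, introduced later in this diff) that `clone` and `drop` must leave untouched. A stand-alone sketch of that convention, with illustrative names not taken from the commit:

use std::sync::atomic::{AtomicIsize, Ordering};

struct Handle<'a> {
    refcount: &'a AtomicIsize,
}

impl<'a> Handle<'a> {
    fn clone_handle(&self) -> Handle<'a> {
        // Statics (negative count) are never incremented.
        if self.refcount.load(Ordering::Relaxed) > 0 {
            self.refcount.fetch_add(1, Ordering::SeqCst);
        }
        Handle { refcount: self.refcount }
    }

    // Returns true when the caller was the last owner and should free.
    fn drop_handle(self) -> bool {
        if self.refcount.load(Ordering::Relaxed) < 0 {
            return false; // static sentinel, never freed
        }
        self.refcount.fetch_sub(1, Ordering::SeqCst) == 1
    }
}

fn main() {
    let count = AtomicIsize::new(1);
    let a = Handle { refcount: &count };
    let b = a.clone_handle(); // count is now 2
    assert!(!a.drop_handle()); // 2 -> 1, not the last owner
    assert!(b.drop_handle()); // 1 -> 0, last owner frees

    let sentinel = AtomicIsize::new(-1);
    let s = Handle { refcount: &sentinel };
    assert!(!s.drop_handle()); // negative: shared static, never freed
}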
-impl<T: Clone> SharedArray<T> {
+impl<T> SharedArray<T> {
     fn as_ptr(&self) -> *const T {
-        self.inner.slice.as_ptr() as *const T
+        unsafe { self.inner.as_ref().data.as_ptr() }
     }

     /// Size of the string, in bytes
     pub fn len(&self) -> usize {
-        self.inner.header.header
+        unsafe { self.inner.as_ref().header.size }
     }

     /// Return a slice to the array
@@ -86,67 +107,54 @@ impl<T: Clone> SharedArray<T> {
     }

     /// Constructs a new SharedArray from the given iterator.
-    pub fn from_iter(iter: impl Iterator<Item = T> + ExactSizeIterator) -> Self {
-        let len = iter.len();
-        let item_iter = &mut iter.map(|item| MaybeUninit::new(item));
-        let iter = PaddingFillingIter::new(len, item_iter);
-
-        SharedArray {
-            inner: Arc::into_thin(Arc::from_header_and_iter(
-                HeaderWithLength::new(len, iter.size_hint().0),
-                iter,
-            )),
-        }
+    pub fn from_iter(mut iter: impl Iterator<Item = T> + ExactSizeIterator) -> Self {
+        let capacity = iter.len();
+        let inner = alloc_with_capacity::<T>(capacity);
+        let mut result = SharedArray { inner };
+        let mut size = 0;
+        while let Some(x) = iter.next() {
+            assert_ne!(size, capacity);
+            unsafe {
+                core::ptr::write(result.inner.as_mut().data.as_mut_ptr().add(size), x);
+                size += 1;
+                result.inner.as_mut().header.size = size;
+            }
+        }
+        result
     }

     /// Constructs a new SharedArray from the given slice.
-    pub fn from(slice: &[T]) -> Self {
+    pub fn from(slice: &[T]) -> Self
+    where
+        T: Clone,
+    {
         SharedArray::from_iter(slice.iter().cloned())
     }
 }
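Usage of the two constructors, as a hypothetical snippet assuming this corelib crate is in scope (`as_slice` is the accessor referenced by the hunk header above). Note how `from_iter` bumps `header.size` only after each successful write, so a panicking iterator leaks no partially-initialized element:

fn main() {
    // from_iter moves each produced element directly into the allocation.
    let squares = SharedArray::from_iter((1u32..5).map(|x| x * x));
    assert_eq!(squares.as_slice(), &[1, 4, 9, 16]);

    // from clones out of a borrowed slice, hence the `T: Clone` bound it keeps.
    let copy = SharedArray::from(&[1u32, 2, 3][..]);
    assert_eq!(copy.len(), 3);
}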
-impl<T: Clone> Deref for SharedArray<T> {
+impl<T> Deref for SharedArray<T> {
     type Target = [T];
     fn deref(&self) -> &Self::Target {
         self.as_slice()
     }
 }
-trait StaticNull: Sized + 'static {
-    const NULL: once_cell::sync::Lazy<ThinArc<usize, MaybeUninit<Self>>>;
-}
-impl<T: Clone + Default + Sized + 'static> StaticNull for T {
-    const NULL: once_cell::sync::Lazy<ThinArc<usize, MaybeUninit<T>>> =
-        once_cell::sync::Lazy::new(|| {
-            let len = 0;
-            let null_iter = &mut std::iter::empty();
-            let iter = PaddingFillingIter::new(len, null_iter);
-
-            Arc::into_thin(Arc::from_header_and_iter(
-                HeaderWithLength::new(len, iter.size_hint().0),
-                iter,
-            ))
-        });
-}
+static SHARED_NULL: SharedArrayHeader =
+    SharedArrayHeader { refcount: std::sync::atomic::AtomicIsize::new(-1), size: 0, capacity: 0 };

-impl<T: Clone + Default> Default for SharedArray<T> {
+impl<T> Default for SharedArray<T> {
     fn default() -> Self {
-        SharedArray { inner: StaticNull::NULL.clone() }
+        SharedArray { inner: NonNull::from(&SHARED_NULL).cast() }
     }
 }
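With `SHARED_NULL` in place, `default()` performs no allocation: every empty `SharedArray`, of every element type, points at the same static header, which is why the `T: Default` bound could be dropped. A hypothetical check, assuming the crate is in scope:

fn main() {
    let a: SharedArray<u32> = SharedArray::default();
    let b: SharedArray<String> = SharedArray::default();
    // Both share the static SHARED_NULL header; dropping them is a no-op
    // because the sentinel refcount is negative.
    assert_eq!(a.len(), 0);
    assert_eq!(b.len(), 0);
}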
-impl<T: Clone + Debug> Debug for SharedArray<T> {
+impl<T: Debug> Debug for SharedArray<T> {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
         self.as_slice().fmt(f)
     }
 }

-impl<T: Clone + Debug> Display for SharedArray<T> {
-    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        self.as_slice().fmt(f)
-    }
-}
-
-impl<T: Clone> AsRef<[T]> for SharedArray<T> {
+impl<T> AsRef<[T]> for SharedArray<T> {
     #[inline]
     fn as_ref(&self) -> &[T] {
         self.as_slice()

@@ -156,14 +164,85 @@ impl<T: Clone> AsRef<[T]> for SharedArray<T> {
 impl<T, U> PartialEq<U> for SharedArray<T>
 where
     U: ?Sized + AsRef<[T]>,
-    T: Clone + PartialEq,
+    T: PartialEq,
 {
     fn eq(&self, other: &U) -> bool {
         self.as_slice() == other.as_ref()
     }
 }

-impl<T: Clone + PartialEq> Eq for SharedArray<T> {}
+impl<T: Eq> Eq for SharedArray<T> {}

+impl<T: Clone> IntoIterator for SharedArray<T> {
+    type Item = T;
+    type IntoIter = IntoIter<T>;
+    fn into_iter(self) -> Self::IntoIter {
+        IntoIter(unsafe {
+            if self.inner.as_ref().header.refcount.load(atomic::Ordering::Relaxed) == 1 {
+                let inner = self.inner;
+                std::mem::forget(self);
+                inner.as_ref().header.refcount.store(0, atomic::Ordering::Relaxed);
+                IntoIterInner::UnShared(inner, 0)
+            } else {
+                IntoIterInner::Shared(self, 0)
+            }
+        })
+    }
+}
+
+enum IntoIterInner<T> {
+    Shared(SharedArray<T>, usize),
+    // Elements up to the usize member are already moved out
+    UnShared(NonNull<SharedArrayInner<T>>, usize),
+}
+
+impl<T> Drop for IntoIterInner<T> {
+    fn drop(&mut self) {
+        match self {
+            IntoIterInner::Shared(..) => { /* drop of SharedArray takes care of it */ }
+            IntoIterInner::UnShared(inner, begin) => unsafe {
+                debug_assert_eq!(inner.as_ref().header.refcount.load(atomic::Ordering::Relaxed), 0);
+                let data_ptr = inner.as_ref().data.as_ptr();
+                for x in (*begin)..inner.as_ref().header.size {
+                    drop(core::ptr::read(data_ptr.add(x)));
+                }
+                alloc::dealloc(
+                    inner.as_ptr() as *mut u8,
+                    compute_inner_layout::<T>(inner.as_ref().header.capacity),
+                )
+            },
+        }
+    }
+}
+
+/// An iterator that moves out of a SharedArray.
+///
+/// This `struct` is created by the `into_iter` method on [`SharedArray`] (provided
+/// by the [`IntoIterator`] trait).
+pub struct IntoIter<T>(IntoIterInner<T>);
+
+impl<T: Clone> Iterator for IntoIter<T> {
+    type Item = T;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        match &mut self.0 {
+            IntoIterInner::Shared(array, moved) => {
+                let result = array.as_slice().get(*moved).cloned();
+                *moved += 1;
+                result
+            }
+            IntoIterInner::UnShared(inner, begin) => unsafe {
+                if *begin < inner.as_ref().header.size {
+                    let r = core::ptr::read(inner.as_ref().data.as_ptr().add(*begin));
+                    *begin += 1;
+                    Some(r)
+                } else {
+                    None
+                }
+            },
+        }
+    }
}
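`into_iter` thus has two paths: a sole owner (refcount 1) moves elements out without cloning, while a shared array stays intact and hands out clones. A hypothetical demonstration of both, assuming the crate is in scope:

fn main() {
    // Unique owner: refcount is 1, so elements are moved out, not cloned.
    let unique = SharedArray::from_iter((0..3).map(|x| x.to_string()));
    let moved: Vec<String> = unique.into_iter().collect();
    assert_eq!(moved, ["0", "1", "2"]);

    // Shared: a second handle keeps the refcount above 1, so items are cloned.
    let shared = SharedArray::from(&[1u32, 2, 3][..]);
    let _other = shared.clone();
    let cloned: Vec<u32> = shared.into_iter().collect();
    assert_eq!(cloned, [1, 2, 3]);
}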

 #[test]
 fn simple_test() {
@@ -184,24 +263,20 @@ pub(crate) mod ffi {
     use super::*;

     #[no_mangle]
-    /// This function is used for the low-level C++ interface to allocate the backing vector for an empty shared array.
-    pub unsafe extern "C" fn sixtyfps_shared_array_new_null(out: *mut SharedArray<u8>) {
-        core::ptr::write(out, SharedArray::<u8>::default());
+    /// This function is used for the low-level C++ interface to allocate the backing vector of a SharedArray.
+    pub unsafe extern "C" fn sixtyfps_shared_array_allocate(size: usize, align: usize) -> *mut u8 {
+        std::alloc::alloc(std::alloc::Layout::from_size_align(size, align).unwrap())
     }

     #[no_mangle]
-    /// This function is used for the low-level C++ interface to clone a shared array by increasing its reference count.
-    pub unsafe extern "C" fn sixtyfps_shared_array_clone(
-        out: *mut SharedArray<u8>,
-        source: &SharedArray<u8>,
-    ) {
-        core::ptr::write(out, source.clone());
+    /// This function is used for the low-level C++ interface to deallocate the backing vector of a SharedArray
+    pub unsafe extern "C" fn sixtyfps_shared_array_free(ptr: *mut u8, size: usize, align: usize) {
+        std::alloc::dealloc(ptr, std::alloc::Layout::from_size_align(size, align).unwrap())
     }

     #[no_mangle]
-    /// This function is used for the low-level C++ interface to decrease the reference count of a shared array.
-    pub unsafe extern "C" fn sixtyfps_shared_array_drop(out: *mut SharedArray<u8>) {
-        // ?? This won't call drop on the right type...
-        core::ptr::read(out);
+    /// This function is used for the low-level C++ interface to initialize the empty SharedArray.
+    pub unsafe extern "C" fn sixtyfps_shared_array_empty() -> *const u8 {
+        &SHARED_NULL as *const _ as *const u8
     }
 }
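A hypothetical in-crate exercise of this C ABI, sketching the contract the C++ wrapper is presumably expected to uphold: every allocate is paired with a free of the same size and alignment, and the empty sentinel is never freed:

fn main() {
    unsafe {
        let buf = ffi::sixtyfps_shared_array_allocate(64, 8);
        assert!(!buf.is_null());
        ffi::sixtyfps_shared_array_free(buf, 64, 8);

        // Points at the static SHARED_NULL header, not heap memory.
        let empty = ffi::sixtyfps_shared_array_empty();
        assert!(!empty.is_null());
    }
}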