@@ -21,12 +21,18 @@ use crate::structs::shared_buffer::internal::vtable::Vtable;
/// Reference-counted, type-erased owner header for a shared byte buffer.
///
/// Enables SharedBuffer to manage any container implementing AsRef<[u8]>
/// with atomic reference counting for safe sharing.
///
/// The `drop_fn` field stores a type-erased destructor so that
/// `owned_drop` can properly clean up the concrete `Owned<T>` without
/// knowing T at the vtable level.
#[repr(C)]
pub(crate) struct Owned<T: AsRef<[u8]> + Send + Sync + 'static> {
    // Number of live SharedBuffer handles referencing `owner`.
    // Must stay the FIRST field: `owned_drop` reinterprets the raw
    // handle pointer as `*const AtomicUsize`.
    pub(crate) ref_cnt: AtomicUsize,
    // Type-erased destructor that drops the concrete `Owned<T>`.
    // Must stay the SECOND field: `owned_drop` reads it at offset
    // `size_of::<AtomicUsize>()`, which `#[repr(C)]` makes identical
    // for every T.
    pub(crate) drop_fn: unsafe fn(*mut ()),
    // The buffer-owning container; the byte data lives inside it.
    pub(crate) owner: T,
}
2934
35+
3036/// Clones owned buffer by incrementing reference count.
3137unsafe fn owned_clone ( h : & AtomicPtr < ( ) > , p : * const u8 , l : usize ) -> SharedBuffer {
3238 let raw = h. load ( Ordering :: Acquire ) ;
@@ -43,14 +49,26 @@ unsafe fn owned_clone(h: &AtomicPtr<()>, p: *const u8, l: usize) -> SharedBuffer
4349}
4450
/// Decrements reference count, deallocating if last reference.
///
/// Reads the type-erased destructor stored in the `Owned` header
/// to properly drop the concrete `Owned<T>` and run T's destructor.
///
/// `h` is the type-erased handle to the `Owned<T>` allocation; `_p` and
/// `_l` (data pointer and length) are unused because the allocation is
/// reclaimed as a whole via `drop_fn`.
unsafe fn owned_drop(h: &mut AtomicPtr<()>, _p: *const u8, _l: usize) {
    let raw = h.load(Ordering::Acquire);
    if raw.is_null() {
        // Null handle: nothing owned, nothing to release.
        return;
    }
    // SAFETY: a non-null handle points at an `Owned<T>`, whose #[repr(C)]
    // layout places `ref_cnt: AtomicUsize` as the first field.
    let ref_cnt = unsafe { &*(raw as *const AtomicUsize) };
    // AcqRel: the Release half publishes this thread's prior uses of the
    // buffer; the Acquire half ensures the thread that sees the count hit
    // zero observes all other threads' uses before deallocating.
    if ref_cnt.fetch_sub(1, Ordering::AcqRel) == 1 {
        // Read the drop function stored after ref_cnt in the Owned header.
        // Owned is #[repr(C)] with ref_cnt first, drop_fn second, so the
        // layout is the same for all Owned<T>.
        unsafe {
            // NOTE(review): offset computed as size_of::<AtomicUsize>()
            // assumes no padding between `ref_cnt` and `drop_fn`; this
            // holds because both are usize-sized and usize-aligned, making
            // the resulting pointer correctly aligned for a fn pointer.
            let drop_fn_ptr =
                (raw as *const u8).add(std::mem::size_of::<AtomicUsize>())
                    as *const unsafe fn(*mut ());
            let drop_fn = *drop_fn_ptr;
            // Runs T's destructor and frees the boxed Owned<T> allocation.
            drop_fn(raw);
        }
    }
}
5674
0 commit comments