text
stringlengths
0
4.1M
// This file is part of ICU4X. For terms of use, please see the file
// called LICENSE at the top level of the ICU4X source tree
// (online at: https://github.com/unicode-org/icu4x/blob/main/LICENSE ).

use crate::ule::*;
use crate::varzerovec::owned::VarZeroVecOwned;
use crate::varzerovec::vec::VarZeroVecInner;
use crate::vecs::VarZeroVecFormat;
use crate::{VarZeroSlice, VarZeroVec};
use crate::{ZeroSlice, ZeroVec};
use alloc::boxed::Box;
use alloc::vec::Vec;
use core::cmp::Ordering;
use core::mem;
use core::ops::Range;

/// Trait abstracting over [`ZeroVec`] and [`VarZeroVec`], for use in [`ZeroMap`](super::ZeroMap). **You
/// should not be implementing or calling this trait directly.**
///
/// The T type is the type received by [`Self::zvl_binary_search()`], as well as the one used
/// for human-readable serialization.
///
/// Methods are prefixed with `zvl_*` to avoid clashes with methods on the types themselves
pub trait ZeroVecLike<T: ?Sized> {
    /// The type returned by `Self::get()`
    type GetType: ?Sized + 'static;
    /// A fully borrowed version of this
    type SliceVariant: ZeroVecLike<T, GetType = Self::GetType> + ?Sized;

    /// Create a new, empty borrowed variant
    fn zvl_new_borrowed() -> &'static Self::SliceVariant;

    /// Search for a key in a sorted vector, returns `Ok(index)` if found,
    /// returns `Err(insert_index)` if not found, where `insert_index` is the
    /// index where it should be inserted to maintain sort order.
    fn zvl_binary_search(&self, k: &T) -> Result<usize, usize>
    where
        T: Ord;
    /// Search for a key within a certain range in a sorted vector.
    /// Returns `None` if the range is out of bounds, and
    /// `Ok` or `Err` in the same way as `zvl_binary_search`.
    /// Indices are returned relative to the start of the range.
    fn zvl_binary_search_in_range(
        &self,
        k: &T,
        range: Range<usize>,
    ) -> Option<Result<usize, usize>>
    where
        T: Ord;

    /// Search for a key in a sorted vector by a predicate, returns `Ok(index)` if found,
    /// returns `Err(insert_index)` if not found, where `insert_index` is the
    /// index where it should be inserted to maintain sort order.
    fn zvl_binary_search_by(&self, predicate: impl FnMut(&T) -> Ordering) -> Result<usize, usize>;
    /// Search for a key within a certain range in a sorted vector by a predicate.
    /// Returns `None` if the range is out of bounds, and
    /// `Ok` or `Err` in the same way as `zvl_binary_search`.
    /// Indices are returned relative to the start of the range.
    fn zvl_binary_search_in_range_by(
        &self,
        predicate: impl FnMut(&T) -> Ordering,
        range: Range<usize>,
    ) -> Option<Result<usize, usize>>;

    /// Get element at `index`
    fn zvl_get(&self, index: usize) -> Option<&Self::GetType>;
    /// The length of this vector
    fn zvl_len(&self) -> usize;

    /// Check if this vector is in ascending order according to `T`s `Ord` impl
    fn zvl_is_ascending(&self) -> bool
    where
        T: Ord,
    {
        // Walk adjacent pairs; strictly ascending means every comparison is Less
        // (equal neighbors also fail, so this rejects duplicates).
        if let Some(first) = self.zvl_get(0) {
            let mut prev = first;
            for i in 1..self.zvl_len() {
                #[allow(clippy::unwrap_used)] // looping over the valid indices
                let curr = self.zvl_get(i).unwrap();
                if Self::get_cmp_get(prev, curr) != Ordering::Less {
                    return false;
                }
                prev = curr;
            }
        }
        true
    }
    /// Check if this vector is empty
    fn zvl_is_empty(&self) -> bool {
        self.zvl_len() == 0
    }

    /// Construct a borrowed variant by borrowing from `&self`.
    ///
    /// This function behaves like `&'b self -> Self::SliceVariant<'b>`,
    /// where `'b` is the lifetime of the reference to this object.
    ///
    /// Note: We rely on the compiler recognizing `'a` and `'b` as covariant and
    /// casting `&'b Self<'a>` to `&'b Self<'b>` when this gets called, which works
    /// out for `ZeroVec` and `VarZeroVec` containers just fine.
    fn zvl_as_borrowed(&self) -> &Self::SliceVariant;

    /// Compare this type with a `Self::GetType`. This must produce the same result as
    /// if `g` were converted to `Self`
    #[inline]
    fn t_cmp_get(t: &T, g: &Self::GetType) -> Ordering
    where
        T: Ord,
    {
        Self::zvl_get_as_t(g, |g| t.cmp(g))
    }

    /// Compare two values of `Self::GetType`. This must produce the same result as
    /// if both `a` and `b` were converted to `Self`
    #[inline]
    fn get_cmp_get(a: &Self::GetType, b: &Self::GetType) -> Ordering
    where
        T: Ord,
    {
        Self::zvl_get_as_t(a, |a| Self::zvl_get_as_t(b, |b| a.cmp(b)))
    }

    /// Obtain a reference to T, passed to a closure
    ///
    /// This uses a callback because it's not possible to return owned-or-borrowed
    /// types without GATs
    ///
    /// Impls should guarantee that the callback function is called exactly once.
    fn zvl_get_as_t<R>(g: &Self::GetType, f: impl FnOnce(&T) -> R) -> R;
}

/// Trait abstracting over [`ZeroVec`] and [`VarZeroVec`], for use in [`ZeroMap`](super::ZeroMap). **You
/// should not be implementing or calling this trait directly.**
///
/// This trait augments [`ZeroVecLike`] with methods allowing for mutation of the underlying
/// vector for owned vector types.
///
/// Methods are prefixed with `zvl_*` to avoid clashes with methods on the types themselves
pub trait MutableZeroVecLike<'a, T: ?Sized>: ZeroVecLike<T> {
    /// The type returned by `Self::remove()` and `Self::replace()`
    type OwnedType;

    /// Insert an element at `index`
    fn zvl_insert(&mut self, index: usize, value: &T);
    /// Remove the element at `index` (panicking if nonexistent)
    fn zvl_remove(&mut self, index: usize) -> Self::OwnedType;
    /// Replace the element at `index` with another one, returning the old element
    fn zvl_replace(&mut self, index: usize, value: &T) -> Self::OwnedType;
    /// Push an element to the end of this vector
    fn zvl_push(&mut self, value: &T);
    /// Create a new, empty vector, with given capacity
    fn zvl_with_capacity(cap: usize) -> Self;
    /// Remove all elements from the vector
    fn zvl_clear(&mut self);
    /// Reserve space for `addl` additional elements
    fn zvl_reserve(&mut self, addl: usize);

    /// Applies the permutation such that `before.zvl_get(permutation[i]) == after.zvl_get(i)`.
    ///
    /// # Panics
    /// If `permutation` is not a valid permutation of length `zvl_len()`.
    fn zvl_permute(&mut self, permutation: &mut [usize]);

    /// Convert an owned value to a borrowed T
    fn owned_as_t(o: &Self::OwnedType) -> &T;

    /// Construct from the borrowed version of the type
    ///
    /// These are useful to ensure serialization parity between borrowed and owned versions
    fn zvl_from_borrowed(b: &'a Self::SliceVariant) -> Self;
    /// Extract the inner borrowed variant if possible. Returns `None` if the data is owned.
    ///
    /// This function behaves like `&'_ self -> Self::SliceVariant<'a>`,
    /// where `'a` is the lifetime of this object's borrowed data.
    ///
    /// This function is similar to matching the `Borrowed` variant of `ZeroVec`
    /// or `VarZeroVec`, returning the inner borrowed type.
    fn zvl_as_borrowed_inner(&self) -> Option<&'a Self::SliceVariant>;
}

impl<'a, T> ZeroVecLike<T> for ZeroVec<'a, T>
where
    T: 'a + AsULE + Copy,
{
    type GetType = T::ULE;
    type SliceVariant = ZeroSlice<T>;

    fn zvl_new_borrowed() -> &'static Self::SliceVariant {
        ZeroSlice::<T>::new_empty()
    }
    fn zvl_binary_search(&self, k: &T) -> Result<usize, usize>
    where
        T: Ord,
    {
        ZeroSlice::binary_search(self, k)
    }
    fn zvl_binary_search_in_range(&self, k: &T, range: Range<usize>) -> Option<Result<usize, usize>>
    where
        T: Ord,
    {
        // Delegate to the ZeroSlice impl (ZeroVec derefs to ZeroSlice).
        let zs: &ZeroSlice<T> = self;
        zs.zvl_binary_search_in_range(k, range)
    }
    fn zvl_binary_search_by(
        &self,
        mut predicate: impl FnMut(&T) -> Ordering,
    ) -> Result<usize, usize> {
        ZeroSlice::binary_search_by(self, |probe| predicate(&probe))
    }
    fn zvl_binary_search_in_range_by(
        &self,
        predicate: impl FnMut(&T) -> Ordering,
        range: Range<usize>,
    ) -> Option<Result<usize, usize>> {
        // Delegate to the ZeroSlice impl (ZeroVec derefs to ZeroSlice).
        let zs: &ZeroSlice<T> = self;
        zs.zvl_binary_search_in_range_by(predicate, range)
    }
    fn zvl_get(&self, index: usize) -> Option<&T::ULE> {
        self.get_ule_ref(index)
    }
    fn zvl_len(&self) -> usize {
        ZeroSlice::len(self)
    }

    fn zvl_as_borrowed(&self) -> &ZeroSlice<T> {
        self
    }

    #[inline]
    fn zvl_get_as_t<R>(g: &Self::GetType, f: impl FnOnce(&T) -> R) -> R {
        f(&T::from_unaligned(*g))
    }
}

impl<T> ZeroVecLike<T> for ZeroSlice<T>
where
    T: AsULE + Copy,
{
    type GetType = T::ULE;
    type SliceVariant = ZeroSlice<T>;

    fn zvl_new_borrowed() -> &'static Self::SliceVariant {
        ZeroSlice::<T>::new_empty()
    }
    fn zvl_binary_search(&self, k: &T) -> Result<usize, usize>
    where
        T: Ord,
    {
        ZeroSlice::binary_search(self, k)
    }
    fn zvl_binary_search_in_range(&self, k: &T, range: Range<usize>) -> Option<Result<usize, usize>>
    where
        T: Ord,
    {
        // `get_subslice` returns None for an out-of-bounds range; indices from
        // the search are then relative to the start of the subslice.
        let subslice = self.get_subslice(range)?;
        Some(ZeroSlice::binary_search(subslice, k))
    }
    fn zvl_binary_search_by(
        &self,
        mut predicate: impl FnMut(&T) -> Ordering,
    ) -> Result<usize, usize> {
        ZeroSlice::binary_search_by(self, |probe| predicate(&probe))
    }
    fn zvl_binary_search_in_range_by(
        &self,
        mut predicate: impl FnMut(&T) -> Ordering,
        range: Range<usize>,
    ) -> Option<Result<usize, usize>> {
        let subslice = self.get_subslice(range)?;
        Some(ZeroSlice::binary_search_by(subslice, |probe| {
            predicate(&probe)
        }))
    }
    fn zvl_get(&self, index: usize) -> Option<&T::ULE> {
        self.get_ule_ref(index)
    }
    fn zvl_len(&self) -> usize {
        ZeroSlice::len(self)
    }

    fn zvl_as_borrowed(&self) -> &ZeroSlice<T> {
        self
    }

    #[inline]
    fn zvl_get_as_t<R>(g: &Self::GetType, f: impl FnOnce(&T) -> R) -> R {
        f(&T::from_unaligned(*g))
    }
}

impl<'a, T> MutableZeroVecLike<'a, T> for ZeroVec<'a, T>
where
    T: AsULE + Copy + 'static,
{
    type OwnedType = T;
    fn zvl_insert(&mut self, index: usize, value: &T) {
        self.with_mut(|v| v.insert(index, value.to_unaligned()))
    }
    fn zvl_remove(&mut self, index: usize) -> T {
        T::from_unaligned(self.with_mut(|v| v.remove(index)))
    }
    fn zvl_replace(&mut self, index: usize, value: &T) -> T {
        #[allow(clippy::indexing_slicing)]
        let unaligned = self.with_mut(|vec| {
            debug_assert!(index < vec.len());
            mem::replace(&mut vec[index], value.to_unaligned())
        });
        T::from_unaligned(unaligned)
    }
    fn zvl_push(&mut self, value: &T) {
        self.with_mut(|v| v.push(value.to_unaligned()))
    }
    fn zvl_with_capacity(cap: usize) -> Self {
        if cap == 0 {
            ZeroVec::new()
        } else {
            ZeroVec::new_owned(Vec::with_capacity(cap))
        }
    }
    fn zvl_clear(&mut self) {
        self.with_mut(|v| v.clear())
    }
    fn zvl_reserve(&mut self, addl: usize) {
        self.with_mut(|v| v.reserve(addl))
    }
    fn owned_as_t(o: &Self::OwnedType) -> &T {
        o
    }

    fn zvl_from_borrowed(b: &'a ZeroSlice<T>) -> Self {
        b.as_zerovec()
    }
    fn zvl_as_borrowed_inner(&self) -> Option<&'a ZeroSlice<T>> {
        self.as_maybe_borrowed()
    }

    #[allow(clippy::indexing_slicing)] // documented panic
    fn zvl_permute(&mut self, permutation: &mut [usize]) {
        assert_eq!(permutation.len(), self.zvl_len());

        let vec = self.to_mut_slice();

        // In-place permutation by following cycles: elements are swapped along
        // each cycle, and visited slots are marked as self-cycles in
        // `permutation` so they are skipped when reached as a later cycle_start.
        for cycle_start in 0..permutation.len() {
            let mut curr = cycle_start;
            let mut next = permutation[curr];

            while next != cycle_start {
                vec.swap(curr, next);

                // Make curr a self-cycle so we don't use it as a cycle_start later
                permutation[curr] = curr;
                curr = next;
                next = permutation[next];
            }
            permutation[curr] = curr;
        }
    }
}

impl<'a, T, F> ZeroVecLike<T> for VarZeroVec<'a, T, F>
where
    T: VarULE,
    T: ?Sized,
    F: VarZeroVecFormat,
{
    type GetType = T;
    type SliceVariant = VarZeroSlice<T, F>;

    fn zvl_new_borrowed() -> &'static Self::SliceVariant {
        VarZeroSlice::<T, F>::new_empty()
    }
    fn zvl_binary_search(&self, k: &T) -> Result<usize, usize>
    where
        T: Ord,
    {
        self.binary_search(k)
    }
    fn zvl_binary_search_in_range(&self, k: &T, range: Range<usize>) -> Option<Result<usize, usize>>
    where
        T: Ord,
    {
        self.binary_search_in_range(k, range)
    }
    fn zvl_binary_search_by(&self, predicate: impl FnMut(&T) -> Ordering) -> Result<usize, usize> {
        self.binary_search_by(predicate)
    }
    fn zvl_binary_search_in_range_by(
        &self,
        predicate: impl FnMut(&T) -> Ordering,
        range: Range<usize>,
    ) -> Option<Result<usize, usize>> {
        self.binary_search_in_range_by(predicate, range)
    }
    fn zvl_get(&self, index: usize) -> Option<&T> {
        self.get(index)
    }
    fn zvl_len(&self) -> usize {
        self.len()
    }

    fn zvl_as_borrowed(&self) -> &VarZeroSlice<T, F> {
        self.as_slice()
    }

    #[inline]
    fn zvl_get_as_t<R>(g: &Self::GetType, f: impl FnOnce(&T) -> R) -> R {
        f(g)
    }
}

impl<T, F> ZeroVecLike<T> for VarZeroSlice<T, F>
where
    T: VarULE,
    T: ?Sized,
    F: VarZeroVecFormat,
{
    type GetType = T;
    type SliceVariant = VarZeroSlice<T, F>;

    fn zvl_new_borrowed() -> &'static Self::SliceVariant {
        VarZeroSlice::<T, F>::new_empty()
    }
    fn zvl_binary_search(&self, k: &T) -> Result<usize, usize>
    where
        T: Ord,
    {
        self.binary_search(k)
    }
    fn zvl_binary_search_in_range(&self, k: &T, range: Range<usize>) -> Option<Result<usize, usize>>
    where
        T: Ord,
    {
        self.binary_search_in_range(k, range)
    }
    fn zvl_binary_search_by(&self, predicate: impl FnMut(&T) -> Ordering) -> Result<usize, usize> {
        self.binary_search_by(predicate)
    }
    fn zvl_binary_search_in_range_by(
        &self,
        predicate: impl FnMut(&T) -> Ordering,
        range: Range<usize>,
    ) -> Option<Result<usize, usize>> {
        self.binary_search_in_range_by(predicate, range)
    }
    fn zvl_get(&self, index: usize) -> Option<&T> {
        self.get(index)
    }
    fn zvl_len(&self) -> usize {
        self.len()
    }

    fn zvl_as_borrowed(&self) -> &VarZeroSlice<T, F> {
        self
    }

    #[inline]
    fn zvl_get_as_t<R>(g: &Self::GetType, f: impl FnOnce(&T) -> R) -> R {
        f(g)
    }
}

impl<'a, T, F> MutableZeroVecLike<'a, T> for VarZeroVec<'a, T, F>
where
    T: VarULE,
    T: ?Sized,
    F: VarZeroVecFormat,
{
    type OwnedType = Box<T>;
    fn zvl_insert(&mut self, index: usize, value: &T) {
        self.make_mut().insert(index, value)
    }
    fn zvl_remove(&mut self, index: usize) -> Box<T> {
        let vec = self.make_mut();
        debug_assert!(index < vec.len());
        // Box the element before removal: it lives in the vector's buffer and
        // would be invalidated by the remove.
        #[allow(clippy::unwrap_used)]
        let old = vec.get(index).unwrap().to_boxed();
        vec.remove(index);
        old
    }
    fn zvl_replace(&mut self, index: usize, value: &T) -> Box<T> {
        let vec = self.make_mut();
        debug_assert!(index < vec.len());
        #[allow(clippy::unwrap_used)]
        let old = vec.get(index).unwrap().to_boxed();
        vec.replace(index, value);
        old
    }
    fn zvl_push(&mut self, value: &T) {
        // Insert at the current length == push to the end.
        let len = self.len();
        self.make_mut().insert(len, value)
    }
    fn zvl_with_capacity(cap: usize) -> Self {
        if cap == 0 {
            VarZeroVec::new()
        } else {
            Self::from(VarZeroVecOwned::with_capacity(cap))
        }
    }
    fn zvl_clear(&mut self) {
        self.make_mut().clear()
    }
    fn zvl_reserve(&mut self, addl: usize) {
        self.make_mut().reserve(addl)
    }
    fn owned_as_t(o: &Self::OwnedType) -> &T {
        o
    }

    fn zvl_from_borrowed(b: &'a VarZeroSlice<T, F>) -> Self {
        b.as_varzerovec()
    }
    fn zvl_as_borrowed_inner(&self) -> Option<&'a VarZeroSlice<T, F>> {
        if let Self(VarZeroVecInner::Borrowed(b)) = *self {
            Some(b)
        } else {
            None
        }
    }

    #[allow(clippy::unwrap_used)] // documented panic
    fn zvl_permute(&mut self, permutation: &mut [usize]) {
        assert_eq!(permutation.len(), self.zvl_len());

        // Elements are variable-sized, so unlike the ZeroVec impl this cannot
        // permute in place: rebuild an owned vector in permuted order.
        let mut result = VarZeroVecOwned::new();
        for &i in permutation.iter() {
            result.push(self.get(i).unwrap());
        }
        *self = Self(VarZeroVecInner::Owned(result));
    }
}

#[cfg(test)]
mod test {
    use super::*;

    #[test]
    fn test_zerovec_binary_search_in_range() {
        let zv: ZeroVec<u16> = ZeroVec::from_slice_or_alloc(&[11, 22, 33, 44, 55, 66, 77]);

        // Full range search
        assert_eq!(zv.zvl_binary_search_in_range(&11, 0..7), Some(Ok(0)));
        assert_eq!(zv.zvl_binary_search_in_range(&12, 0..7), Some(Err(1)));
        assert_eq!(zv.zvl_binary_search_in_range(&44, 0..7), Some(Ok(3)));
        assert_eq!(zv.zvl_binary_search_in_range(&45, 0..7), Some(Err(4)));
        assert_eq!(zv.zvl_binary_search_in_range(&77, 0..7), Some(Ok(6)));
        assert_eq!(zv.zvl_binary_search_in_range(&78, 0..7), Some(Err(7)));

        // Out-of-range search
        assert_eq!(zv.zvl_binary_search_in_range(&44, 0..2), Some(Err(2)));
        assert_eq!(zv.zvl_binary_search_in_range(&44, 5..7), Some(Err(0)));

        // Offset search
        assert_eq!(zv.zvl_binary_search_in_range(&44, 2..5), Some(Ok(1)));
        assert_eq!(zv.zvl_binary_search_in_range(&45, 2..5), Some(Err(2)));

        // Out-of-bounds
        assert_eq!(zv.zvl_binary_search_in_range(&44, 0..100), None);
        assert_eq!(zv.zvl_binary_search_in_range(&44, 100..200), None);
    }

    #[test]
    fn test_permute() {
        let mut zv: ZeroVec<u16> = ZeroVec::from_slice_or_alloc(&[11, 22, 33, 44, 55, 66, 77]);
        let mut permutation = vec![3, 2, 1, 0, 6, 5, 4];
        zv.zvl_permute(&mut permutation);
        assert_eq!(&zv, &[44, 33, 22, 11, 77, 66, 55]);

        let mut vzv: VarZeroVec<str> = VarZeroVec::from(
            VarZeroVecOwned::try_from_elements(&["11", "22", "33", "44", "55", "66", "77"])
                .unwrap(),
        );
        let mut permutation = vec![3, 2, 1, 0, 6, 5, 4];
        vzv.zvl_permute(&mut permutation);
        assert_eq!(&vzv, &["44", "33", "22", "11", "77", "66", "55"]);
    }
}
// This file is part of ICU4X. For terms of use, please see the file
// called LICENSE at the top level of the ICU4X source tree
// (online at: https://github.com/unicode-org/icu4x/blob/main/LICENSE ).

use crate::ZeroSlice;
use core::cmp::Ordering;
use core::fmt;

use crate::map::ZeroMapKV;
use crate::map::ZeroVecLike;
use crate::map2d::ZeroMap2dCursor;

/// A borrowed-only version of [`ZeroMap2d`](super::ZeroMap2d)
///
/// This is useful for fully-zero-copy deserialization from non-human-readable
/// serialization formats. It also has the advantage that it can return references that live for
/// the lifetime of the backing buffer as opposed to that of the [`ZeroMap2dBorrowed`] instance.
///
/// # Examples
///
/// ```
/// use zerovec::maps::ZeroMap2dBorrowed;
///
/// // Example byte buffer representing the map { 1: {2: "three" } }
/// let BINCODE_BYTES: &[u8; 47] = &[
///     2, 0, 0, 0, 0, 0, 0, 0, 1, 0, 4, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 2, 0,
///     0, 0, 0, 0, 0, 0, 2, 0, 7, 0, 0, 0, 0, 0, 0, 0, 1, 0, 116, 104, 114,
///     101, 101,
/// ];
///
/// // Deserializing to ZeroMap2d requires no heap allocations.
/// let zero_map: ZeroMap2dBorrowed<u16, u16, str> =
///     bincode::deserialize(BINCODE_BYTES)
///         .expect("Should deserialize successfully");
/// assert_eq!(zero_map.get_2d(&1, &2), Some("three"));
/// ```
///
/// This can be obtained from a [`ZeroMap2d`](super::ZeroMap2d) via [`ZeroMap2d::as_borrowed`](super::ZeroMap2d::as_borrowed)
pub struct ZeroMap2dBorrowed<'a, K0, K1, V>
where
    K0: ZeroMapKV<'a>,
    K1: ZeroMapKV<'a>,
    V: ZeroMapKV<'a>,
    K0: ?Sized,
    K1: ?Sized,
    V: ?Sized,
{
    // Looked up via binary search (see `get0`), so must be sorted ascending.
    pub(crate) keys0: &'a K0::Slice,
    // `joiner[i]` is the exclusive end index (into keys1/values) of the range
    // belonging to `keys0[i]`; the range starts at `joiner[i-1]` (or 0 for i == 0).
    // See `ZeroMap2dCursor::get_range`.
    pub(crate) joiner: &'a ZeroSlice<u32>,
    // NOTE(review): per the cursor's binary searches, keys1 appears to be sorted
    // within each joiner range; values is parallel to keys1.
    pub(crate) keys1: &'a K1::Slice,
    pub(crate) values: &'a V::Slice,
}

impl<'a, K0, K1, V> Copy for ZeroMap2dBorrowed<'a, K0, K1, V>
where
    K0: ZeroMapKV<'a>,
    K1: ZeroMapKV<'a>,
    V: ZeroMapKV<'a>,
    K0: ?Sized,
    K1: ?Sized,
    V: ?Sized,
{
}

impl<'a, K0, K1, V> Clone for ZeroMap2dBorrowed<'a, K0, K1, V>
where
    K0: ZeroMapKV<'a>,
    K1: ZeroMapKV<'a>,
    V: ZeroMapKV<'a>,
    K0: ?Sized,
    K1: ?Sized,
    V: ?Sized,
{
    fn clone(&self) -> Self {
        *self
    }
}

impl<'a, K0, K1, V> Default for ZeroMap2dBorrowed<'a, K0, K1, V>
where
    K0: ZeroMapKV<'a>,
    K1: ZeroMapKV<'a>,
    V: ZeroMapKV<'a>,
    K0::Slice: 'static,
    K1::Slice: 'static,
    V::Slice: 'static,
    K0: ?Sized,
    K1: ?Sized,
    V: ?Sized,
{
    fn default() -> Self {
        Self::new()
    }
}

impl<'a, K0, K1, V> ZeroMap2dBorrowed<'a, K0, K1, V>
where
    K0: ZeroMapKV<'a>,
    K1: ZeroMapKV<'a>,
    V: ZeroMapKV<'a>,
    K0::Slice: 'static,
    K1::Slice: 'static,
    V::Slice: 'static,
    K0: ?Sized,
    K1: ?Sized,
    V: ?Sized,
{
    /// Creates a new, empty `ZeroMap2dBorrowed<K0, K1, V>`.
    ///
    /// Note: Since [`ZeroMap2dBorrowed`] is not mutable, the return value will be a stub unless
    /// converted into a [`ZeroMap2d`](super::ZeroMap2d).
    ///
    /// # Examples
    ///
    /// ```
    /// use zerovec::maps::ZeroMap2dBorrowed;
    ///
    /// let zm: ZeroMap2dBorrowed<u16, u16, str> = ZeroMap2dBorrowed::new();
    /// assert!(zm.is_empty());
    /// ```
    pub fn new() -> Self {
        Self {
            keys0: K0::Container::zvl_new_borrowed(),
            joiner: Default::default(),
            keys1: K1::Container::zvl_new_borrowed(),
            values: V::Container::zvl_new_borrowed(),
        }
    }
}

impl<'a, K0, K1, V> ZeroMap2dBorrowed<'a, K0, K1, V>
where
    K0: ZeroMapKV<'a>,
    K1: ZeroMapKV<'a>,
    V: ZeroMapKV<'a>,
    K0: ?Sized,
    K1: ?Sized,
    V: ?Sized,
{
    #[doc(hidden)] // databake internal
    pub const unsafe fn from_parts_unchecked(
        keys0: &'a K0::Slice,
        joiner: &'a ZeroSlice<u32>,
        keys1: &'a K1::Slice,
        values: &'a V::Slice,
    ) -> Self {
        Self {
            keys0,
            joiner,
            keys1,
            values,
        }
    }

    /// The number of elements in the [`ZeroMap2dBorrowed`]
    pub fn len(&self) -> usize {
        self.values.zvl_len()
    }

    /// Whether the [`ZeroMap2dBorrowed`] is empty
    pub fn is_empty(&self) -> bool {
        self.values.zvl_len() == 0
    }
}

impl<'a, K0, K1, V> ZeroMap2dBorrowed<'a, K0, K1, V>
where
    K0: ZeroMapKV<'a> + Ord,
    K1: ZeroMapKV<'a> + Ord,
    V: ZeroMapKV<'a>,
    K0: ?Sized,
    K1: ?Sized,
    V: ?Sized,
{
    /// Get the value associated with `key0` and `key1`, if it exists.
    ///
    /// This is able to return values that live longer than the map itself
    /// since they borrow directly from the backing buffer. This is the
    /// primary advantage of using [`ZeroMap2dBorrowed`](super::ZeroMap2dBorrowed) over [`ZeroMap2d`](super::ZeroMap2d).
    ///
    /// ```rust
    /// use zerovec::ZeroMap2d;
    ///
    /// let mut map = ZeroMap2d::new();
    /// map.insert(&1, "one", "foo");
    /// map.insert(&2, "one", "bar");
    /// map.insert(&2, "two", "baz");
    ///
    /// let borrowed = map.as_borrowed();
    /// assert_eq!(borrowed.get_2d(&1, "one"), Some("foo"));
    /// assert_eq!(borrowed.get_2d(&1, "two"), None);
    /// assert_eq!(borrowed.get_2d(&2, "one"), Some("bar"));
    /// assert_eq!(borrowed.get_2d(&2, "two"), Some("baz"));
    /// assert_eq!(borrowed.get_2d(&3, "three"), None);
    /// ```
    pub fn get_2d(&self, key0: &K0, key1: &K1) -> Option<&'a V::GetType> {
        self.get0(key0)?.get1(key1)
    }
}

impl<'a, K0, K1, V> ZeroMap2dBorrowed<'a, K0, K1, V>
where
    K0: ZeroMapKV<'a> + Ord,
    K1: ZeroMapKV<'a>,
    V: ZeroMapKV<'a>,
    K0: ?Sized,
    K1: ?Sized,
    V: ?Sized,
{
    /// Gets a cursor for `key0`. If `None`, then `key0` is not in the map. If `Some`,
    /// then `key0` is in the map, and `key1` can be queried.
    ///
    /// ```rust
    /// use zerovec::ZeroMap2d;
    ///
    /// let mut map = ZeroMap2d::new();
    /// map.insert(&1, "one", "foo");
    /// map.insert(&2, "two", "bar");
    /// let borrowed = map.as_borrowed();
    /// assert!(matches!(borrowed.get0(&1), Some(_)));
    /// assert!(matches!(borrowed.get0(&3), None));
    /// ```
    #[inline]
    pub fn get0<'l>(&'l self, key0: &K0) -> Option<ZeroMap2dCursor<'a, 'a, K0, K1, V>> {
        let key0_index = self.keys0.zvl_binary_search(key0).ok()?;
        Some(ZeroMap2dCursor::from_borrowed(self, key0_index))
    }

    /// Binary search the map for `key0`, returning a cursor.
    ///
    /// ```rust
    /// use zerovec::ZeroMap2d;
    ///
    /// let mut map = ZeroMap2d::new();
    /// map.insert(&1, "one", "foo");
    /// map.insert(&2, "two", "bar");
    /// let borrowed = map.as_borrowed();
    /// assert!(matches!(borrowed.get0_by(|probe| probe.cmp(&1)), Some(_)));
    /// assert!(matches!(borrowed.get0_by(|probe| probe.cmp(&3)), None));
    /// ```
    pub fn get0_by<'l>(
        &'l self,
        predicate: impl FnMut(&K0) -> Ordering,
    ) -> Option<ZeroMap2dCursor<'a, 'a, K0, K1, V>> {
        let key0_index = self.keys0.zvl_binary_search_by(predicate).ok()?;
        Some(ZeroMap2dCursor::from_borrowed(self, key0_index))
    }

    /// Returns whether `key0` is contained in this map
    ///
    /// ```rust
    /// use zerovec::ZeroMap2d;
    ///
    /// let mut map = ZeroMap2d::new();
    /// map.insert(&1, "one", "foo");
    /// map.insert(&2, "two", "bar");
    /// let borrowed = map.as_borrowed();
    /// assert!(borrowed.contains_key0(&1));
    /// assert!(!borrowed.contains_key0(&3));
    /// ```
    pub fn contains_key0(&self, key0: &K0) -> bool {
        self.keys0.zvl_binary_search(key0).is_ok()
    }
}

impl<'a, K0, K1, V> ZeroMap2dBorrowed<'a, K0, K1, V>
where
    K0: ZeroMapKV<'a>,
    K1: ZeroMapKV<'a>,
    V: ZeroMapKV<'a>,
    K0: ?Sized,
    K1: ?Sized,
    V: ?Sized,
{
    /// Produce an ordered iterator over keys0
    pub fn iter0<'l>(&'l self) -> impl Iterator<Item = ZeroMap2dCursor<'a, 'a, K0, K1, V>> + 'l {
        (0..self.keys0.zvl_len()).map(move |idx| ZeroMap2dCursor::from_borrowed(self, idx))
    }
}

impl<'a, K0, K1, V> ZeroMap2dBorrowed<'a, K0, K1, V>
where
    K0: ZeroMapKV<'a> + Ord,
    K1: ZeroMapKV<'a> + Ord,
    V: ZeroMapKV<'a>,
    V: Copy,
    K0: ?Sized,
    K1: ?Sized,
{
    /// For cases when `V` is fixed-size, obtain a direct copy of `V` instead of `V::ULE`
    pub fn get_copied_2d(&self, key0: &K0, key1: &K1) -> Option<V> {
        self.get0(key0)?.get1_copied(key1)
    }
}

// We can't use the default PartialEq because ZeroMap2d is invariant
// so otherwise rustc will not automatically allow you to compare ZeroMaps
// with different lifetimes
impl<'a, 'b, K0, K1, V> PartialEq<ZeroMap2dBorrowed<'b, K0, K1, V>>
    for ZeroMap2dBorrowed<'a, K0, K1, V>
where
    K0: for<'c> ZeroMapKV<'c> + ?Sized,
    K1: for<'c> ZeroMapKV<'c> + ?Sized,
    V: for<'c> ZeroMapKV<'c> + ?Sized,
    <K0 as ZeroMapKV<'a>>::Slice: PartialEq<<K0 as ZeroMapKV<'b>>::Slice>,
    <K1 as ZeroMapKV<'a>>::Slice: PartialEq<<K1 as ZeroMapKV<'b>>::Slice>,
    <V as ZeroMapKV<'a>>::Slice: PartialEq<<V as ZeroMapKV<'b>>::Slice>,
{
    fn eq(&self, other: &ZeroMap2dBorrowed<'b, K0, K1, V>) -> bool {
        self.keys0.eq(other.keys0)
            && self.joiner.eq(other.joiner)
            && self.keys1.eq(other.keys1)
            && self.values.eq(other.values)
    }
}

impl<'a, K0, K1, V> fmt::Debug for ZeroMap2dBorrowed<'a, K0, K1, V>
where
    K0: ZeroMapKV<'a> + ?Sized,
    K1: ZeroMapKV<'a> + ?Sized,
    V: ZeroMapKV<'a> + ?Sized,
    K0::Slice: fmt::Debug,
    K1::Slice: fmt::Debug,
    V::Slice: fmt::Debug,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
        f.debug_struct("ZeroMap2dBorrowed")
            .field("keys0", &self.keys0)
            .field("joiner", &self.joiner)
            .field("keys1", &self.keys1)
            .field("values", &self.values)
            .finish()
    }
}
// This file is part of ICU4X. For terms of use, please see the file
// called LICENSE at the top level of the ICU4X source tree
// (online at: https://github.com/unicode-org/icu4x/blob/main/LICENSE ).

use crate::{ZeroMap2d, ZeroSlice};
use core::cmp::Ordering;
use core::fmt;
use core::ops::Range;

use crate::map::ZeroMapKV;
use crate::map::ZeroVecLike;

use super::ZeroMap2dBorrowed;

/// An intermediate state of queries over [`ZeroMap2d`] and [`ZeroMap2dBorrowed`].
pub struct ZeroMap2dCursor<'l, 'a, K0, K1, V>
where
    K0: ZeroMapKV<'a>,
    K1: ZeroMapKV<'a>,
    V: ZeroMapKV<'a>,
    K0: ?Sized,
    K1: ?Sized,
    V: ?Sized,
{
    // Invariant: these fields have the same invariants as they do in ZeroMap2d
    keys0: &'l K0::Slice,
    joiner: &'l ZeroSlice<u32>,
    keys1: &'l K1::Slice,
    values: &'l V::Slice,
    // Invariant: key0_index is in range
    key0_index: usize,
}

impl<'a, K0, K1, V> ZeroMap2dCursor<'a, 'a, K0, K1, V>
where
    K0: ZeroMapKV<'a>,
    K1: ZeroMapKV<'a>,
    V: ZeroMapKV<'a>,
    K0: ?Sized,
    K1: ?Sized,
    V: ?Sized,
{
    /// `key0_index` must be in range
    pub(crate) fn from_borrowed(
        borrowed: &ZeroMap2dBorrowed<'a, K0, K1, V>,
        key0_index: usize,
    ) -> Self {
        debug_assert!(key0_index < borrowed.joiner.len());
        ZeroMap2dCursor {
            keys0: borrowed.keys0,
            joiner: borrowed.joiner,
            keys1: borrowed.keys1,
            values: borrowed.values,
            key0_index,
        }
    }
}

impl<'l, 'a, K0, K1, V> ZeroMap2dCursor<'l, 'a, K0, K1, V>
where
    K0: ZeroMapKV<'a>,
    K1: ZeroMapKV<'a>,
    V: ZeroMapKV<'a>,
    K0: ?Sized,
    K1: ?Sized,
    V: ?Sized,
{
    /// `key0_index` must be in range
    pub(crate) fn from_cow(cow: &'l ZeroMap2d<'a, K0, K1, V>, key0_index: usize) -> Self {
        debug_assert!(key0_index < cow.joiner.len());
        Self {
            keys0: cow.keys0.zvl_as_borrowed(),
            joiner: &cow.joiner,
            keys1: cow.keys1.zvl_as_borrowed(),
            values: cow.values.zvl_as_borrowed(),
            key0_index,
        }
    }

    /// Returns the key0 corresponding to the cursor position.
    ///
    /// ```rust
    /// use zerovec::ZeroMap2d;
    ///
    /// let mut map = ZeroMap2d::new();
    /// map.insert("one", &1u32, "foo");
    /// assert_eq!(map.get0("one").unwrap().key0(), "one");
    /// ```
    pub fn key0(&self) -> &'l K0::GetType {
        #[allow(clippy::unwrap_used)] // safe by invariant on `self.key0_index`
        self.keys0.zvl_get(self.key0_index).unwrap()
    }

    /// Borrow an ordered iterator over keys1 and values for a particular key0.
    ///
    /// To get the values as copy types, see [`Self::iter1_copied`].
    ///
    /// For an example, see [`ZeroMap2d::iter0()`].
    pub fn iter1(
        &self,
    ) -> impl DoubleEndedIterator<
        Item = (
            &'l <K1 as ZeroMapKV<'a>>::GetType,
            &'l <V as ZeroMapKV<'a>>::GetType,
        ),
    > + ExactSizeIterator
           + '_ {
        let range = self.get_range();
        #[allow(clippy::unwrap_used)] // `self.get_range()` returns a valid range
        range.map(move |idx| {
            (
                self.keys1.zvl_get(idx).unwrap(),
                self.values.zvl_get(idx).unwrap(),
            )
        })
    }

    /// Transform this cursor into an ordered iterator over keys1 for a particular key0.
    pub fn into_iter1(
        self,
    ) -> impl DoubleEndedIterator<
        Item = (
            &'l <K1 as ZeroMapKV<'a>>::GetType,
            &'l <V as ZeroMapKV<'a>>::GetType,
        ),
    > + ExactSizeIterator {
        let range = self.get_range();
        #[allow(clippy::unwrap_used)] // `self.get_range()` returns a valid range
        range.map(move |idx| {
            (
                self.keys1.zvl_get(idx).unwrap(),
                self.values.zvl_get(idx).unwrap(),
            )
        })
    }

    /// Given key0_index, returns the corresponding range of keys1, which will be valid
    pub(super) fn get_range(&self) -> Range<usize> {
        debug_assert!(self.key0_index < self.joiner.len());
        // `joiner[i]` stores the (exclusive) end of the keys1/values range for
        // keys0[i]; the start of the range is the previous joiner entry, or 0
        // for the first key0.
        let start = if self.key0_index == 0 {
            0
        } else {
            #[allow(clippy::unwrap_used)] // protected by the debug_assert above
            self.joiner.get(self.key0_index - 1).unwrap()
        };
        #[allow(clippy::unwrap_used)] // protected by the debug_assert above
        let limit = self.joiner.get(self.key0_index).unwrap();
        // These two assertions are true based on the invariants of ZeroMap2d
        debug_assert!(start < limit);
        debug_assert!((limit as usize) <= self.values.zvl_len());
        (start as usize)..(limit as usize)
    }
}

impl<'l, 'a, K0, K1, V> ZeroMap2dCursor<'l, 'a, K0, K1, V>
where
    K0: ZeroMapKV<'a>,
    K1: ZeroMapKV<'a>,
    V: ZeroMapKV<'a>,
    K0: ?Sized,
    K1: ?Sized,
    V: Copy,
{
    /// Borrow an ordered iterator over keys1 and values for a particular key0.
    ///
    /// The values are returned as copy types.
    ///
    /// # Examples
    ///
    /// ```
    /// use zerovec::ZeroMap2d;
    ///
    /// let zm2d: ZeroMap2d<str, u8, u16> =
    ///     [("a", 0u8, 1u16), ("b", 1u8, 1000u16), ("b", 2u8, 2000u16)]
    ///         .into_iter()
    ///         .collect();
    ///
    /// let mut total_value = 0;
    ///
    /// for cursor in zm2d.iter0() {
    ///     for (_, value) in cursor.iter1_copied() {
    ///         total_value += value;
    ///     }
    /// }
    ///
    /// assert_eq!(total_value, 3001);
    /// ```
    pub fn iter1_copied(
        &self,
    ) -> impl DoubleEndedIterator<Item = (&'l <K1 as ZeroMapKV<'a>>::GetType, V)>
           + ExactSizeIterator
           + '_ {
        let range = self.get_range();
        #[allow(clippy::unwrap_used)] // `self.get_range()` returns a valid range
        range.map(move |idx| {
            (
                self.keys1.zvl_get(idx).unwrap(),
                self.get1_copied_at(idx).unwrap(),
            )
        })
    }

    /// Transform this cursor into an ordered iterator over keys1 for a particular key0.
    ///
    /// The values are returned as copy types.
    ///
    /// # Examples
    ///
    /// ```
    /// use zerovec::ZeroMap2d;
    ///
    /// let zm2d: ZeroMap2d<str, u8, u16> =
    ///     [("a", 0u8, 1u16), ("b", 1u8, 1000u16), ("b", 2u8, 2000u16)]
    ///         .into_iter()
    ///         .collect();
    ///
    /// let mut total_value = 0;
    ///
    /// for cursor in zm2d.iter0() {
    ///     for (_, value) in cursor.into_iter1_copied() {
    ///         total_value += value;
    ///     }
    /// }
    ///
    /// assert_eq!(total_value, 3001);
    /// ```
    pub fn into_iter1_copied(
        self,
    ) -> impl DoubleEndedIterator<Item = (&'l <K1 as ZeroMapKV<'a>>::GetType, V)> + ExactSizeIterator
    {
        let range = self.get_range();
        #[allow(clippy::unwrap_used)] // `self.get_range()` returns a valid range
        range.map(move |idx| {
            (
                self.keys1.zvl_get(idx).unwrap(),
                self.get1_copied_at(idx).unwrap(),
            )
        })
    }

    // Fetch the value at `index` as an owned copy, converting from its
    // unaligned (ULE) representation via the container's callback API.
    fn get1_copied_at(&self, index: usize) -> Option<V> {
        let ule = self.values.zvl_get(index)?;
        let mut result = Option::<V>::None;
        V::Container::zvl_get_as_t(ule, |v| result.replace(*v));
        #[allow(clippy::unwrap_used)] // `zvl_get_as_t` guarantees that the callback is invoked
        Some(result.unwrap())
    }
}

impl<'l, 'a, K0, K1, V> ZeroMap2dCursor<'l, 'a, K0, K1, V>
where
    K0: ZeroMapKV<'a>,
    K1: ZeroMapKV<'a> + Ord,
    V: ZeroMapKV<'a>,
    K0: ?Sized,
    K1: ?Sized,
    V: ?Sized,
{
    /// Gets the value for a key1 from this cursor, or `None` if key1 is not in the map.
    ///
    /// ```rust
    /// use zerovec::ZeroMap2d;
    ///
    /// let mut map = ZeroMap2d::new();
    /// map.insert("one", &1u32, "foo");
    /// assert_eq!(map.get0("one").unwrap().get1(&1), Some("foo"));
    /// assert_eq!(map.get0("one").unwrap().get1(&2), None);
    /// ```
    pub fn get1(&self, key1: &K1) -> Option<&'l V::GetType> {
        let key1_index = self.get_key1_index(key1)?;
        #[allow(clippy::unwrap_used)] // key1_index is valid
        Some(self.values.zvl_get(key1_index).unwrap())
    }

    /// Gets the value for a predicate from this cursor, or `None` if key1 is not in the map.
    ///
    /// ```rust
    /// use zerovec::ZeroMap2d;
    ///
    /// let mut map = ZeroMap2d::new();
    /// map.insert("one", &1u32, "foo");
    /// assert_eq!(map.get0("one").unwrap().get1_by(|v| v.cmp(&1)), Some("foo"));
    /// assert_eq!(map.get0("one").unwrap().get1_by(|v| v.cmp(&2)), None);
    /// ```
    pub fn get1_by(&self, predicate: impl FnMut(&K1) -> Ordering) -> Option<&'l V::GetType> {
        let key1_index = self.get_key1_index_by(predicate)?;
        #[allow(clippy::unwrap_used)] // key1_index is valid
        Some(self.values.zvl_get(key1_index).unwrap())
    }

    /// Given key0_index and predicate, returns the index into the values array
    fn get_key1_index_by(&self, predicate: impl FnMut(&K1) -> Ordering) -> Option<usize> {
        let range = self.get_range();
        debug_assert!(range.start < range.end); // '<' because every key0 should have a key1
        debug_assert!(range.end <= self.keys1.zvl_len());
        let start = range.start;
        #[allow(clippy::expect_used)] // protected by the debug_assert above
        let binary_search_result = self
            .keys1
            .zvl_binary_search_in_range_by(predicate, range)
            .expect("in-bounds range");
        // The in-range search returns indices relative to `range.start`;
        // re-offset to an absolute index into keys1/values.
        binary_search_result.ok().map(move |s| s + start)
    }

    /// Given key0_index and key1, returns the index into the values array
    fn get_key1_index(&self, key1: &K1) -> Option<usize> {
        let range = self.get_range();
        debug_assert!(range.start < range.end); // '<' because every key0 should have a key1
        debug_assert!(range.end <= self.keys1.zvl_len());
        let start = range.start;
        #[allow(clippy::expect_used)] // protected by the debug_assert above
        let binary_search_result = self
            .keys1
            .zvl_binary_search_in_range(key1, range)
            .expect("in-bounds range");
        // Re-offset the range-relative index to an absolute index.
        binary_search_result.ok().map(move |s| s + start)
    }
}

impl<'l, 'a, K0, K1, V> ZeroMap2dCursor<'l, 'a, K0, K1, V>
where
    K0: ZeroMapKV<'a>,
    K1: ZeroMapKV<'a> + Ord,
    V: ZeroMapKV<'a>,
    V: Copy,
    K0: ?Sized,
    K1: ?Sized,
{
    /// For cases when `V` is fixed-size, obtain a direct copy of `V` instead of `V::ULE`
    ///
    /// ```rust
    /// use zerovec::ZeroMap2d;
    ///
    /// let mut map: ZeroMap2d<u16, u16, u16> =
ZeroMap2d::new(); /// map.insert(&1, &2, &3); /// map.insert(&1, &4, &5); /// map.insert(&6, &7, &8); /// /// assert_eq!(map.get0(&6).unwrap().get1_copied(&7), Some(8)); /// ``` #[inline] pub fn get1_copied(&self, key1: &K1) -> Option<V> { let key1_index = self.get_key1_index(key1)?; self.get1_copied_at(key1_index) } /// For cases when `V` is fixed-size, obtain a direct copy of `V` instead of `V::ULE` #[inline] pub fn get1_copied_by(&self, predicate: impl FnMut(&K1) -> Ordering) -> Option<V> { let key1_index = self.get_key1_index_by(predicate)?; self.get1_copied_at(key1_index) } } // We can't use the default PartialEq because ZeroMap2d is invariant // so otherwise rustc will not automatically allow you to compare ZeroMaps // with different lifetimes impl<'m, 'n, 'a, 'b, K0, K1, V> PartialEq<ZeroMap2dCursor<'n, 'b, K0, K1, V>> for ZeroMap2dCursor<'m, 'a, K0, K1, V> where K0: for<'c> ZeroMapKV<'c> + ?Sized, K1: for<'c> ZeroMapKV<'c> + ?Sized, V: for<'c> ZeroMapKV<'c> + ?Sized, <K0 as ZeroMapKV<'a>>::Slice: PartialEq<<K0 as ZeroMapKV<'b>>::Slice>, <K1 as ZeroMapKV<'a>>::Slice: PartialEq<<K1 as ZeroMapKV<'b>>::Slice>, <V as ZeroMapKV<'a>>::Slice: PartialEq<<V as ZeroMapKV<'b>>::Slice>, { fn eq(&self, other: &ZeroMap2dCursor<'n, 'b, K0, K1, V>) -> bool { self.keys0.eq(other.keys0) && self.joiner.eq(other.joiner) && self.keys1.eq(other.keys1) && self.values.eq(other.values) && self.key0_index.eq(&other.key0_index) } } impl<'l, 'a, K0, K1, V> fmt::Debug for ZeroMap2dCursor<'l, 'a, K0, K1, V> where K0: ZeroMapKV<'a> + ?Sized, K1: ZeroMapKV<'a> + ?Sized, V: ZeroMapKV<'a> + ?Sized, K0::Slice: fmt::Debug, K1::Slice: fmt::Debug, V::Slice: fmt::Debug, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { f.debug_struct("ZeroMap2d") .field("keys0", &self.keys0) .field("joiner", &self.joiner) .field("keys1", &self.keys1) .field("values", &self.values) .field("key0_index", &self.key0_index) .finish() } }
// This file is part of ICU4X. For terms of use, please see the file
// called LICENSE at the top level of the ICU4X source tree
// (online at: https://github.com/unicode-org/icu4x/blob/main/LICENSE ).

// `databake` support: bakes a ZeroMap2d into const-constructible Rust source.

use crate::{maps::ZeroMap2dBorrowed, maps::ZeroMapKV, ZeroMap2d};
use databake::*;

impl<'a, K0, K1, V> Bake for ZeroMap2d<'a, K0, K1, V>
where
    K0: ZeroMapKV<'a> + ?Sized,
    K1: ZeroMapKV<'a> + ?Sized,
    V: ZeroMapKV<'a> + ?Sized,
    K0::Container: Bake,
    K1::Container: Bake,
    V::Container: Bake,
{
    fn bake(&self, env: &CrateEnv) -> TokenStream {
        env.insert("zerovec");
        // Bake the four constituent vectors, then splice them into the
        // unchecked constructor (the baked data is trusted to uphold the
        // map's invariants, since it came from a valid map).
        let keys0 = self.keys0.bake(env);
        let joiner = self.joiner.bake(env);
        let keys1 = self.keys1.bake(env);
        let values = self.values.bake(env);
        quote! { unsafe {
            #[allow(unused_unsafe)]
            zerovec::ZeroMap2d::from_parts_unchecked(#keys0, #joiner, #keys1, #values)
        } }
    }
}

impl<'a, K0, K1, V> BakeSize for ZeroMap2d<'a, K0, K1, V>
where
    K0: ZeroMapKV<'a> + ?Sized,
    K1: ZeroMapKV<'a> + ?Sized,
    V: ZeroMapKV<'a> + ?Sized,
    K0::Container: BakeSize,
    K1::Container: BakeSize,
    V::Container: BakeSize,
{
    // Total borrowed-data footprint is the sum over the four vectors.
    fn borrows_size(&self) -> usize {
        self.keys0.borrows_size()
            + self.joiner.borrows_size()
            + self.keys1.borrows_size()
            + self.values.borrows_size()
    }
}

impl<'a, K0, K1, V> Bake for ZeroMap2dBorrowed<'a, K0, K1, V>
where
    K0: ZeroMapKV<'a> + ?Sized,
    K1: ZeroMapKV<'a> + ?Sized,
    V: ZeroMapKV<'a> + ?Sized,
    &'a K0::Slice: Bake,
    &'a K1::Slice: Bake,
    &'a V::Slice: Bake,
{
    fn bake(&self, env: &CrateEnv) -> TokenStream {
        env.insert("zerovec");
        let keys0 = self.keys0.bake(env);
        let joiner = self.joiner.bake(env);
        let keys1 = self.keys1.bake(env);
        let values = self.values.bake(env);
        quote! { unsafe {
            #[allow(unused_unsafe)]
            zerovec::maps::ZeroMap2dBorrowed::from_parts_unchecked(#keys0, #joiner, #keys1, #values)
        } }
    }
}

impl<'a, K0, K1, V> BakeSize for ZeroMap2dBorrowed<'a, K0, K1, V>
where
    K0: ZeroMapKV<'a> + ?Sized,
    K1: ZeroMapKV<'a> + ?Sized,
    V: ZeroMapKV<'a> + ?Sized,
    &'a K0::Slice: BakeSize,
    &'a K1::Slice: BakeSize,
    &'a V::Slice: BakeSize,
{
    fn borrows_size(&self) -> usize {
        self.keys0.borrows_size()
            + self.joiner.borrows_size()
            + self.keys1.borrows_size()
            + self.values.borrows_size()
    }
}

// Round-trip check: the token stream produced by `bake` must parse back into
// an equal owned map. The byte literals below are a baked locale→script→region
// fixture; they must stay byte-for-byte identical.
#[test]
fn test_baked_map() {
    test_bake!(
        ZeroMap2d<str, str, str>,
        const,
        unsafe {
            #[allow(unused_unsafe)]
            crate::ZeroMap2d::from_parts_unchecked(
                unsafe {
                    crate::vecs::VarZeroVec16::from_bytes_unchecked(
                        b"\x0E\0\0\0\0\0\x05\0\x07\0\t\0\x0B\0\x10\0\x12\0\x14\0\x1C\0\x1E\0#\0%\0'\0,\0arcazcuenffgrckkkukylifmanmnpapalsdtgugunruzyuezh"
                    )
                },
                unsafe {
                    crate::ZeroVec::from_bytes_unchecked(
                        b"\x02\0\0\0\x03\0\0\0\x04\0\0\0\x05\0\0\0\x06\0\0\0\x07\0\0\0\x08\0\0\0\n\0\0\0\x0C\0\0\0\r\0\0\0\x0E\0\0\0\x0F\0\0\0\x10\0\0\0\x11\0\0\0\x14\0\0\0\x15\0\0\0\x16\0\0\0\x17\0\0\0\x18\0\0\0\x19\0\0\0\x1C\0\0\0"
                    )
                },
                unsafe {
                    crate::vecs::VarZeroVec16::from_bytes_unchecked(
                        b"\x1C\0\0\0\0\0\x04\0\x08\0\x0C\0\x10\0\x14\0\x18\0\x1C\0 \0$\0(\0,\x000\x004\08\0<\0@\0D\0H\0L\0P\0T\0X\0\\\0`\0d\0h\0l\0NbatPalmArabGlagShawAdlmLinbArabArabYeziArabLatnLimbNkooMongArabPhlpDevaKhojSindArabCyrlDevaArabHansBopoHanbHant"
                    )
                },
                unsafe {
                    crate::vecs::VarZeroVec16::from_bytes_unchecked(
                        b"\x1C\0\0\0\0\0\x02\0\x04\0\x06\0\x08\0\n\0\x0C\0\x0E\0\x10\0\x12\0\x14\0\x16\0\x18\0\x1A\0\x1C\0\x1E\0 \0\"\0$\0&\0(\0*\0,\0.\x000\x002\x004\x006\0JOSYIRBGGBGNGRCNIQGECNTRINGNCNPKCNINININPKKZNPAFCNTWTWTW"
                    )
                },
            )
        },
        zerovec
    );
}

// Same round-trip check for the fully borrowed variant (slices instead of vecs).
#[test]
fn test_baked_borrowed_map() {
    test_bake!(
        ZeroMap2dBorrowed<str, str, str>,
        const,
        unsafe {
            #[allow(unused_unsafe)]
            crate::maps::ZeroMap2dBorrowed::from_parts_unchecked(
                unsafe {
                    crate::vecs::VarZeroSlice16::from_bytes_unchecked(
                        b"\x0E\0\0\0\0\0\x05\0\x07\0\t\0\x0B\0\x10\0\x12\0\x14\0\x1C\0\x1E\0#\0%\0'\0,\0arcazcuenffgrckkkukylifmanmnpapalsdtgugunruzyuezh"
                    )
                },
                unsafe {
                    crate::ZeroSlice::from_bytes_unchecked(
                        b"\x02\0\0\0\x03\0\0\0\x04\0\0\0\x05\0\0\0\x06\0\0\0\x07\0\0\0\x08\0\0\0\n\0\0\0\x0C\0\0\0\r\0\0\0\x0E\0\0\0\x0F\0\0\0\x10\0\0\0\x11\0\0\0\x14\0\0\0\x15\0\0\0\x16\0\0\0\x17\0\0\0\x18\0\0\0\x19\0\0\0\x1C\0\0\0"
                    )
                },
                unsafe {
                    crate::vecs::VarZeroSlice16::from_bytes_unchecked(
                        b"\x1C\0\0\0\0\0\x04\0\x08\0\x0C\0\x10\0\x14\0\x18\0\x1C\0 \0$\0(\0,\x000\x004\08\0<\0@\0D\0H\0L\0P\0T\0X\0\\\0`\0d\0h\0l\0NbatPalmArabGlagShawAdlmLinbArabArabYeziArabLatnLimbNkooMongArabPhlpDevaKhojSindArabCyrlDevaArabHansBopoHanbHant"
                    )
                },
                unsafe {
                    crate::vecs::VarZeroSlice16::from_bytes_unchecked(
                        b"\x1C\0\0\0\0\0\x02\0\x04\0\x06\0\x08\0\n\0\x0C\0\x0E\0\x10\0\x12\0\x14\0\x16\0\x18\0\x1A\0\x1C\0\x1E\0 \0\"\0$\0&\0(\0*\0,\0.\x000\x002\x004\x006\0JOSYIRBGGBGNGRCNIQGECNTRINGNCNPKCNINININPKKZNPAFCNTWTWTW"
                    )
                },
            )
        },
        zerovec
    );
}
// This file is part of ICU4X. For terms of use, please see the file
// called LICENSE at the top level of the ICU4X source tree
// (online at: https://github.com/unicode-org/icu4x/blob/main/LICENSE ).

use crate::ule::AsULE;
use crate::ZeroVec;
use alloc::borrow::Borrow;
use core::cmp::Ordering;
use core::convert::TryFrom;
use core::fmt;
use core::iter::FromIterator;
use core::ops::Range;

use super::*;
use crate::map::ZeroMapKV;
use crate::map::{MutableZeroVecLike, ZeroVecLike};

/// A zero-copy, two-dimensional map data structure.
///
/// This is an extension of [`ZeroMap`] that supports two layers of keys. For example,
/// to map a pair of an integer and a string to a buffer, you can write:
///
/// ```no_run
/// # use zerovec::ZeroMap2d;
/// let _: ZeroMap2d<u32, str, [u8]> = unimplemented!();
/// ```
///
/// Internally, `ZeroMap2d` stores four zero-copy vectors, one for each type argument plus
/// one more to match between the two vectors of keys.
///
/// # Examples
///
/// ```
/// use zerovec::ZeroMap2d;
///
/// // Example byte buffer representing the map { 1: {2: "three" } }
/// let BINCODE_BYTES: &[u8; 47] = &[
///     2, 0, 0, 0, 0, 0, 0, 0, 1, 0, 4, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 2, 0,
///     0, 0, 0, 0, 0, 0, 2, 0, 7, 0, 0, 0, 0, 0, 0, 0, 1, 0, 116, 104, 114,
///     101, 101,
/// ];
///
/// // Deserializing to ZeroMap requires no heap allocations.
/// let zero_map: ZeroMap2d<u16, u16, str> = bincode::deserialize(BINCODE_BYTES)
///     .expect("Should deserialize successfully");
/// assert_eq!(zero_map.get_2d(&1, &2), Some("three"));
/// ```
///
/// [`VarZeroVec`]: crate::VarZeroVec
/// [`ZeroMap`]: crate::ZeroMap
// ZeroMap2d contains 4 fields:
//
// - keys0 = sorted list of all K0 in the map
// - joiner = helper vec that maps from a K0 to a range of keys1
// - keys1 = list of all K1 in the map, sorted in ranges for each K0
// - values = list of all values in the map, sorted by (K0, K1)
//
// For a particular K0 at index i, the range of keys1 corresponding to K0 is
// (joiner[i-1]..joiner[i]), where the first range starts at 0.
//
// Required Invariants:
//
// 1. len(keys0) == len(joiner)
// 2. len(keys1) == len(values)
// 3. joiner is sorted
// 4. the last element of joiner is the length of keys1
//
// Optional Invariants:
//
// 5. keys0 is sorted (for binary_search)
// 6. ranges within keys1 are sorted (for binary_search)
// 7. every K0 is associated with at least one K1 (no empty ranges)
//
// During deserialization, these three invariants are not checked, because they put the
// ZeroMap2d in a deterministic state, even though it may have unexpected behavior.
pub struct ZeroMap2d<'a, K0, K1, V>
where
    K0: ZeroMapKV<'a>,
    K1: ZeroMapKV<'a>,
    V: ZeroMapKV<'a>,
    K0: ?Sized,
    K1: ?Sized,
    V: ?Sized,
{
    pub(crate) keys0: K0::Container,
    pub(crate) joiner: ZeroVec<'a, u32>,
    pub(crate) keys1: K1::Container,
    pub(crate) values: V::Container,
}

impl<'a, K0, K1, V> Default for ZeroMap2d<'a, K0, K1, V>
where
    K0: ZeroMapKV<'a>,
    K1: ZeroMapKV<'a>,
    V: ZeroMapKV<'a>,
    K0: ?Sized,
    K1: ?Sized,
    V: ?Sized,
{
    fn default() -> Self {
        Self::new()
    }
}

impl<'a, K0, K1, V> ZeroMap2d<'a, K0, K1, V>
where
    K0: ZeroMapKV<'a>,
    K1: ZeroMapKV<'a>,
    V: ZeroMapKV<'a>,
    K0: ?Sized,
    K1: ?Sized,
    V: ?Sized,
{
    /// Creates a new, empty `ZeroMap2d`.
    ///
    /// # Examples
    ///
    /// ```
    /// use zerovec::ZeroMap2d;
    ///
    /// let zm: ZeroMap2d<u16, str, str> = ZeroMap2d::new();
    /// assert!(zm.is_empty());
    /// ```
    pub fn new() -> Self {
        Self {
            keys0: K0::Container::zvl_with_capacity(0),
            joiner: ZeroVec::new(),
            keys1: K1::Container::zvl_with_capacity(0),
            values: V::Container::zvl_with_capacity(0),
        }
    }

    #[doc(hidden)] // databake internal
    pub const unsafe fn from_parts_unchecked(
        keys0: K0::Container,
        joiner: ZeroVec<'a, u32>,
        keys1: K1::Container,
        values: V::Container,
    ) -> Self {
        Self {
            keys0,
            joiner,
            keys1,
            values,
        }
    }

    /// Construct a new [`ZeroMap2d`] with a given capacity
    pub fn with_capacity(capacity: usize) -> Self {
        Self {
            keys0: K0::Container::zvl_with_capacity(capacity),
            joiner: ZeroVec::with_capacity(capacity),
            keys1: K1::Container::zvl_with_capacity(capacity),
            values: V::Container::zvl_with_capacity(capacity),
        }
    }

    /// Obtain a borrowed version of this map
    pub fn as_borrowed(&'a self) -> ZeroMap2dBorrowed<'a, K0, K1, V> {
        ZeroMap2dBorrowed {
            keys0: self.keys0.zvl_as_borrowed(),
            joiner: &self.joiner,
            keys1: self.keys1.zvl_as_borrowed(),
            values: self.values.zvl_as_borrowed(),
        }
    }

    /// The number of values in the [`ZeroMap2d`]
    pub fn len(&self) -> usize {
        self.values.zvl_len()
    }

    /// Whether the [`ZeroMap2d`] is empty
    pub fn is_empty(&self) -> bool {
        self.values.zvl_len() == 0
    }

    /// Remove all elements from the [`ZeroMap2d`]
    pub fn clear(&mut self) {
        self.keys0.zvl_clear();
        self.joiner.clear();
        self.keys1.zvl_clear();
        self.values.zvl_clear();
    }

    /// Reserve capacity for `additional` more elements to be inserted into
    /// the [`ZeroMap2d`] to avoid frequent reallocations.
    ///
    /// See [`Vec::reserve()`](alloc::vec::Vec::reserve) for more information.
    pub fn reserve(&mut self, additional: usize) {
        self.keys0.zvl_reserve(additional);
        self.joiner.zvl_reserve(additional);
        self.keys1.zvl_reserve(additional);
        self.values.zvl_reserve(additional);
    }

    /// Produce an ordered iterator over keys0, which can then be used to get an iterator
    /// over keys1 for a particular key0.
    ///
    /// # Example
    ///
    /// Loop over all elements of a ZeroMap2d:
    ///
    /// ```
    /// use zerovec::ZeroMap2d;
    ///
    /// let mut map: ZeroMap2d<u16, u16, str> = ZeroMap2d::new();
    /// map.insert(&1, &1, "foo");
    /// map.insert(&2, &3, "bar");
    /// map.insert(&2, &4, "baz");
    ///
    /// let mut total_value = 0;
    ///
    /// for cursor in map.iter0() {
    ///     for (key1, value) in cursor.iter1() {
    ///         // This code runs for every (key0, key1) pair
    ///         total_value += cursor.key0().as_unsigned_int() as usize;
    ///         total_value += key1.as_unsigned_int() as usize;
    ///         total_value += value.len();
    ///     }
    /// }
    ///
    /// assert_eq!(total_value, 22);
    /// ```
    pub fn iter0<'l>(&'l self) -> impl Iterator<Item = ZeroMap2dCursor<'l, 'a, K0, K1, V>> + 'l {
        (0..self.keys0.zvl_len()).map(move |idx| ZeroMap2dCursor::from_cow(self, idx))
    }

    // INTERNAL ROUTINES FOLLOW //

    /// Given an index into the joiner array, returns the corresponding range of keys1
    fn get_range_for_key0_index(&self, key0_index: usize) -> Range<usize> {
        ZeroMap2dCursor::from_cow(self, key0_index).get_range()
    }

    /// Removes key0_index from the keys0 array and the joiner array
    fn remove_key0_index(&mut self, key0_index: usize) {
        self.keys0.zvl_remove(key0_index);
        self.joiner.with_mut(|v| v.remove(key0_index));
    }

    /// Shifts all joiner ranges from key0_index onward one index up
    fn joiner_expand(&mut self, key0_index: usize) {
        #[allow(clippy::expect_used)] // slice overflow
        self.joiner
            .to_mut_slice()
            .iter_mut()
            .skip(key0_index)
            .for_each(|ref mut v| {
                // TODO(#1410): Make this fallible
                **v = v
                    .as_unsigned_int()
                    .checked_add(1)
                    .expect("Attempted to add more than 2^32 elements to a ZeroMap2d")
                    .to_unaligned()
            })
    }

    /// Shifts all joiner ranges from key0_index onward one index down
    // NOTE(review): the `- 1` relies on invariants 3/4 (joiner entries past
    // key0_index are >= 1 whenever this is called after a removal).
    fn joiner_shrink(&mut self, key0_index: usize) {
        self.joiner
            .to_mut_slice()
            .iter_mut()
            .skip(key0_index)
            .for_each(|ref mut v| **v = (v.as_unsigned_int() - 1).to_unaligned())
    }
}

impl<'a, K0, K1, V> ZeroMap2d<'a, K0, K1, V>
where
    K0: ZeroMapKV<'a> + Ord,
    K1: ZeroMapKV<'a> + Ord,
    V: ZeroMapKV<'a>,
    K0: ?Sized,
    K1: ?Sized,
    V: ?Sized,
{
    /// Get the value associated with `key0` and `key1`, if it exists.
    ///
    /// For more fine-grained error handling, use [`ZeroMap2d::get0`].
    ///
    /// ```rust
    /// use zerovec::ZeroMap2d;
    ///
    /// let mut map = ZeroMap2d::new();
    /// map.insert(&1, "one", "foo");
    /// map.insert(&2, "one", "bar");
    /// map.insert(&2, "two", "baz");
    /// assert_eq!(map.get_2d(&1, "one"), Some("foo"));
    /// assert_eq!(map.get_2d(&1, "two"), None);
    /// assert_eq!(map.get_2d(&2, "one"), Some("bar"));
    /// assert_eq!(map.get_2d(&2, "two"), Some("baz"));
    /// assert_eq!(map.get_2d(&3, "three"), None);
    /// ```
    pub fn get_2d(&self, key0: &K0, key1: &K1) -> Option<&V::GetType> {
        self.get0(key0)?.get1(key1)
    }

    /// Insert `value` with `key`, returning the existing value if it exists.
    ///
    /// ```rust
    /// use zerovec::ZeroMap2d;
    ///
    /// let mut map = ZeroMap2d::new();
    /// assert_eq!(map.insert(&0, "zero", "foo"), None,);
    /// assert_eq!(map.insert(&1, "one", "bar"), None,);
    /// assert_eq!(map.insert(&1, "one", "baz").as_deref(), Some("bar"),);
    /// assert_eq!(map.get_2d(&1, "one").as_deref(), Some("baz"));
    /// assert_eq!(map.len(), 2);
    /// ```
    pub fn insert(&mut self, key0: &K0, key1: &K1, value: &V) -> Option<V::OwnedType> {
        // Find (or create) key0's slot; `range` covers its keys1 entries.
        let (key0_index, range) = self.get_or_insert_range_for_key0(key0);

        debug_assert!(range.start <= range.end); // '<=' because we may have inserted a new key0
        debug_assert!(range.end <= self.keys1.zvl_len());

        let range_start = range.start;
        #[allow(clippy::unwrap_used)] // by debug_assert! invariants
        let index = range_start
            + match self.keys1.zvl_binary_search_in_range(key1, range).unwrap() {
                // key1 already present: replace the value in place.
                Ok(index) => return Some(self.values.zvl_replace(range_start + index, value)),
                Err(index) => index,
            };

        self.keys1.zvl_insert(index, key1);
        self.values.zvl_insert(index, value);
        // One more key1 for this key0: bump all joiner entries from here on.
        self.joiner_expand(key0_index);
        #[cfg(debug_assertions)]
        self.check_invariants();
        None
    }

    /// Remove the value at `key`, returning it if it exists.
    ///
    /// ```rust
    /// use zerovec::ZeroMap2d;
    ///
    /// let mut map = ZeroMap2d::new();
    /// map.insert(&1, "one", "foo");
    /// map.insert(&2, "two", "bar");
    /// assert_eq!(
    ///     map.remove(&1, "one"),
    ///     Some("foo".to_owned().into_boxed_str())
    /// );
    /// assert_eq!(map.get_2d(&1, "one"), None);
    /// assert_eq!(map.remove(&1, "one"), None);
    /// ```
    pub fn remove(&mut self, key0: &K0, key1: &K1) -> Option<V::OwnedType> {
        let key0_index = self.keys0.zvl_binary_search(key0).ok()?;
        let range = self.get_range_for_key0_index(key0_index);

        debug_assert!(range.start < range.end); // '<' because every key0 should have a key1
        debug_assert!(range.end <= self.keys1.zvl_len());

        // If this was key0's only key1, key0 itself must be removed afterwards
        // (invariant 7: no empty ranges).
        let is_singleton_range = range.start + 1 == range.end;

        #[allow(clippy::unwrap_used)] // by debug_assert invariants
        let index = range.start
            + self
                .keys1
                .zvl_binary_search_in_range(key1, range)
                .unwrap()
                .ok()?;

        self.keys1.zvl_remove(index);
        let removed = self.values.zvl_remove(index);
        self.joiner_shrink(key0_index);
        if is_singleton_range {
            self.remove_key0_index(key0_index);
        }
        #[cfg(debug_assertions)]
        self.check_invariants();
        Some(removed)
    }

    /// Appends `value` with `key` to the end of the underlying vector, returning
    /// `key` and `value` _if it failed_. Useful for extending with an existing
    /// sorted list.
    ///
    /// ```rust
    /// use zerovec::ZeroMap2d;
    ///
    /// let mut map = ZeroMap2d::new();
    /// assert!(map.try_append(&1, "one", "uno").is_none());
    /// assert!(map.try_append(&3, "three", "tres").is_none());
    ///
    /// let unsuccessful = map.try_append(&3, "three", "tres-updated");
    /// assert!(unsuccessful.is_some(), "append duplicate of last key");
    ///
    /// let unsuccessful = map.try_append(&2, "two", "dos");
    /// assert!(unsuccessful.is_some(), "append out of order");
    ///
    /// assert_eq!(map.get_2d(&1, "one"), Some("uno"));
    ///
    /// // contains the original value for the key: 3
    /// assert_eq!(map.get_2d(&3, "three"), Some("tres"));
    ///
    /// // not appended since it wasn't in order
    /// assert_eq!(map.get_2d(&2, "two"), None);
    /// ```
    #[must_use]
    pub fn try_append<'b>(
        &mut self,
        key0: &'b K0,
        key1: &'b K1,
        value: &'b V,
    ) -> Option<(&'b K0, &'b K1, &'b V)> {
        if self.is_empty() {
            // First entry: joiner gets the value 1 (one key1 so far).
            self.keys0.zvl_push(key0);
            self.joiner.with_mut(|v| v.push(1u32.to_unaligned()));
            self.keys1.zvl_push(key1);
            self.values.zvl_push(value);
            return None;
        }

        // The unwraps are protected by the fact that we are not empty
        #[allow(clippy::unwrap_used)]
        let last_key0 = self.keys0.zvl_get(self.keys0.zvl_len() - 1).unwrap();
        let key0_cmp = K0::Container::t_cmp_get(key0, last_key0);
        #[allow(clippy::unwrap_used)]
        let last_key1 = self.keys1.zvl_get(self.keys1.zvl_len() - 1).unwrap();
        let key1_cmp = K1::Container::t_cmp_get(key1, last_key1);

        // Check for error case (out of order)
        match key0_cmp {
            Ordering::Less => {
                // Error case
                return Some((key0, key1, value));
            }
            Ordering::Equal => {
                match key1_cmp {
                    Ordering::Less | Ordering::Equal => {
                        // Error case
                        return Some((key0, key1, value));
                    }
                    _ => {}
                }
            }
            _ => {}
        }

        #[allow(clippy::expect_used)] // slice overflow
        let joiner_value = u32::try_from(self.keys1.zvl_len() + 1)
            .expect("Attempted to add more than 2^32 elements to a ZeroMap2d");

        // All OK to append
        #[allow(clippy::unwrap_used)]
        if key0_cmp == Ordering::Greater {
            // New key0: push it along with a fresh joiner entry.
            self.keys0.zvl_push(key0);
            self.joiner
                .with_mut(|v| v.push(joiner_value.to_unaligned()));
        } else {
            // Same key0 as the last entry: just extend its range.
            // This unwrap is protected because we are not empty
            *self.joiner.to_mut_slice().last_mut().unwrap() = joiner_value.to_unaligned();
        }
        self.keys1.zvl_push(key1);
        self.values.zvl_push(value);

        #[cfg(debug_assertions)]
        self.check_invariants();

        None
    }

    // INTERNAL ROUTINES FOLLOW //

    // Debug-only check of the required and optional invariants documented on
    // the struct (lengths match, joiner ascending and ends at len(keys1),
    // keys1 sorted within each range, no empty ranges).
    #[cfg(debug_assertions)]
    #[allow(clippy::unwrap_used)] // this is an assertion function
    pub(crate) fn check_invariants(&self) {
        debug_assert_eq!(self.keys0.zvl_len(), self.joiner.len());
        debug_assert_eq!(self.keys1.zvl_len(), self.values.zvl_len());
        debug_assert!(self.keys0.zvl_is_ascending());
        debug_assert!(self.joiner.zvl_is_ascending());
        if let Some(last_joiner) = self.joiner.last() {
            debug_assert_eq!(last_joiner as usize, self.keys1.zvl_len());
        }
        for i in 0..self.joiner.len() {
            let j0 = if i == 0 {
                0
            } else {
                self.joiner.get(i - 1).unwrap() as usize
            };
            let j1 = self.joiner.get(i).unwrap() as usize;
            debug_assert_ne!(j0, j1);
            for j in (j0 + 1)..j1 {
                let m0 = self.keys1.zvl_get(j - 1).unwrap();
                let m1 = self.keys1.zvl_get(j).unwrap();
                debug_assert_eq!(Ordering::Less, K1::Container::get_cmp_get(m0, m1));
            }
        }
    }
}

impl<'a, K0, K1, V> ZeroMap2d<'a, K0, K1, V>
where
    K0: ZeroMapKV<'a> + Ord,
    K1: ZeroMapKV<'a>,
    V: ZeroMapKV<'a>,
    K0: ?Sized,
    K1: ?Sized,
    V: ?Sized,
{
    /// Gets a cursor for `key0`. If `None`, then `key0` is not in the map. If `Some`,
    /// then `key0` is in the map, and `key1` can be queried.
    ///
    /// ```rust
    /// use zerovec::ZeroMap2d;
    ///
    /// let mut map = ZeroMap2d::new();
    /// map.insert(&1u32, "one", "foo");
    /// map.insert(&2, "one", "bar");
    /// map.insert(&2, "two", "baz");
    /// assert_eq!(map.get0(&1).unwrap().get1("one").unwrap(), "foo");
    /// assert_eq!(map.get0(&1).unwrap().get1("two"), None);
    /// assert_eq!(map.get0(&2).unwrap().get1("one").unwrap(), "bar");
    /// assert_eq!(map.get0(&2).unwrap().get1("two").unwrap(), "baz");
    /// assert_eq!(map.get0(&3), None);
    /// ```
    #[inline]
    pub fn get0<'l>(&'l self, key0: &K0) -> Option<ZeroMap2dCursor<'l, 'a, K0, K1, V>> {
        let key0_index = self.keys0.zvl_binary_search(key0).ok()?;
        Some(ZeroMap2dCursor::from_cow(self, key0_index))
    }

    /// Binary search the map for `key0`, returning a cursor.
    ///
    /// ```rust
    /// use zerovec::ZeroMap2d;
    ///
    /// let mut map = ZeroMap2d::new();
    /// map.insert(&1, "one", "foo");
    /// map.insert(&2, "two", "bar");
    /// assert!(matches!(map.get0_by(|probe| probe.cmp(&1)), Some(_)));
    /// assert!(matches!(map.get0_by(|probe| probe.cmp(&3)), None));
    /// ```
    pub fn get0_by<'l>(
        &'l self,
        predicate: impl FnMut(&K0) -> Ordering,
    ) -> Option<ZeroMap2dCursor<'l, 'a, K0, K1, V>> {
        let key0_index = self.keys0.zvl_binary_search_by(predicate).ok()?;
        Some(ZeroMap2dCursor::from_cow(self, key0_index))
    }

    /// Returns whether `key0` is contained in this map
    ///
    /// ```rust
    /// use zerovec::ZeroMap2d;
    ///
    /// let mut map = ZeroMap2d::new();
    /// map.insert(&1, "one", "foo");
    /// map.insert(&2, "two", "bar");
    /// assert!(map.contains_key0(&1));
    /// assert!(!map.contains_key0(&3));
    /// ```
    pub fn contains_key0(&self, key0: &K0) -> bool {
        self.keys0.zvl_binary_search(key0).is_ok()
    }

    // INTERNAL ROUTINES FOLLOW //

    /// Same as `get_range_for_key0`, but creates key0 if it doesn't already exist
    fn get_or_insert_range_for_key0(&mut self, key0: &K0) -> (usize, Range<usize>) {
        match self.keys0.zvl_binary_search(key0) {
            Ok(key0_index) => (key0_index, self.get_range_for_key0_index(key0_index)),
            Err(key0_index) => {
                // Add an entry to self.keys0 and self.joiner
                let joiner_value = if key0_index == 0 {
                    0
                } else {
                    debug_assert!(key0_index <= self.joiner.len());
                    // The unwrap is protected by the debug_assert above and key0_index != 0
                    #[allow(clippy::unwrap_used)]
                    self.joiner.get(key0_index - 1).unwrap()
                };
                self.keys0.zvl_insert(key0_index, key0);
                self.joiner
                    .with_mut(|v| v.insert(key0_index, joiner_value.to_unaligned()));
                // The new key0 starts with an empty keys1 range; the caller
                // (insert) is responsible for populating it.
                (key0_index, (joiner_value as usize)..(joiner_value as usize))
            }
        }
    }
}

impl<'a, K0, K1, V> ZeroMap2d<'a, K0, K1, V>
where
    K0: ZeroMapKV<'a> + Ord,
    K1: ZeroMapKV<'a> + Ord,
    V: ZeroMapKV<'a>,
    V: Copy,
    K0: ?Sized,
    K1: ?Sized,
{
    /// For cases when `V` is fixed-size, obtain a direct copy of `V` instead of `V::ULE`
    ///
    /// # Examples
    ///
    /// ```
    /// # use zerovec::ZeroMap2d;
    /// let mut map: ZeroMap2d<u16, u16, u16> = ZeroMap2d::new();
    /// map.insert(&1, &2, &3);
    /// map.insert(&1, &4, &5);
    /// map.insert(&6, &7, &8);
    ///
    /// assert_eq!(map.get_copied_2d(&6, &7), Some(8));
    /// ```
    #[inline]
    pub fn get_copied_2d(&self, key0: &K0, key1: &K1) -> Option<V> {
        self.get0(key0)?.get1_copied(key1)
    }
}

impl<'a, K0, K1, V> From<ZeroMap2dBorrowed<'a, K0, K1, V>> for ZeroMap2d<'a, K0, K1, V>
where
    K0: ZeroMapKV<'a>,
    K1: ZeroMapKV<'a>,
    V: ZeroMapKV<'a>,
    K0: ?Sized,
    K1: ?Sized,
    V: ?Sized,
{
    fn from(other: ZeroMap2dBorrowed<'a, K0, K1, V>) -> Self {
        Self {
            keys0: K0::Container::zvl_from_borrowed(other.keys0),
            joiner: other.joiner.as_zerovec(),
            keys1: K1::Container::zvl_from_borrowed(other.keys1),
            values: V::Container::zvl_from_borrowed(other.values),
        }
    }
}

// We can't use the default PartialEq because ZeroMap2d is invariant
// so otherwise rustc will not automatically allow you to compare ZeroMaps
// with different lifetimes
impl<'a, 'b, K0, K1, V> PartialEq<ZeroMap2d<'b, K0, K1, V>> for ZeroMap2d<'a, K0, K1, V>
where
    K0: for<'c> ZeroMapKV<'c> + ?Sized,
    K1: for<'c> ZeroMapKV<'c> + ?Sized,
    V: for<'c> ZeroMapKV<'c> + ?Sized,
    <K0 as ZeroMapKV<'a>>::Container: PartialEq<<K0 as ZeroMapKV<'b>>::Container>,
    <K1 as ZeroMapKV<'a>>::Container: PartialEq<<K1 as ZeroMapKV<'b>>::Container>,
    <V as ZeroMapKV<'a>>::Container: PartialEq<<V as ZeroMapKV<'b>>::Container>,
{
    fn eq(&self, other: &ZeroMap2d<'b, K0, K1, V>) -> bool {
        self.keys0.eq(&other.keys0)
            && self.joiner.eq(&other.joiner)
            && self.keys1.eq(&other.keys1)
            && self.values.eq(&other.values)
    }
}

impl<'a, K0, K1, V> fmt::Debug for ZeroMap2d<'a, K0, K1, V>
where
    K0: ZeroMapKV<'a> + ?Sized,
    K1: ZeroMapKV<'a> + ?Sized,
    V: ZeroMapKV<'a> + ?Sized,
    <K0 as ZeroMapKV<'a>>::Container: fmt::Debug,
    <K1 as ZeroMapKV<'a>>::Container: fmt::Debug,
    <V as ZeroMapKV<'a>>::Container: fmt::Debug,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
        f.debug_struct("ZeroMap2d")
            .field("keys0", &self.keys0)
            .field("joiner", &self.joiner)
            .field("keys1", &self.keys1)
            .field("values", &self.values)
            .finish()
    }
}

impl<'a, K0, K1, V> Clone for ZeroMap2d<'a, K0, K1, V>
where
    K0: ZeroMapKV<'a> + ?Sized,
    K1: ZeroMapKV<'a> + ?Sized,
    V: ZeroMapKV<'a> + ?Sized,
    <K0 as ZeroMapKV<'a>>::Container: Clone,
    <K1 as ZeroMapKV<'a>>::Container: Clone,
    <V as ZeroMapKV<'a>>::Container: Clone,
{
    fn clone(&self) -> Self {
        Self {
            keys0: self.keys0.clone(),
            joiner: self.joiner.clone(),
            keys1: self.keys1.clone(),
            values: self.values.clone(),
        }
    }
}

impl<'a, A, B, C, K0, K1, V> FromIterator<(A, B, C)> for ZeroMap2d<'a, K0, K1, V>
where
    A: Borrow<K0>,
    B: Borrow<K1>,
    C: Borrow<V>,
    K0: ZeroMapKV<'a> + ?Sized + Ord,
    K1: ZeroMapKV<'a> + ?Sized + Ord,
    V: ZeroMapKV<'a> + ?Sized,
{
    fn from_iter<T>(iter: T) -> Self
    where
        T: IntoIterator<Item = (A, B, C)>,
    {
        let iter = iter.into_iter();
        let mut map = match iter.size_hint() {
            (_, Some(upper)) => Self::with_capacity(upper),
            (lower, None) => Self::with_capacity(lower),
        };
        for (key0, key1, value) in iter {
            // Fast path: try_append is O(1) when input is already sorted;
            // fall back to the O(n) sorted insert otherwise.
            if let Some((key0, key1, value)) =
                map.try_append(key0.borrow(), key1.borrow(), value.borrow())
            {
                map.insert(key0, key1, value);
            }
        }
        #[cfg(debug_assertions)]
        map.check_invariants();
        map
    }
}

#[cfg(test)]
mod test {
    use super::*;
    use alloc::collections::BTreeMap;

    #[test]
    fn stress_test() {
        let mut zm2d = ZeroMap2d::<u16, str, str>::new();

        assert_eq!(
            format!("{zm2d:?}"),
            "ZeroMap2d { keys0: ZeroVec([]), joiner: ZeroVec([]), keys1: [], values: [] }"
        );
        assert_eq!(zm2d.get0(&0), None);

        let result = zm2d.try_append(&3, "ccc", "CCC");
        assert!(result.is_none());

        assert_eq!(format!("{zm2d:?}"), "ZeroMap2d { keys0: ZeroVec([3]), joiner: ZeroVec([1]), keys1: [\"ccc\"], values: [\"CCC\"] }");
        assert_eq!(zm2d.get0(&0), None);
        assert_eq!(zm2d.get0(&3).unwrap().get1(""), None);
        assert_eq!(zm2d.get_2d(&3, "ccc"), Some("CCC"));
        assert_eq!(zm2d.get0(&99), None);

        let result = zm2d.try_append(&3, "eee", "EEE");
        assert!(result.is_none());

        assert_eq!(format!("{zm2d:?}"), "ZeroMap2d { keys0: ZeroVec([3]), joiner: ZeroVec([2]), keys1: [\"ccc\", \"eee\"], values: [\"CCC\", \"EEE\"] }");
        assert_eq!(zm2d.get0(&0), None);
        assert_eq!(zm2d.get0(&3).unwrap().get1(""), None);
        assert_eq!(zm2d.get_2d(&3, "ccc"), Some("CCC"));
        assert_eq!(zm2d.get_2d(&3, "eee"), Some("EEE"));
        assert_eq!(zm2d.get0(&3).unwrap().get1("five"), None);
        assert_eq!(zm2d.get0(&99), None);

        // Out of order
        let result = zm2d.try_append(&3, "ddd", "DD0");
        assert!(result.is_some());

        // Append a few more elements
        let result = zm2d.try_append(&5, "ddd", "DD1");
        assert!(result.is_none());
        let result = zm2d.try_append(&7, "ddd", "DD2");
        assert!(result.is_none());
        let result = zm2d.try_append(&7, "eee", "EEE");
        assert!(result.is_none());
        let result = zm2d.try_append(&7, "www", "WWW");
        assert!(result.is_none());
        let result = zm2d.try_append(&9, "yyy", "YYY");
        assert!(result.is_none());

        assert_eq!(format!("{zm2d:?}"), "ZeroMap2d { keys0: ZeroVec([3, 5, 7, 9]), joiner: ZeroVec([2, 3, 6, 7]), keys1: [\"ccc\", \"eee\", \"ddd\", \"ddd\", \"eee\", \"www\", \"yyy\"], values: [\"CCC\", \"EEE\", \"DD1\", \"DD2\", \"EEE\", \"WWW\", \"YYY\"] }");
        assert_eq!(zm2d.get0(&0), None);
        assert_eq!(zm2d.get0(&3).unwrap().get1(""), None);
        assert_eq!(zm2d.get_2d(&3, "ccc"), Some("CCC"));
        assert_eq!(zm2d.get_2d(&3, "eee"), Some("EEE"));
        assert_eq!(zm2d.get0(&3).unwrap().get1("zzz"), None);
        assert_eq!(zm2d.get0(&4), None);
        assert_eq!(zm2d.get0(&5).unwrap().get1("aaa"), None);
        assert_eq!(zm2d.get_2d(&5, "ddd"), Some("DD1"));
        assert_eq!(zm2d.get0(&5).unwrap().get1("zzz"), None);
        assert_eq!(zm2d.get0(&6), None);
        assert_eq!(zm2d.get0(&7).unwrap().get1("aaa"), None);
        assert_eq!(zm2d.get_2d(&7, "ddd"), Some("DD2"));
        assert_eq!(zm2d.get_2d(&7, "eee"), Some("EEE"));
        assert_eq!(zm2d.get_2d(&7, "www"), Some("WWW"));
        assert_eq!(zm2d.get0(&7).unwrap().get1("yyy"), None);
        assert_eq!(zm2d.get0(&7).unwrap().get1("zzz"), None);
        assert_eq!(zm2d.get0(&8), None);
        assert_eq!(zm2d.get0(&9).unwrap().get1("aaa"), None);
        assert_eq!(zm2d.get0(&9).unwrap().get1("www"), None);
        assert_eq!(zm2d.get_2d(&9, "yyy"), Some("YYY"));
        assert_eq!(zm2d.get0(&9).unwrap().get1("zzz"), None);
        assert_eq!(zm2d.get0(&10), None);
        assert_eq!(zm2d.get0(&99), None);

        // Insert some elements
        zm2d.insert(&3, "mmm", "MM0");
        zm2d.insert(&6, "ddd", "DD3");
        zm2d.insert(&6, "mmm", "MM1");
        zm2d.insert(&6, "nnn", "NNN");

        assert_eq!(format!("{zm2d:?}"), "ZeroMap2d { keys0: ZeroVec([3, 5, 6, 7, 9]), joiner: ZeroVec([3, 4, 7, 10, 11]), keys1: [\"ccc\", \"eee\", \"mmm\", \"ddd\", \"ddd\", \"mmm\", \"nnn\", \"ddd\", \"eee\", \"www\", \"yyy\"], values: [\"CCC\", \"EEE\", \"MM0\", \"DD1\", \"DD3\", \"MM1\", \"NNN\", \"DD2\", \"EEE\", \"WWW\", \"YYY\"] }");
        assert_eq!(zm2d.get0(&0), None);
        assert_eq!(zm2d.get0(&3).unwrap().get1(""), None);
        assert_eq!(zm2d.get_2d(&3, "ccc"), Some("CCC"));
        assert_eq!(zm2d.get_2d(&3, "eee"), Some("EEE"));
        assert_eq!(zm2d.get_2d(&3, "mmm"), Some("MM0"));
        assert_eq!(zm2d.get0(&3).unwrap().get1("zzz"), None);
        assert_eq!(zm2d.get0(&4), None);
        assert_eq!(zm2d.get0(&5).unwrap().get1("aaa"), None);
        assert_eq!(zm2d.get_2d(&5, "ddd"), Some("DD1"));
        assert_eq!(zm2d.get0(&5).unwrap().get1("zzz"), None);
        assert_eq!(zm2d.get0(&6).unwrap().get1("aaa"), None);
        assert_eq!(zm2d.get_2d(&6, "ddd"), Some("DD3"));
        assert_eq!(zm2d.get_2d(&6, "mmm"), Some("MM1"));
        assert_eq!(zm2d.get_2d(&6, "nnn"), Some("NNN"));
        assert_eq!(zm2d.get0(&6).unwrap().get1("zzz"), None);
        assert_eq!(zm2d.get0(&7).unwrap().get1("aaa"), None);
        assert_eq!(zm2d.get_2d(&7, "ddd"), Some("DD2"));
        assert_eq!(zm2d.get_2d(&7, "eee"), Some("EEE"));
        assert_eq!(zm2d.get_2d(&7, "www"), Some("WWW"));
        assert_eq!(zm2d.get0(&7).unwrap().get1("yyy"), None);
        assert_eq!(zm2d.get0(&7).unwrap().get1("zzz"), None);
        assert_eq!(zm2d.get0(&8), None);
        assert_eq!(zm2d.get0(&9).unwrap().get1("aaa"), None);
        assert_eq!(zm2d.get0(&9).unwrap().get1("www"), None);
        assert_eq!(zm2d.get_2d(&9, "yyy"), Some("YYY"));
        assert_eq!(zm2d.get0(&9).unwrap().get1("zzz"), None);
        assert_eq!(zm2d.get0(&10), None);
        assert_eq!(zm2d.get0(&99), None);

        // Remove some elements
        let result = zm2d.remove(&3, "ccc"); // first element
        assert_eq!(result.as_deref(), Some("CCC"));
        let result = zm2d.remove(&3, "mmm"); // middle element
        assert_eq!(result.as_deref(), Some("MM0"));
        let result = zm2d.remove(&5, "ddd"); // singleton K0
        assert_eq!(result.as_deref(), Some("DD1"));
        let result = zm2d.remove(&9, "yyy"); // last element
        assert_eq!(result.as_deref(), Some("YYY"));

        assert_eq!(format!("{zm2d:?}"), "ZeroMap2d { keys0: ZeroVec([3, 6, 7]), joiner: ZeroVec([1, 4, 7]), keys1: [\"eee\", \"ddd\", \"mmm\", \"nnn\", \"ddd\", \"eee\", \"www\"], values: [\"EEE\", \"DD3\", \"MM1\", \"NNN\", \"DD2\", \"EEE\", \"WWW\"] }");
    }

    #[test]
    fn zeromap2d_metazone() {
        let source_data = [
            (*b"aedxb", 0, Some(*b"gulf")),
            (*b"afkbl", 0, Some(*b"afgh")),
            (*b"ushnl", 0, None),
            (*b"ushnl", 7272660, Some(*b"haal")),
            (*b"ushnl", 0, None),
            (*b"ushnl", 7272660, Some(*b"haal")),
        ];
        // Duplicate (key0, key1) pairs above must collapse the same way in
        // both maps, so iteration order and contents should agree exactly.
        let btreemap: BTreeMap<([u8; 5], i32), Option<[u8; 4]>> = source_data
            .iter()
            .copied()
            .map(|(a, b, c)| ((a, b), c))
            .collect();
        let zeromap2d: ZeroMap2d<[u8; 5], i32, Option<[u8; 4]>> =
            source_data.iter().copied().collect();
        let mut btreemap_iter = btreemap.iter();
        for cursor in zeromap2d.iter0() {
            for (key1, value) in cursor.iter1() {
                // This code runs for every (key0, key1) pair in order
                let expected = btreemap_iter.next().unwrap();
                assert_eq!(
                    (expected.0 .0, expected.0 .1, expected.1),
                    (*cursor.key0(), key1.as_unsigned_int() as i32, &value.get())
                );
            }
        }
        assert!(btreemap_iter.next().is_none());
    }
}
// This file is part of ICU4X. For terms of use, please see the file // called LICENSE at the top level of the ICU4X source tree // (online at: https://github.com/unicode-org/icu4x/blob/main/LICENSE ). //! See [`ZeroMap2d`](crate::ZeroMap2d) for details. mod borrowed; mod cursor; pub(crate) mod map; #[cfg(feature = "databake")] mod databake; #[cfg(feature = "serde")] mod serde; pub use crate::ZeroMap2d; pub use borrowed::ZeroMap2dBorrowed; pub use cursor::ZeroMap2dCursor;
// This file is part of ICU4X. For terms of use, please see the file
// called LICENSE at the top level of the ICU4X source tree
// (online at: https://github.com/unicode-org/icu4x/blob/main/LICENSE ).

//
// Serde integration for ZeroMap2d / ZeroMap2dBorrowed.
//
// Two wire formats are supported:
// - human-readable (e.g. JSON): a nested map { key0: { key1: value } }
// - binary (e.g. bincode): the four raw backing stores
//   (keys0, joiner, keys1, values) serialized as a tuple
//

use super::{ZeroMap2d, ZeroMap2dBorrowed, ZeroMap2dCursor};
use crate::map::{MutableZeroVecLike, ZeroMapKV, ZeroVecLike};
use crate::ZeroVec;
use alloc::vec::Vec;
use core::fmt;
use core::marker::PhantomData;
use serde::de::{self, Deserialize, Deserializer, MapAccess, Visitor};
#[cfg(feature = "serde")]
use serde::ser::{Serialize, SerializeMap, Serializer};

/// This impl requires enabling the optional `serde` Cargo feature of the `zerovec` crate
#[cfg(feature = "serde")]
impl<'a, K0, K1, V> Serialize for ZeroMap2d<'a, K0, K1, V>
where
    K0: ZeroMapKV<'a> + Serialize + ?Sized + Ord,
    K1: ZeroMapKV<'a> + Serialize + ?Sized + Ord,
    V: ZeroMapKV<'a> + Serialize + ?Sized,
    K0::Container: Serialize,
    K1::Container: Serialize,
    V::Container: Serialize,
{
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        if serializer.is_human_readable() {
            // Nested-map form: one outer entry per K0, whose value is the
            // inner (K1 -> V) map reachable through the cursor.
            let mut serde_map = serializer.serialize_map(None)?;
            for cursor in self.iter0() {
                K0::Container::zvl_get_as_t(cursor.key0(), |k| serde_map.serialize_key(k))?;
                let inner_map = ZeroMap2dInnerMapSerialize { cursor };
                serde_map.serialize_value(&inner_map)?;
            }
            serde_map.end()
        } else {
            // Binary form: dump the four backing stores directly.
            (&self.keys0, &self.joiner, &self.keys1, &self.values).serialize(serializer)
        }
    }
}

/// Helper struct for human-serializing the inner map of a ZeroMap2d
#[cfg(feature = "serde")]
struct ZeroMap2dInnerMapSerialize<'a, 'l, K0, K1, V>
where
    K0: ZeroMapKV<'a> + ?Sized + Ord,
    K1: ZeroMapKV<'a> + ?Sized + Ord,
    V: ZeroMapKV<'a> + ?Sized,
{
    pub cursor: ZeroMap2dCursor<'l, 'a, K0, K1, V>,
}

#[cfg(feature = "serde")]
impl<'a, 'l, K0, K1, V> Serialize for ZeroMap2dInnerMapSerialize<'a, 'l, K0, K1, V>
where
    K0: ZeroMapKV<'a> + Serialize + ?Sized + Ord,
    K1: ZeroMapKV<'a> + Serialize + ?Sized + Ord,
    V: ZeroMapKV<'a> + Serialize + ?Sized,
    K0::Container: Serialize,
    K1::Container: Serialize,
    V::Container: Serialize,
{
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        // Serialize every (K1, V) pair in the cursor's K0 group as one map.
        let mut serde_map = serializer.serialize_map(None)?;
        for (key1, v) in self.cursor.iter1() {
            K1::Container::zvl_get_as_t(key1, |k| serde_map.serialize_key(k))?;
            V::Container::zvl_get_as_t(v, |v| serde_map.serialize_value(v))?;
        }
        serde_map.end()
    }
}

/// This impl requires enabling the optional `serde` Cargo feature of the `zerovec` crate
#[cfg(feature = "serde")]
impl<'a, K0, K1, V> Serialize for ZeroMap2dBorrowed<'a, K0, K1, V>
where
    K0: ZeroMapKV<'a> + Serialize + ?Sized + Ord,
    K1: ZeroMapKV<'a> + Serialize + ?Sized + Ord,
    V: ZeroMapKV<'a> + Serialize + ?Sized,
    K0::Container: Serialize,
    K1::Container: Serialize,
    V::Container: Serialize,
{
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        // Delegate to the owned map's impl; the borrowed form serializes
        // identically.
        ZeroMap2d::<K0, K1, V>::from(*self).serialize(serializer)
    }
}

/// Modified example from https://serde.rs/deserialize-map.html
struct ZeroMap2dMapVisitor<'a, K0, K1, V>
where
    K0: ZeroMapKV<'a> + ?Sized + Ord,
    K1: ZeroMapKV<'a> + ?Sized + Ord,
    V: ZeroMapKV<'a> + ?Sized,
{
    #[allow(clippy::type_complexity)] // it's a marker type, complexity doesn't matter
    marker: PhantomData<fn() -> (&'a K0::OwnedType, &'a K1::OwnedType, &'a V::OwnedType)>,
}

impl<'a, K0, K1, V> ZeroMap2dMapVisitor<'a, K0, K1, V>
where
    K0: ZeroMapKV<'a> + ?Sized + Ord,
    K1: ZeroMapKV<'a> + ?Sized + Ord,
    V: ZeroMapKV<'a> + ?Sized,
{
    fn new() -> Self {
        ZeroMap2dMapVisitor {
            marker: PhantomData,
        }
    }
}

// NOTE(review): `Ord` appears twice in the K0/K1 bounds below; harmless but
// redundant, and could be deduplicated in a follow-up.
impl<'a, 'de, K0, K1, V> Visitor<'de> for ZeroMap2dMapVisitor<'a, K0, K1, V>
where
    K0: ZeroMapKV<'a> + Ord + ?Sized + Ord,
    K1: ZeroMapKV<'a> + Ord + ?Sized + Ord,
    V: ZeroMapKV<'a> + ?Sized,
    K1::Container: Deserialize<'de>,
    V::Container: Deserialize<'de>,
    K0::OwnedType: Deserialize<'de>,
    K1::OwnedType: Deserialize<'de>,
    V::OwnedType: Deserialize<'de>,
{
    type Value = ZeroMap2d<'a, K0, K1, V>;

    fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
        formatter.write_str("a map produced by ZeroMap2d")
    }

    fn visit_map<M>(self, mut access: M) -> Result<Self::Value, M::Error>
    where
        M: MapAccess<'de>,
    {
        let mut map = ZeroMap2d::with_capacity(access.size_hint().unwrap_or(0));

        // On the first level, pull out the K0s and a TupleVecMap of the
        // K1s and Vs, and then collect them into a ZeroMap2d
        while let Some((key0, inner_map)) =
            access.next_entry::<K0::OwnedType, TupleVecMap<K1::OwnedType, V::OwnedType>>()?
        {
            for (key1, value) in inner_map.entries.iter() {
                // try_append returns Some(..) when the pair is rejected,
                // i.e. when the input keys are not in sorted order.
                if map
                    .try_append(
                        K0::Container::owned_as_t(&key0),
                        K1::Container::owned_as_t(key1),
                        V::Container::owned_as_t(value),
                    )
                    .is_some()
                {
                    return Err(de::Error::custom(
                        "ZeroMap2d's keys must be sorted while deserializing",
                    ));
                }
            }
        }

        Ok(map)
    }
}

/// Helper struct for human-deserializing the inner map of a ZeroMap2d
// (Collects the inner map as plain (K1, V) pairs so the outer visitor can
// append them in order without building a second ZeroMap.)
struct TupleVecMap<K1, V> {
    pub entries: Vec<(K1, V)>,
}

struct TupleVecMapVisitor<K1, V> {
    #[allow(clippy::type_complexity)] // it's a marker type, complexity doesn't matter
    marker: PhantomData<fn() -> (K1, V)>,
}

impl<K1, V> TupleVecMapVisitor<K1, V> {
    fn new() -> Self {
        TupleVecMapVisitor {
            marker: PhantomData,
        }
    }
}

impl<'de, K1, V> Visitor<'de> for TupleVecMapVisitor<K1, V>
where
    K1: Deserialize<'de>,
    V: Deserialize<'de>,
{
    type Value = TupleVecMap<K1, V>;

    fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
        formatter.write_str("an inner map produced by ZeroMap2d")
    }

    fn visit_map<M>(self, mut access: M) -> Result<Self::Value, M::Error>
    where
        M: MapAccess<'de>,
    {
        let mut result = Vec::with_capacity(access.size_hint().unwrap_or(0));
        while let Some((key1, value)) = access.next_entry::<K1, V>()? {
            result.push((key1, value));
        }
        Ok(TupleVecMap { entries: result })
    }
}

impl<'de, K1, V> Deserialize<'de> for TupleVecMap<K1, V>
where
    K1: Deserialize<'de>,
    V: Deserialize<'de>,
{
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        deserializer.deserialize_map(TupleVecMapVisitor::<K1, V>::new())
    }
}

/// This impl requires enabling the optional `serde` Cargo feature of the `zerovec` crate
impl<'de, 'a, K0, K1, V> Deserialize<'de> for ZeroMap2d<'a, K0, K1, V>
where
    K0: ZeroMapKV<'a> + Ord + ?Sized,
    K1: ZeroMapKV<'a> + Ord + ?Sized,
    V: ZeroMapKV<'a> + ?Sized,
    K0::Container: Deserialize<'de>,
    K1::Container: Deserialize<'de>,
    V::Container: Deserialize<'de>,
    K0::OwnedType: Deserialize<'de>,
    K1::OwnedType: Deserialize<'de>,
    V::OwnedType: Deserialize<'de>,
    'de: 'a,
{
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        if deserializer.is_human_readable() {
            deserializer.deserialize_map(ZeroMap2dMapVisitor::<'a, K0, K1, V>::new())
        } else {
            // Binary path: read the four raw stores and re-validate the
            // structural invariants, since the bytes are untrusted.
            let (keys0, joiner, keys1, values): (
                K0::Container,
                ZeroVec<u32>,
                K1::Container,
                V::Container,
            ) = Deserialize::deserialize(deserializer)?;
            // Invariant 1: len(keys0) == len(joiner)
            if keys0.zvl_len() != joiner.len() {
                return Err(de::Error::custom(
                    "Mismatched keys0 and joiner sizes in ZeroMap2d",
                ));
            }
            // Invariant 2: len(keys1) == len(values)
            if keys1.zvl_len() != values.zvl_len() {
                return Err(de::Error::custom(
                    "Mismatched keys1 and value sizes in ZeroMap2d",
                ));
            }
            // Invariant 3: joiner is sorted
            if !joiner.zvl_is_ascending() {
                return Err(de::Error::custom(
                    "ZeroMap2d deserializing joiner array out of order",
                ));
            }
            // Invariant 4: the last element of joiner is the length of keys1
            if let Some(last_joiner0) = joiner.last() {
                if keys1.zvl_len() != last_joiner0 as usize {
                    return Err(de::Error::custom(
                        "ZeroMap2d deserializing joiner array malformed",
                    ));
                }
            }
            let result = Self {
                keys0,
                joiner,
                keys1,
                values,
            };
            // In debug mode, check the optional invariants, too
            #[cfg(debug_assertions)]
            result.check_invariants();
            Ok(result)
        }
    }
}

/// This impl requires enabling the optional `serde` Cargo feature of the `zerovec` crate
impl<'de, 'a, K0, K1, V> Deserialize<'de> for ZeroMap2dBorrowed<'a, K0, K1, V>
where
    K0: ZeroMapKV<'a> + Ord + ?Sized,
    K1: ZeroMapKV<'a> + Ord + ?Sized,
    V: ZeroMapKV<'a> + ?Sized,
    K0::Container: Deserialize<'de>,
    K1::Container: Deserialize<'de>,
    V::Container: Deserialize<'de>,
    K0::OwnedType: Deserialize<'de>,
    K1::OwnedType: Deserialize<'de>,
    V::OwnedType: Deserialize<'de>,
    'de: 'a,
{
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        if deserializer.is_human_readable() {
            // The borrowed form cannot own the allocations a human-readable
            // (owned) deserialization would require.
            Err(de::Error::custom(
                "ZeroMap2dBorrowed cannot be deserialized from human-readable formats",
            ))
        } else {
            // Deserialize as an owned map, then require every backing store
            // to be a zero-copy borrow of the input buffer.
            let deserialized: ZeroMap2d<'a, K0, K1, V> = ZeroMap2d::deserialize(deserializer)?;
            let keys0 = if let Some(keys0) = deserialized.keys0.zvl_as_borrowed_inner() {
                keys0
            } else {
                return Err(de::Error::custom(
                    "ZeroMap2dBorrowed can only deserialize in zero-copy ways",
                ));
            };
            let joiner = if let Some(joiner) = deserialized.joiner.zvl_as_borrowed_inner() {
                joiner
            } else {
                return Err(de::Error::custom(
                    "ZeroMap2dBorrowed can only deserialize in zero-copy ways",
                ));
            };
            let keys1 = if let Some(keys1) = deserialized.keys1.zvl_as_borrowed_inner() {
                keys1
            } else {
                return Err(de::Error::custom(
                    "ZeroMap2dBorrowed can only deserialize in zero-copy ways",
                ));
            };
            let values = if let Some(values) = deserialized.values.zvl_as_borrowed_inner() {
                values
            } else {
                return Err(de::Error::custom(
                    "ZeroMap2dBorrowed can only deserialize in zero-copy ways",
                ));
            };
            Ok(Self {
                keys0,
                joiner,
                keys1,
                values,
            })
        }
    }
}

#[cfg(test)]
#[allow(non_camel_case_types)]
mod test {
    use crate::map2d::{ZeroMap2d, ZeroMap2dBorrowed};

    // Compile-time checks that the derives work through #[serde(borrow)]
    #[derive(serde::Serialize, serde::Deserialize)]
    struct DeriveTest_ZeroMap2d<'data> {
        #[serde(borrow)]
        _data: ZeroMap2d<'data, u16, str, [u8]>,
    }

    #[derive(serde::Serialize, serde::Deserialize)]
    struct DeriveTest_ZeroMap2dBorrowed<'data> {
        #[serde(borrow)]
        _data: ZeroMap2dBorrowed<'data, u16, str, [u8]>,
    }

    // Golden serializations of make_map() in both wire formats
    const JSON_STR: &str = "{\"1\":{\"1\":\"uno\"},\"2\":{\"2\":\"dos\",\"3\":\"tres\"}}";
    const BINCODE_BYTES: &[u8] = &[
        8, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 8, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 3, 0,
        0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 1, 0, 2, 0, 3, 0, 16, 0, 0, 0, 0, 0, 0, 0, 3, 0, 3, 0, 6, 0,
        117, 110, 111, 100, 111, 115, 116, 114, 101, 115,
    ];

    fn make_map() -> ZeroMap2d<'static, u32, u16, str> {
        let mut map = ZeroMap2d::new();
        map.insert(&1, &1, "uno");
        map.insert(&2, &2, "dos");
        map.insert(&2, &3, "tres");
        map
    }

    #[test]
    fn test_serde_json() {
        let map = make_map();
        let json_str = serde_json::to_string(&map).expect("serialize");
        assert_eq!(JSON_STR, json_str);
        let new_map: ZeroMap2d<u32, u16, str> =
            serde_json::from_str(&json_str).expect("deserialize");
        assert_eq!(format!("{new_map:?}"), format!("{map:?}"));
    }

    #[test]
    fn test_bincode() {
        let map = make_map();
        let bincode_bytes = bincode::serialize(&map).expect("serialize");
        assert_eq!(BINCODE_BYTES, bincode_bytes);
        // Bincode round-trip is zero-copy, hence the Owned -> Borrowed
        // substitution in the expected Debug string.
        let new_map: ZeroMap2d<u32, u16, str> =
            bincode::deserialize(&bincode_bytes).expect("deserialize");
        assert_eq!(
            format!("{new_map:?}"),
            format!("{map:?}").replace("Owned", "Borrowed"),
        );
        let new_map: ZeroMap2dBorrowed<u32, u16, str> =
            bincode::deserialize(&bincode_bytes).expect("deserialize");
        assert_eq!(
            format!("{new_map:?}"),
            format!("{map:?}")
                .replace("Owned", "Borrowed")
                .replace("ZeroMap2d", "ZeroMap2dBorrowed")
        );
    }

    #[test]
    fn test_serde_rmp() {
        let map = make_map();
        let rmp_buf = rmp_serde::to_vec(&map).expect("serialize");
        let new_map: ZeroMap2d<u32, u16, str> = rmp_serde::from_slice(&rmp_buf).unwrap();
        assert_eq!(map, new_map);
    }

    #[test]
    fn test_sample_bincode() {
        // This is the map from the main docs page for ZeroMap2d
        let mut map: ZeroMap2d<u16, u16, str> = ZeroMap2d::new();
        map.insert(&1, &2, "three");
        let bincode_bytes: Vec<u8> = bincode::serialize(&map).expect("serialize");
        assert_eq!(
            bincode_bytes.as_slice(),
            &[
                2, 0, 0, 0, 0, 0, 0, 0, 1, 0, 4, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 0,
                0, 0, 0, 2, 0, 7, 0, 0, 0, 0, 0, 0, 0, 1, 0, 116, 104, 114, 101, 101
            ]
        );
    }
}
// This file is part of ICU4X. For terms of use, please see the file
// called LICENSE at the top level of the ICU4X source tree
// (online at: https://github.com/unicode-org/icu4x/blob/main/LICENSE ).

//! Example data useful for testing ZeroVec.

// This module is included directly in tests and can trigger the dead_code
// warning since not all samples are used in each test
#![allow(dead_code)]

// Wrapper that forces 8-byte alignment on its contents
#[repr(align(8))]
struct Aligned<T>(pub T);

// This is aligned so that we can test unaligned behavior at odd offsets
const ALIGNED_TEST_BUFFER_LE: Aligned<[u8; 80]> = Aligned([
    0x00, 0x01, 0x02, 0x00, 0x04, 0x05, 0x06, 0x00, 0x08, 0x09, 0x0a, 0x00, 0x0c, 0x0d, 0x0e,
    0x00, 0x10, 0x11, 0x12, 0x00, 0x14, 0x15, 0x16, 0x00, 0x18, 0x19, 0x1a, 0x00, 0x1c, 0x1d,
    0x1e, 0x00, 0x20, 0x21, 0x22, 0x00, 0x24, 0x25, 0x26, 0x00, 0x28, 0x29, 0x2a, 0x00, 0x2c,
    0x2d, 0x2e, 0x00, 0x30, 0x31, 0x32, 0x00, 0x34, 0x35, 0x36, 0x00, 0x38, 0x39, 0x3a, 0x00,
    0x3c, 0x3d, 0x3e, 0x00, 0x40, 0x41, 0x42, 0x00, 0x44, 0x45, 0x46, 0x00, 0x48, 0x49, 0x4a,
    0x00, 0x4c, 0x4d, 0x4e, 0x00,
]);

/// An example byte array intended to be used in `ZeroVec<u32>`.
pub const TEST_BUFFER_LE: &[u8] = &ALIGNED_TEST_BUFFER_LE.0;

/// u32 numbers corresponding to the above byte array.
// (Each u32 is the little-endian reading of 4 consecutive bytes above.)
pub const TEST_SLICE: &[u32] = &[
    0x020100, 0x060504, 0x0a0908, 0x0e0d0c, 0x121110, 0x161514, 0x1a1918, 0x1e1d1c, 0x222120,
    0x262524, 0x2a2928, 0x2e2d2c, 0x323130, 0x363534, 0x3a3938, 0x3e3d3c, 0x424140, 0x464544,
    0x4a4948, 0x4e4d4c,
];

/// The sum of the numbers in TEST_SLICE.
pub const TEST_SUM: u32 = 52629240;

/// Representation of TEST_SLICE in JSON.
pub const JSON_STR: &str = "[131328,394500,657672,920844,1184016,1447188,1710360,1973532,2236704,2499876,2763048,3026220,3289392,3552564,3815736,4078908,4342080,4605252,4868424,5131596]";

/// Representation of TEST_SLICE in Bincode.
// (8-byte little-endian length prefix (80) followed by the raw bytes.)
pub const BINCODE_BUF: &[u8] = &[
    80, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 0, 4, 5, 6, 0, 8, 9, 10, 0, 12, 13, 14, 0, 16, 17, 18, 0,
    20, 21, 22, 0, 24, 25, 26, 0, 28, 29, 30, 0, 32, 33, 34, 0, 36, 37, 38, 0, 40, 41, 42, 0, 44,
    45, 46, 0, 48, 49, 50, 0, 52, 53, 54, 0, 56, 57, 58, 0, 60, 61, 62, 0, 64, 65, 66, 0, 68, 69,
    70, 0, 72, 73, 74, 0, 76, 77, 78, 0,
];

/// Representation of a VarZeroVec<str> with contents ["w", "ω", "文", "𑄃"]
pub const TEST_VARZEROSLICE_BYTES: &[u8] = &[
    4, 0, 0, 0, 0, 0, 1, 0, 3, 0, 6, 0, 119, 207, 137, 230, 150, 135, 240, 145, 132, 131,
];

/// Sanity-checks that the sample constants above are mutually consistent.
#[test]
fn validate() {
    use crate::{VarZeroVec, ZeroVec};
    assert_eq!(
        ZeroVec::<u32>::parse_bytes(TEST_BUFFER_LE).unwrap(),
        ZeroVec::alloc_from_slice(TEST_SLICE)
    );
    assert_eq!(TEST_SLICE.iter().sum::<u32>(), TEST_SUM);
    assert_eq!(
        serde_json::from_str::<ZeroVec::<u32>>(JSON_STR).unwrap(),
        ZeroVec::alloc_from_slice(TEST_SLICE)
    );
    assert_eq!(
        bincode::deserialize::<ZeroVec::<u32>>(BINCODE_BUF).unwrap(),
        ZeroVec::alloc_from_slice(TEST_SLICE)
    );
    VarZeroVec::<str>::parse_bytes(TEST_VARZEROSLICE_BYTES).unwrap();
}
// This file is part of ICU4X. For terms of use, please see the file
// called LICENSE at the top level of the ICU4X source tree
// (online at: https://github.com/unicode-org/icu4x/blob/main/LICENSE ).

#![allow(clippy::upper_case_acronyms)]
//! ULE implementation for the `char` type.

use super::*;
use crate::impl_ule_from_array;
use core::cmp::Ordering;
use core::convert::TryFrom;

/// A u8 array of little-endian data corresponding to a Unicode scalar value.
///
/// The bytes of a `CharULE` are guaranteed to represent a little-endian-encoded u32 that is a
/// valid `char` and can be converted without validation.
///
/// # Examples
///
/// Convert a `char` to a `CharULE` and back again:
///
/// ```
/// use zerovec::ule::{AsULE, CharULE, ULE};
///
/// let c1 = '𑄃';
/// let ule = c1.to_unaligned();
/// assert_eq!(CharULE::slice_as_bytes(&[ule]), &[0x03, 0x11, 0x01]);
/// let c2 = char::from_unaligned(ule);
/// assert_eq!(c1, c2);
/// ```
///
/// Attempt to parse invalid bytes to a `CharULE`:
///
/// ```
/// use zerovec::ule::{CharULE, ULE};
///
/// let bytes: &[u8] = &[0xFF, 0xFF, 0xFF, 0xFF];
/// CharULE::parse_bytes_to_slice(bytes).expect_err("Invalid bytes");
/// ```
// Three bytes suffice because the largest scalar value, U+10FFFF, fits in 21 bits.
#[repr(transparent)]
#[derive(Debug, PartialEq, Eq, Clone, Copy, Hash)]
pub struct CharULE([u8; 3]);

impl CharULE {
    /// Converts a [`char`] to a [`CharULE`]. This is equivalent to calling
    /// [`AsULE::to_unaligned()`]
    ///
    /// See the type-level documentation for [`CharULE`] for more information.
    #[inline]
    pub const fn from_aligned(c: char) -> Self {
        // Drop the always-zero high byte of the scalar value
        let [u0, u1, u2, _u3] = (c as u32).to_le_bytes();
        Self([u0, u1, u2])
    }

    /// Converts this [`CharULE`] to a [`char`]. This is equivalent to calling
    /// [`AsULE::from_unaligned`]
    ///
    /// See the type-level documentation for [`CharULE`] for more information.
    #[inline]
    pub fn to_char(self) -> char {
        let [b0, b1, b2] = self.0;
        // Safe because the bytes of CharULE are defined to represent a valid Unicode scalar value.
        unsafe { char::from_u32_unchecked(u32::from_le_bytes([b0, b1, b2, 0])) }
    }

    impl_ule_from_array!(char, CharULE, Self([0; 3]));
}

// Safety (based on the safety checklist on the ULE trait):
//  1. CharULE does not include any uninitialized or padding bytes.
//     (achieved by `#[repr(transparent)]` on a type that satisfies this invariant)
//  2. CharULE is aligned to 1 byte.
//     (achieved by `#[repr(transparent)]` on a type that satisfies this invariant)
//  3. The impl of validate_bytes() returns an error if any byte is not valid.
//  4. The impl of validate_bytes() returns an error if there are extra bytes.
//  5. The other ULE methods use the default impl.
//  6. CharULE byte equality is semantic equality
unsafe impl ULE for CharULE {
    #[inline]
    fn validate_bytes(bytes: &[u8]) -> Result<(), UleError> {
        // Reject trailing partial elements (checklist item 4)
        if bytes.len() % 3 != 0 {
            return Err(UleError::length::<Self>(bytes.len()));
        }
        // Validate the bytes
        for chunk in bytes.chunks_exact(3) {
            // TODO: Use slice::as_chunks() when stabilized
            #[allow(clippy::indexing_slicing)]
            // Won't panic because the chunks are always 3 bytes long
            let u = u32::from_le_bytes([chunk[0], chunk[1], chunk[2], 0]);
            // char::try_from rejects surrogates and values above U+10FFFF
            char::try_from(u).map_err(|_| UleError::parse::<Self>())?;
        }
        Ok(())
    }
}

impl AsULE for char {
    type ULE = CharULE;

    #[inline]
    fn to_unaligned(self) -> Self::ULE {
        CharULE::from_aligned(self)
    }

    #[inline]
    fn from_unaligned(unaligned: Self::ULE) -> Self {
        unaligned.to_char()
    }
}

impl PartialOrd for CharULE {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}

impl Ord for CharULE {
    // Compare as chars (scalar-value order), not as raw little-endian bytes
    fn cmp(&self, other: &Self) -> Ordering {
        char::from_unaligned(*self).cmp(&char::from_unaligned(*other))
    }
}

#[cfg(test)]
mod test {
    use super::*;

    #[test]
    fn test_from_array() {
        const CHARS: [char; 2] = ['a', '🙃'];
        const CHARS_ULE: [CharULE; 2] = CharULE::from_array(CHARS);
        assert_eq!(
            CharULE::slice_as_bytes(&CHARS_ULE),
            &[0x61, 0x00, 0x00, 0x43, 0xF6, 0x01]
        );
    }

    #[test]
    fn test_from_array_zst() {
        // Ensure from_array also works for a zero-length array
        const CHARS: [char; 0] = [];
        const CHARS_ULE: [CharULE; 0] = CharULE::from_array(CHARS);
        let bytes = CharULE::slice_as_bytes(&CHARS_ULE);
        let empty: &[u8] = &[];
        assert_eq!(bytes, empty);
    }

    #[test]
    fn test_parse() {
        // 1-byte, 2-byte, 3-byte, and two 4-byte character in UTF-8 (not as relevant in UTF-32)
        let chars = ['w', 'ω', '文', '𑄃', '🙃'];
        let char_ules: Vec<CharULE> = chars.iter().copied().map(char::to_unaligned).collect();
        let char_bytes: &[u8] = CharULE::slice_as_bytes(&char_ules);

        // Check parsing
        let parsed_ules: &[CharULE] = CharULE::parse_bytes_to_slice(char_bytes).unwrap();
        assert_eq!(char_ules, parsed_ules);
        let parsed_chars: Vec<char> = parsed_ules
            .iter()
            .copied()
            .map(char::from_unaligned)
            .collect();
        assert_eq!(&chars, parsed_chars.as_slice());

        // Compare to golden expected data
        assert_eq!(
            &[119, 0, 0, 201, 3, 0, 135, 101, 0, 3, 17, 1, 67, 246, 1],
            char_bytes
        );
    }

    #[test]
    fn test_failures() {
        // 119 and 120 are valid, but not 0xD800 (high surrogate)
        let u32s = [119, 0xD800, 120];
        let u32_ules: Vec<RawBytesULE<4>> = u32s
            .iter()
            .copied()
            .map(<u32 as AsULE>::to_unaligned)
            .collect();
        let u32_bytes: &[u8] = RawBytesULE::<4>::slice_as_bytes(&u32_ules);
        let parsed_ules_result = CharULE::parse_bytes_to_slice(u32_bytes);
        assert!(parsed_ules_result.is_err());

        // 0x20FFFF is out of range for a char
        let u32s = [0x20FFFF];
        let u32_ules: Vec<RawBytesULE<4>> = u32s
            .iter()
            .copied()
            .map(<u32 as AsULE>::to_unaligned)
            .collect();
        let u32_bytes: &[u8] = RawBytesULE::<4>::slice_as_bytes(&u32_ules);
        let parsed_ules_result = CharULE::parse_bytes_to_slice(u32_bytes);
        assert!(parsed_ules_result.is_err());
    }
}
// This file is part of ICU4X. For terms of use, please see the file
// called LICENSE at the top level of the ICU4X source tree
// (online at: https://github.com/unicode-org/icu4x/blob/main/LICENSE ).

//! Documentation on implementing custom VarULE types.
//!
//! This module contains documentation for defining custom VarULE types,
//! especially those using complex custom dynamically sized types.
//!
//! In *most cases* you should be able to create custom VarULE types using
//! [`#[make_varule]`](crate::make_ule).
//!
//! # Example
//!
//! For example, if your regular stack type is:
//!
//! ```rust
//! use zerofrom::ZeroFrom;
//! use zerovec::ule::*;
//! use zerovec::ZeroVec;
//!
//! #[derive(serde::Serialize, serde::Deserialize)]
//! struct Foo<'a> {
//!     field1: char,
//!     field2: u32,
//!     #[serde(borrow)]
//!     field3: ZeroVec<'a, u32>,
//! }
//! ```
//!
//! then the ULE type will be implemented as follows. Ideally, you should have
//! `EncodeAsVarULE` and `ZeroFrom` implementations on `Foo` pertaining to `FooULE`,
//! as well as a `Serialize` impl on `FooULE` and a `Deserialize` impl on `Box<FooULE>`
//! to enable human-readable serialization and deserialization.
//!
//! ```rust
//! use zerovec::{ZeroVec, VarZeroVec, ZeroSlice};
//! use zerovec::ule::*;
//! use zerofrom::ZeroFrom;
//! use core::mem;
//!
//! # #[derive(serde::Serialize, serde::Deserialize)]
//! # struct Foo<'a> {
//! #    field1: char,
//! #    field2: u32,
//! #    #[serde(borrow)]
//! #    field3: ZeroVec<'a, u32>
//! # }
//!
//! // Must be repr(C, packed) for safety of VarULE!
//! // Must also only contain ULE types
//! #[repr(C, packed)]
//! struct FooULE {
//!     field1: <char as AsULE>::ULE,
//!     field2: <u32 as AsULE>::ULE,
//!     field3: ZeroSlice<u32>,
//! }
//!
//! // Safety (based on the safety checklist on the VarULE trait):
//! // 1. FooULE does not include any uninitialized or padding bytes. (achieved by `#[repr(C, packed)]` on
//! //    a struct with only ULE fields)
//! // 2. FooULE is aligned to 1 byte. (achieved by `#[repr(C, packed)]` on
//! //    a struct with only ULE fields)
//! // 3. The impl of `validate_bytes()` returns an error if any byte is not valid.
//! // 4. The impl of `validate_bytes()` returns an error if the slice cannot be used in its entirety
//! // 5. The impl of `from_bytes_unchecked()` returns a reference to the same data.
//! // 6. The other VarULE methods use the default impl.
//! // 7. FooULE byte equality is semantic equality
//! unsafe impl VarULE for FooULE {
//!     fn validate_bytes(bytes: &[u8]) -> Result<(), UleError> {
//!         // validate each field
//!         <char as AsULE>::ULE::validate_bytes(&bytes[0..3]).map_err(|_| UleError::parse::<Self>())?;
//!         <u32 as AsULE>::ULE::validate_bytes(&bytes[3..7]).map_err(|_| UleError::parse::<Self>())?;
//!         let _ = ZeroVec::<u32>::parse_bytes(&bytes[7..]).map_err(|_| UleError::parse::<Self>())?;
//!         Ok(())
//!     }
//!     unsafe fn from_bytes_unchecked(bytes: &[u8]) -> &Self {
//!         let ptr = bytes.as_ptr();
//!         let len = bytes.len();
//!         // subtract the length of the char and u32 to get the length of the array
//!         let len_new = (len - 7) / 4;
//!         // it's hard constructing custom DSTs, we fake a pointer/length construction
//!         // eventually we can use the Pointer::Metadata APIs when they stabilize
//!         let fake_slice = core::ptr::slice_from_raw_parts(ptr as *const <u32 as AsULE>::ULE, len_new);
//!         &*(fake_slice as *const Self)
//!     }
//! }
//!
//! unsafe impl EncodeAsVarULE<FooULE> for Foo<'_> {
//!     fn encode_var_ule_as_slices<R>(&self, cb: impl FnOnce(&[&[u8]]) -> R) -> R {
//!         // take each field, convert to ULE byte slices, and pass them through
//!         cb(&[<char as AsULE>::ULE::slice_as_bytes(&[self.field1.to_unaligned()]),
//!              <u32 as AsULE>::ULE::slice_as_bytes(&[self.field2.to_unaligned()]),
//!              // the ZeroVec is already in the correct slice format
//!              self.field3.as_bytes()])
//!     }
//! }
//!
//! impl<'a> ZeroFrom<'a, FooULE> for Foo<'a> {
//!     fn zero_from(other: &'a FooULE) -> Self {
//!         Self {
//!             field1: AsULE::from_unaligned(other.field1),
//!             field2: AsULE::from_unaligned(other.field2),
//!             field3: ZeroFrom::zero_from(&other.field3),
//!         }
//!     }
//! }
//!
//!
//! impl serde::Serialize for FooULE
//! {
//!     fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
//!     where
//!         S: serde::Serializer,
//!     {
//!         Foo::zero_from(self).serialize(serializer)
//!     }
//! }
//!
//! impl<'de> serde::Deserialize<'de> for Box<FooULE>
//! {
//!     fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
//!     where
//!         D: serde::Deserializer<'de>,
//!     {
//!         let mut foo = Foo::deserialize(deserializer)?;
//!         Ok(encode_varule_to_box(&foo))
//!     }
//! }
//!
//! fn main() {
//!     let mut foos = [Foo {field1: 'u', field2: 983, field3: ZeroVec::alloc_from_slice(&[1212,2309,500,7000])},
//!                     Foo {field1: 'l', field2: 1010, field3: ZeroVec::alloc_from_slice(&[1932, 0, 8888, 91237])}];
//!
//!     let vzv = VarZeroVec::<_>::from(&foos);
//!
//!     assert_eq!(char::from_unaligned(vzv.get(0).unwrap().field1), 'u');
//!     assert_eq!(u32::from_unaligned(vzv.get(0).unwrap().field2), 983);
//!     assert_eq!(&vzv.get(0).unwrap().field3, &[1212,2309,500,7000][..]);
//!
//!     assert_eq!(char::from_unaligned(vzv.get(1).unwrap().field1), 'l');
//!     assert_eq!(u32::from_unaligned(vzv.get(1).unwrap().field2), 1010);
//!     assert_eq!(&vzv.get(1).unwrap().field3, &[1932, 0, 8888, 91237][..]);
//! }
//! ```
// This file is part of ICU4X. For terms of use, please see the file
// called LICENSE at the top level of the ICU4X source tree
// (online at: https://github.com/unicode-org/icu4x/blob/main/LICENSE ).

use crate::ule::*;
use crate::varzerovec::VarZeroVecFormat;
use crate::{VarZeroSlice, VarZeroVec, ZeroSlice, ZeroVec};
#[cfg(feature = "alloc")]
use alloc::borrow::{Cow, ToOwned};
#[cfg(feature = "alloc")]
use alloc::boxed::Box;
#[cfg(feature = "alloc")]
use alloc::string::String;
#[cfg(feature = "alloc")]
use alloc::{vec, vec::Vec};
#[cfg(feature = "alloc")]
use core::mem;

/// Allows types to be encoded as VarULEs. This is highly useful for implementing VarULE on
/// custom DSTs where the type cannot be obtained as a reference to some other type.
///
/// [`Self::encode_var_ule_as_slices()`] should be implemented by providing an encoded slice for each field
/// of the VarULE type to the callback, in order. For an implementation to be safe, the slices
/// to the callback must, when concatenated, be a valid instance of the VarULE type.
///
/// See the [custom VarULE documentation](crate::ule::custom) for examples.
///
/// [`Self::encode_var_ule_as_slices()`] is only used to provide default implementations for [`Self::encode_var_ule_write()`]
/// and [`Self::encode_var_ule_len()`]. If you override the default implementations it is totally valid to
/// replace [`Self::encode_var_ule_as_slices()`]'s body with `unreachable!()`. This can be done for cases where
/// it is not possible to implement [`Self::encode_var_ule_as_slices()`] but the other methods still work.
///
/// A typical implementation will take each field in the order found in the [`VarULE`] type,
/// convert it to ULE, call [`ULE::slice_as_bytes()`] on them, and pass the slices to `cb` in order.
/// A trailing [`ZeroVec`](crate::ZeroVec) or [`VarZeroVec`](crate::VarZeroVec) can have their underlying
/// byte representation passed through.
///
/// In case the compiler is not optimizing [`Self::encode_var_ule_len()`], it can be overridden. A typical
/// implementation will add up the sizes of each field on the [`VarULE`] type and then add in the byte length of the
/// dynamically-sized part.
///
/// # Safety
///
/// The safety invariants of [`Self::encode_var_ule_as_slices()`] are:
/// - It must call `cb` (only once)
/// - The slices passed to `cb`, if concatenated, should be a valid instance of the `T` [`VarULE`] type
///   (i.e. if fed to [`VarULE::validate_bytes()`] they must produce a successful result)
/// - It must return the return value of `cb` to the caller
///
/// One or more of [`Self::encode_var_ule_len()`] and [`Self::encode_var_ule_write()`] may be provided.
/// If both are, then `zerovec` code is guaranteed to not call [`Self::encode_var_ule_as_slices()`], and it may be replaced
/// with `unreachable!()`.
///
/// The safety invariants of [`Self::encode_var_ule_len()`] are:
/// - It must return the length of the corresponding VarULE type
///
/// The safety invariants of [`Self::encode_var_ule_write()`] are:
/// - The slice written to `dst` must be a valid instance of the `T` [`VarULE`] type
pub unsafe trait EncodeAsVarULE<T: VarULE + ?Sized> {
    /// Calls `cb` with a piecewise list of byte slices that when concatenated
    /// produce the memory pattern of the corresponding instance of `T`.
    ///
    /// Do not call this function directly; instead use the other two. Some implementors
    /// may define this function to panic.
    fn encode_var_ule_as_slices<R>(&self, cb: impl FnOnce(&[&[u8]]) -> R) -> R;

    /// Return the length, in bytes, of the corresponding [`VarULE`] type
    fn encode_var_ule_len(&self) -> usize {
        // Default: total byte length is the sum of the piecewise slices
        self.encode_var_ule_as_slices(|slices| slices.iter().map(|s| s.len()).sum())
    }

    /// Write the corresponding [`VarULE`] type to the `dst` buffer. `dst` should
    /// be the size of [`Self::encode_var_ule_len()`]
    fn encode_var_ule_write(&self, mut dst: &mut [u8]) {
        debug_assert_eq!(self.encode_var_ule_len(), dst.len());
        self.encode_var_ule_as_slices(move |slices| {
            #[allow(clippy::indexing_slicing)] // by debug_assert
            for slice in slices {
                // Copy each piece, then advance dst past it
                dst[..slice.len()].copy_from_slice(slice);
                dst = &mut dst[slice.len()..];
            }
        });
    }
}

/// Given an [`EncodeAsVarULE`] type `S`, encode it into a `Box<T>`
///
/// This is primarily useful for generating `Deserialize` impls for VarULE types
#[cfg(feature = "alloc")]
pub fn encode_varule_to_box<S: EncodeAsVarULE<T> + ?Sized, T: VarULE + ?Sized>(x: &S) -> Box<T> {
    // zero-fill the vector to avoid uninitialized data UB
    let mut vec: Vec<u8> = vec![0; x.encode_var_ule_len()];
    x.encode_var_ule_write(&mut vec);
    let boxed = mem::ManuallyDrop::new(vec.into_boxed_slice());
    unsafe {
        // Safety: `ptr` is a box, and `T` is a VarULE which guarantees it has the same memory layout as `[u8]`
        // and can be recouped via from_bytes_unchecked()
        let ptr: *mut T = T::from_bytes_unchecked(&boxed) as *const T as *mut T;

        // Safety: we can construct an owned version since we have mem::forgotten the older owner
        Box::from_raw(ptr)
    }
}

// A VarULE value encodes as itself: its own byte representation is already valid.
unsafe impl<T: VarULE + ?Sized> EncodeAsVarULE<T> for T {
    fn encode_var_ule_as_slices<R>(&self, cb: impl FnOnce(&[&[u8]]) -> R) -> R {
        cb(&[T::as_bytes(self)])
    }
}

// References (and references-to-references) delegate to the referent's bytes.
unsafe impl<T: VarULE + ?Sized> EncodeAsVarULE<T> for &'_ T {
    fn encode_var_ule_as_slices<R>(&self, cb: impl FnOnce(&[&[u8]]) -> R) -> R {
        cb(&[T::as_bytes(self)])
    }
}

unsafe impl<T: VarULE + ?Sized> EncodeAsVarULE<T> for &'_ &'_ T {
    fn encode_var_ule_as_slices<R>(&self, cb: impl FnOnce(&[&[u8]]) -> R) -> R {
        cb(&[T::as_bytes(self)])
    }
}

#[cfg(feature = "alloc")]
unsafe impl<T: VarULE + ?Sized> EncodeAsVarULE<T> for Cow<'_, T>
where
    T: ToOwned,
{
    fn encode_var_ule_as_slices<R>(&self, cb: impl FnOnce(&[&[u8]]) -> R) -> R {
        cb(&[T::as_bytes(self.as_ref())])
    }
}

// NOTE(review): the impl below is truncated in this excerpt; the remainder of
// the file is outside this view.
#[cfg(feature = "alloc")]
unsafe impl<T: VarULE + ?Sized>
EncodeAsVarULE<T> for Box<T> { fn encode_var_ule_as_slices<R>(&self, cb: impl FnOnce(&[&[u8]]) -> R) -> R { cb(&[T::as_bytes(self)]) } } #[cfg(feature = "alloc")] unsafe impl<T: VarULE + ?Sized> EncodeAsVarULE<T> for &'_ Box<T> { fn encode_var_ule_as_slices<R>(&self, cb: impl FnOnce(&[&[u8]]) -> R) -> R { cb(&[T::as_bytes(self)]) } } #[cfg(feature = "alloc")] unsafe impl EncodeAsVarULE<str> for String { fn encode_var_ule_as_slices<R>(&self, cb: impl FnOnce(&[&[u8]]) -> R) -> R { cb(&[self.as_bytes()]) } } #[cfg(feature = "alloc")] unsafe impl EncodeAsVarULE<str> for &'_ String { fn encode_var_ule_as_slices<R>(&self, cb: impl FnOnce(&[&[u8]]) -> R) -> R { cb(&[self.as_bytes()]) } } // Note: This impl could technically use `T: AsULE`, but we want users to prefer `ZeroSlice<T>` // for cases where T is not a ULE. Therefore, we can use the more efficient `memcpy` impl here. #[cfg(feature = "alloc")] unsafe impl<T> EncodeAsVarULE<[T]> for Vec<T> where T: ULE, { fn encode_var_ule_as_slices<R>(&self, cb: impl FnOnce(&[&[u8]]) -> R) -> R { cb(&[<[T] as VarULE>::as_bytes(self)]) } } unsafe impl<T> EncodeAsVarULE<ZeroSlice<T>> for &'_ [T] where T: AsULE + 'static, { fn encode_var_ule_as_slices<R>(&self, _: impl FnOnce(&[&[u8]]) -> R) -> R { // unnecessary if the other two are implemented unreachable!() } #[inline] fn encode_var_ule_len(&self) -> usize { self.len() * core::mem::size_of::<T::ULE>() } fn encode_var_ule_write(&self, dst: &mut [u8]) { #[allow(non_snake_case)] let S = core::mem::size_of::<T::ULE>(); debug_assert_eq!(self.len() * S, dst.len()); for (item, ref mut chunk) in self.iter().zip(dst.chunks_mut(S)) { let ule = item.to_unaligned(); chunk.copy_from_slice(ULE::slice_as_bytes(core::slice::from_ref(&ule))); } } } #[cfg(feature = "alloc")] unsafe impl<T> EncodeAsVarULE<ZeroSlice<T>> for Vec<T> where T: AsULE + 'static, { fn encode_var_ule_as_slices<R>(&self, _: impl FnOnce(&[&[u8]]) -> R) -> R { // unnecessary if the other two are implemented unreachable!() } 
#[inline] fn encode_var_ule_len(&self) -> usize { self.as_slice().encode_var_ule_len() } #[inline] fn encode_var_ule_write(&self, dst: &mut [u8]) { self.as_slice().encode_var_ule_write(dst) } } unsafe impl<T> EncodeAsVarULE<ZeroSlice<T>> for ZeroVec<'_, T> where T: AsULE + 'static, { fn encode_var_ule_as_slices<R>(&self, _: impl FnOnce(&[&[u8]]) -> R) -> R { // unnecessary if the other two are implemented unreachable!() } #[inline] fn encode_var_ule_len(&self) -> usize { self.as_bytes().len() } fn encode_var_ule_write(&self, dst: &mut [u8]) { debug_assert_eq!(self.as_bytes().len(), dst.len()); dst.copy_from_slice(self.as_bytes()); } } unsafe impl<T, E, F> EncodeAsVarULE<VarZeroSlice<T, F>> for &'_ [E] where T: VarULE + ?Sized, E: EncodeAsVarULE<T>, F: VarZeroVecFormat, { fn encode_var_ule_as_slices<R>(&self, _: impl FnOnce(&[&[u8]]) -> R) -> R { // unnecessary if the other two are implemented unimplemented!() } #[allow(clippy::unwrap_used)] // TODO(#1410): Rethink length errors in VZV. fn encode_var_ule_len(&self) -> usize { crate::varzerovec::components::compute_serializable_len::<T, E, F>(self).unwrap() as usize } fn encode_var_ule_write(&self, dst: &mut [u8]) { crate::varzerovec::components::write_serializable_bytes::<T, E, F>(self, dst) } } #[cfg(feature = "alloc")] unsafe impl<T, E, F> EncodeAsVarULE<VarZeroSlice<T, F>> for Vec<E> where T: VarULE + ?Sized, E: EncodeAsVarULE<T>, F: VarZeroVecFormat, { fn encode_var_ule_as_slices<R>(&self, _: impl FnOnce(&[&[u8]]) -> R) -> R { // unnecessary if the other two are implemented unreachable!() } #[inline] fn encode_var_ule_len(&self) -> usize { <_ as EncodeAsVarULE<VarZeroSlice<T, F>>>::encode_var_ule_len(&self.as_slice()) } #[inline] fn encode_var_ule_write(&self, dst: &mut [u8]) { <_ as EncodeAsVarULE<VarZeroSlice<T, F>>>::encode_var_ule_write(&self.as_slice(), dst) } } unsafe impl<T, F> EncodeAsVarULE<VarZeroSlice<T, F>> for VarZeroVec<'_, T, F> where T: VarULE + ?Sized, F: VarZeroVecFormat, { fn 
encode_var_ule_as_slices<R>(&self, _: impl FnOnce(&[&[u8]]) -> R) -> R { // unnecessary if the other two are implemented unreachable!() } #[inline] fn encode_var_ule_len(&self) -> usize { self.as_bytes().len() } #[inline] fn encode_var_ule_write(&self, dst: &mut [u8]) { debug_assert_eq!(self.as_bytes().len(), dst.len()); dst.copy_from_slice(self.as_bytes()); } } #[cfg(test)] mod test { use super::*; const STRING_ARRAY: [&str; 2] = ["hello", "world"]; const STRING_SLICE: &[&str] = &STRING_ARRAY; const U8_ARRAY: [u8; 8] = [0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07]; const U8_2D_ARRAY: [&[u8]; 2] = [&U8_ARRAY, &U8_ARRAY]; const U8_2D_SLICE: &[&[u8]] = &[&U8_ARRAY, &U8_ARRAY]; const U8_3D_ARRAY: [&[&[u8]]; 2] = [U8_2D_SLICE, U8_2D_SLICE]; const U8_3D_SLICE: &[&[&[u8]]] = &[U8_2D_SLICE, U8_2D_SLICE]; const U32_ARRAY: [u32; 4] = [0x00010203, 0x04050607, 0x08090A0B, 0x0C0D0E0F]; const U32_2D_ARRAY: [&[u32]; 2] = [&U32_ARRAY, &U32_ARRAY]; const U32_2D_SLICE: &[&[u32]] = &[&U32_ARRAY, &U32_ARRAY]; const U32_3D_ARRAY: [&[&[u32]]; 2] = [U32_2D_SLICE, U32_2D_SLICE]; const U32_3D_SLICE: &[&[&[u32]]] = &[U32_2D_SLICE, U32_2D_SLICE]; #[test] fn test_vzv_from() { type VZV<'a, T> = VarZeroVec<'a, T>; type ZS<T> = ZeroSlice<T>; type VZS<T> = VarZeroSlice<T>; let u8_zerovec: ZeroVec<u8> = ZeroVec::from_slice_or_alloc(&U8_ARRAY); let u8_2d_zerovec: [ZeroVec<u8>; 2] = [u8_zerovec.clone(), u8_zerovec.clone()]; let u8_2d_vec: Vec<Vec<u8>> = vec![U8_ARRAY.into(), U8_ARRAY.into()]; let u8_3d_vec: Vec<Vec<Vec<u8>>> = vec![u8_2d_vec.clone(), u8_2d_vec.clone()]; let u32_zerovec: ZeroVec<u32> = ZeroVec::from_slice_or_alloc(&U32_ARRAY); let u32_2d_zerovec: [ZeroVec<u32>; 2] = [u32_zerovec.clone(), u32_zerovec.clone()]; let u32_2d_vec: Vec<Vec<u32>> = vec![U32_ARRAY.into(), U32_ARRAY.into()]; let u32_3d_vec: Vec<Vec<Vec<u32>>> = vec![u32_2d_vec.clone(), u32_2d_vec.clone()]; let a: VZV<str> = VarZeroVec::from(&STRING_ARRAY); let b: VZV<str> = VarZeroVec::from(STRING_SLICE); let c: VZV<str> = 
VarZeroVec::from(&Vec::from(STRING_SLICE)); assert_eq!(a, STRING_SLICE); assert_eq!(a, b); assert_eq!(a, c); let a: VZV<[u8]> = VarZeroVec::from(&U8_2D_ARRAY); let b: VZV<[u8]> = VarZeroVec::from(U8_2D_SLICE); let c: VZV<[u8]> = VarZeroVec::from(&u8_2d_vec); assert_eq!(a, U8_2D_SLICE); assert_eq!(a, b); assert_eq!(a, c); let u8_3d_vzv_brackets = &[a.clone(), a.clone()]; let a: VZV<ZS<u8>> = VarZeroVec::from(&U8_2D_ARRAY); let b: VZV<ZS<u8>> = VarZeroVec::from(U8_2D_SLICE); let c: VZV<ZS<u8>> = VarZeroVec::from(&u8_2d_vec); let d: VZV<ZS<u8>> = VarZeroVec::from(&u8_2d_zerovec); assert_eq!(a, U8_2D_SLICE); assert_eq!(a, b); assert_eq!(a, c); assert_eq!(a, d); let u8_3d_vzv_zeroslice = &[a.clone(), a.clone()]; let a: VZV<VZS<[u8]>> = VarZeroVec::from(&U8_3D_ARRAY); let b: VZV<VZS<[u8]>> = VarZeroVec::from(U8_3D_SLICE); let c: VZV<VZS<[u8]>> = VarZeroVec::from(&u8_3d_vec); let d: VZV<VZS<[u8]>> = VarZeroVec::from(u8_3d_vzv_brackets); assert_eq!( a.iter() .map(|x| x.iter().map(|y| y.to_vec()).collect::<Vec<Vec<u8>>>()) .collect::<Vec<Vec<Vec<u8>>>>(), u8_3d_vec ); assert_eq!(a, b); assert_eq!(a, c); assert_eq!(a, d); let a: VZV<VZS<ZS<u8>>> = VarZeroVec::from(&U8_3D_ARRAY); let b: VZV<VZS<ZS<u8>>> = VarZeroVec::from(U8_3D_SLICE); let c: VZV<VZS<ZS<u8>>> = VarZeroVec::from(&u8_3d_vec); let d: VZV<VZS<ZS<u8>>> = VarZeroVec::from(u8_3d_vzv_zeroslice); assert_eq!( a.iter() .map(|x| x .iter() .map(|y| y.iter().collect::<Vec<u8>>()) .collect::<Vec<Vec<u8>>>()) .collect::<Vec<Vec<Vec<u8>>>>(), u8_3d_vec ); assert_eq!(a, b); assert_eq!(a, c); assert_eq!(a, d); let a: VZV<ZS<u32>> = VarZeroVec::from(&U32_2D_ARRAY); let b: VZV<ZS<u32>> = VarZeroVec::from(U32_2D_SLICE); let c: VZV<ZS<u32>> = VarZeroVec::from(&u32_2d_vec); let d: VZV<ZS<u32>> = VarZeroVec::from(&u32_2d_zerovec); assert_eq!(a, u32_2d_zerovec); assert_eq!(a, b); assert_eq!(a, c); assert_eq!(a, d); let u32_3d_vzv = &[a.clone(), a.clone()]; let a: VZV<VZS<ZS<u32>>> = VarZeroVec::from(&U32_3D_ARRAY); let b: 
VZV<VZS<ZS<u32>>> = VarZeroVec::from(U32_3D_SLICE); let c: VZV<VZS<ZS<u32>>> = VarZeroVec::from(&u32_3d_vec); let d: VZV<VZS<ZS<u32>>> = VarZeroVec::from(u32_3d_vzv); assert_eq!( a.iter() .map(|x| x .iter() .map(|y| y.iter().collect::<Vec<u32>>()) .collect::<Vec<Vec<u32>>>()) .collect::<Vec<Vec<Vec<u32>>>>(), u32_3d_vec ); assert_eq!(a, b); assert_eq!(a, c); assert_eq!(a, d); } }
// This file is part of ICU4X. For terms of use, please see the file // called LICENSE at the top level of the ICU4X source tree // (online at: https://github.com/unicode-org/icu4x/blob/main/LICENSE ). /// Given `Self` (`$aligned`), `Self::ULE` (`$unaligned`), and a conversion function (`$single` or /// `Self::from_aligned`), implement `from_array` for arrays of `$aligned` to `$unaligned`. /// /// The `$default` argument is due to current compiler limitations. /// Pass any (cheap to construct) value. #[macro_export] macro_rules! impl_ule_from_array { ($aligned:ty, $unaligned:ty, $default:expr, $single:path) => { #[doc = concat!("Convert an array of `", stringify!($aligned), "` to an array of `", stringify!($unaligned), "`.")] pub const fn from_array<const N: usize>(arr: [$aligned; N]) -> [Self; N] { let mut result = [$default; N]; let mut i = 0; // Won't panic because i < N and arr has length N #[allow(clippy::indexing_slicing)] while i < N { result[i] = $single(arr[i]); i += 1; } result } }; ($aligned:ty, $unaligned:ty, $default:expr) => { impl_ule_from_array!($aligned, $unaligned, $default, Self::from_aligned); }; }
// This file is part of ICU4X. For terms of use, please see the file
// called LICENSE at the top level of the ICU4X source tree
// (online at: https://github.com/unicode-org/icu4x/blob/main/LICENSE ).

#![allow(clippy::upper_case_acronyms)]
//! Traits over unaligned little-endian data (ULE, pronounced "yule").
//!
//! The main traits for this module are [`ULE`], [`AsULE`] and, [`VarULE`].
//!
//! See [the design doc](https://github.com/unicode-org/icu4x/blob/main/utils/zerovec/design_doc.md) for details on how these traits
//! work under the hood.
mod chars;
#[cfg(doc)]
pub mod custom;
mod encode;
mod macros;
mod multi;
mod niche;
mod option;
mod plain;
mod slices;

#[cfg(test)]
pub mod test_utils;

pub mod tuple;
pub mod tuplevar;
pub mod vartuple;
pub use chars::CharULE;
#[cfg(feature = "alloc")]
pub use encode::encode_varule_to_box;
pub use encode::EncodeAsVarULE;
pub use multi::MultiFieldsULE;
pub use niche::{NicheBytes, NichedOption, NichedOptionULE};
pub use option::{OptionULE, OptionVarULE};
pub use plain::RawBytesULE;

use core::{any, fmt, mem, slice};

/// Fixed-width, byte-aligned data that can be cast to and from a little-endian byte slice.
///
/// If you need to implement this trait, consider using [`#[make_ule]`](crate::make_ule) or
/// [`#[derive(ULE)]`](macro@ULE) instead.
///
/// Types that are not fixed-width can implement [`VarULE`] instead.
///
/// "ULE" stands for "Unaligned little-endian"
///
/// # Safety
///
/// Safety checklist for `ULE`:
///
/// 1. The type *must not* include any uninitialized or padding bytes.
/// 2. The type must have an alignment of 1 byte, or it is a ZST that is safe to construct.
/// 3. The impl of [`ULE::validate_bytes()`] *must* return an error if the given byte slice
///    would not represent a valid slice of this type.
/// 4. The impl of [`ULE::validate_bytes()`] *must* return an error if the given byte slice
///    cannot be used in its entirety (if its length is not a multiple of `size_of::<Self>()`).
/// 5. All other methods *must* be left with their default impl, or else implemented according to
///    their respective safety guidelines.
/// 6. Acknowledge the following note about the equality invariant.
///
/// If the ULE type is a struct only containing other ULE types (or other types which satisfy invariants 1 and 2,
/// like `[u8; N]`), invariants 1 and 2 can be achieved via `#[repr(C, packed)]` or `#[repr(transparent)]`.
///
/// # Equality invariant
///
/// A non-safety invariant is that if `Self` implements `PartialEq`, then it *must* be logically
/// equivalent to byte equality on [`Self::slice_as_bytes()`].
///
/// It may be necessary to introduce a "canonical form" of the ULE if logical equality does not
/// equal byte equality. In such a case, [`Self::validate_bytes()`] should return an error
/// for any values that are not in canonical form. For example, the decimal strings "1.23e4" and
/// "12.3e3" are logically equal, but not byte-for-byte equal, so we could define a canonical form
/// where only a single digit is allowed before `.`.
///
/// Failure to follow this invariant will cause surprising behavior in `PartialEq`, which may
/// result in unpredictable operations on `ZeroVec`, `VarZeroVec`, and `ZeroMap`.
pub unsafe trait ULE
where
    Self: Sized,
    Self: Copy + 'static,
{
    /// Validates a byte slice, `&[u8]`.
    ///
    /// If `Self` is not well-defined for all possible bit values, the bytes should be validated.
    /// If the bytes can be transmuted, *in their entirety*, to a valid slice of `Self`, then `Ok`
    /// should be returned; otherwise, `Err` should be returned.
    fn validate_bytes(bytes: &[u8]) -> Result<(), UleError>;

    /// Parses a byte slice, `&[u8]`, and return it as `&[Self]` with the same lifetime.
    ///
    /// If `Self` is not well-defined for all possible bit values, the bytes should be validated,
    /// and an error should be returned in the same cases as [`Self::validate_bytes()`].
    ///
    /// The default implementation executes [`Self::validate_bytes()`] followed by
    /// [`Self::slice_from_bytes_unchecked`].
    ///
    /// Note: The following equality should hold: `bytes.len() % size_of::<Self>() == 0`. This
    /// means that the returned slice can span the entire byte slice.
    fn parse_bytes_to_slice(bytes: &[u8]) -> Result<&[Self], UleError> {
        Self::validate_bytes(bytes)?;
        debug_assert_eq!(bytes.len() % mem::size_of::<Self>(), 0);
        Ok(unsafe { Self::slice_from_bytes_unchecked(bytes) })
    }

    /// Takes a byte slice, `&[u8]`, and return it as `&[Self]` with the same lifetime, assuming
    /// that this byte slice has previously been run through [`Self::parse_bytes_to_slice()`] with
    /// success.
    ///
    /// The default implementation performs a pointer cast to the same region of memory.
    ///
    /// # Safety
    ///
    /// ## Callers
    ///
    /// Callers of this method must take care to ensure that `bytes` was previously passed through
    /// [`Self::validate_bytes()`] with success (and was not changed since then).
    ///
    /// ## Implementors
    ///
    /// Implementations of this method may call unsafe functions to cast the pointer to the correct
    /// type, assuming the "Callers" invariant above.
    ///
    /// Keep in mind that `&[Self]` and `&[u8]` may have different lengths.
    ///
    /// Safety checklist:
    ///
    /// 1. This method *must* return the same result as [`Self::parse_bytes_to_slice()`].
    /// 2. This method *must* return a slice to the same region of memory as the argument.
    #[inline]
    unsafe fn slice_from_bytes_unchecked(bytes: &[u8]) -> &[Self] {
        let data = bytes.as_ptr();
        let len = bytes.len() / mem::size_of::<Self>();
        debug_assert_eq!(bytes.len() % mem::size_of::<Self>(), 0);
        core::slice::from_raw_parts(data as *const Self, len)
    }

    /// Given `&[Self]`, returns a `&[u8]` with the same lifetime.
    ///
    /// The default implementation performs a pointer cast to the same region of memory.
    ///
    /// # Safety
    ///
    /// Implementations of this method should call potentially unsafe functions to cast the
    /// pointer to the correct type.
    ///
    /// Keep in mind that `&[Self]` and `&[u8]` may have different lengths.
    #[inline]
    #[allow(clippy::wrong_self_convention)] // https://github.com/rust-lang/rust-clippy/issues/7219
    fn slice_as_bytes(slice: &[Self]) -> &[u8] {
        unsafe { slice::from_raw_parts(slice as *const [Self] as *const u8, mem::size_of_val(slice)) }
    }
}

/// A trait for any type that has a 1:1 mapping with an unaligned little-endian (ULE) type.
///
/// If you need to implement this trait, consider using [`#[make_ule]`](crate::make_ule) instead.
pub trait AsULE: Copy {
    /// The ULE type corresponding to `Self`.
    ///
    /// Types having infallible conversions from all bit values (Plain Old Data) can use
    /// `RawBytesULE` with the desired width; for example, `u32` uses `RawBytesULE<4>`.
    ///
    /// Types that are not well-defined for all bit values should implement a custom ULE.
    type ULE: ULE;

    /// Converts from `Self` to `Self::ULE`.
    ///
    /// This function may involve byte order swapping (native-endian to little-endian).
    ///
    /// For best performance, mark your implementation of this function `#[inline]`.
    fn to_unaligned(self) -> Self::ULE;

    /// Converts from `Self::ULE` to `Self`.
    ///
    /// This function may involve byte order swapping (little-endian to native-endian).
    ///
    /// For best performance, mark your implementation of this function `#[inline]`.
    ///
    /// # Safety
    ///
    /// This function is infallible because bit validation should have occurred when `Self::ULE`
    /// was first constructed. An implementation may therefore involve an `unsafe{}` block, like
    /// `from_bytes_unchecked()`.
    fn from_unaligned(unaligned: Self::ULE) -> Self;
}

/// A type whose byte sequence equals the byte sequence of its ULE type on
/// little-endian platforms.
///
/// This enables certain performance optimizations, such as
/// [`ZeroVec::try_from_slice`](crate::ZeroVec::try_from_slice).
///
/// # Implementation safety
///
/// This trait is safe to implement if the type's ULE (as defined by `impl `[`AsULE`]` for T`)
/// has an equal byte sequence as the type itself on little-endian platforms; i.e., one where
/// `*const T` can be cast to a valid `*const T::ULE`.
pub unsafe trait EqULE: AsULE {}

/// A trait for a type where aligned slices can be cast to unaligned slices.
///
/// Auto-implemented on all types implementing [`EqULE`].
pub trait SliceAsULE
where
    Self: AsULE + Sized,
{
    /// Converts from `&[Self]` to `&[Self::ULE]` if possible.
    ///
    /// In general, this function returns `Some` on little-endian and `None` on big-endian.
    fn slice_to_unaligned(slice: &[Self]) -> Option<&[Self::ULE]>;
}

#[cfg(target_endian = "little")]
impl<T> SliceAsULE for T
where
    T: EqULE,
{
    #[inline]
    fn slice_to_unaligned(slice: &[Self]) -> Option<&[Self::ULE]> {
        // This is safe because on little-endian platforms, the byte sequence of &[T]
        // is equivalent to the byte sequence of &[T::ULE] by the contract of EqULE,
        // and &[T::ULE] has equal or looser alignment than &[T].
        let ule_slice =
            unsafe { core::slice::from_raw_parts(slice.as_ptr() as *const Self::ULE, slice.len()) };
        Some(ule_slice)
    }
}

#[cfg(not(target_endian = "little"))]
impl<T> SliceAsULE for T
where
    T: EqULE,
{
    // On big-endian targets a byte-order swap would be required, so no zero-cost
    // cast is possible; always report failure.
    #[inline]
    fn slice_to_unaligned(_: &[Self]) -> Option<&[Self::ULE]> {
        None
    }
}

/// Variable-width, byte-aligned data that can be cast to and from a little-endian byte slice.
///
/// If you need to implement this trait, consider using [`#[make_varule]`](crate::make_varule) or
/// [`#[derive(VarULE)]`](macro@VarULE) instead.
///
/// This trait is mostly for unsized types like `str` and `[T]`. It can be implemented on sized types;
/// however, it is much more preferable to use [`ULE`] for that purpose. The [`custom`] module contains
/// additional documentation on how this type can be implemented on custom types.
///
/// If deserialization with `VarZeroVec` is desired, it is recommended to implement `Deserialize` for
/// `Box<T>` (serde does not do this automatically for unsized `T`).
///
/// For convenience it is typically desired to implement [`EncodeAsVarULE`] and [`ZeroFrom`](zerofrom::ZeroFrom)
/// on some stack type to convert to and from the ULE type efficiently when necessary.
///
/// # Safety
///
/// Safety checklist for `VarULE`:
///
/// 1. The type *must not* include any uninitialized or padding bytes.
/// 2. The type must have an alignment of 1 byte.
/// 3. The impl of [`VarULE::validate_bytes()`] *must* return an error if the given byte slice
///    would not represent a valid slice of this type.
/// 4. The impl of [`VarULE::validate_bytes()`] *must* return an error if the given byte slice
///    cannot be used in its entirety.
/// 5. The impl of [`VarULE::from_bytes_unchecked()`] must produce a reference to the same
///    underlying data assuming that the given bytes previously passed validation.
/// 6. All other methods *must* be left with their default impl, or else implemented according to
///    their respective safety guidelines.
/// 7. Acknowledge the following note about the equality invariant.
///
/// If the ULE type is a struct only containing other ULE/VarULE types (or other types which satisfy invariants 1 and 2,
/// like `[u8; N]`), invariants 1 and 2 can be achieved via `#[repr(C, packed)]` or `#[repr(transparent)]`.
///
/// # Equality invariant
///
/// A non-safety invariant is that if `Self` implements `PartialEq`, then it *must* be logically
/// equivalent to byte equality on [`Self::as_bytes()`].
///
/// It may be necessary to introduce a "canonical form" of the ULE if logical equality does not
/// equal byte equality. In such a case, [`Self::validate_bytes()`] should return an error
/// for any values that are not in canonical form. For example, the decimal strings "1.23e4" and
/// "12.3e3" are logically equal, but not byte-for-byte equal, so we could define a canonical form
/// where only a single digit is allowed before `.`.
///
/// There may also be cases where a `VarULE` has multiple canonical forms, such as a faster
/// version and a smaller version. The cleanest way to handle this case would be separate types.
/// However, if this is not feasible, then the application should ensure that the data it is
/// deserializing is in the expected form. For example, if the data is being loaded from an
/// external source, then requests could carry information about the expected form of the data.
///
/// Failure to follow this invariant will cause surprising behavior in `PartialEq`, which may
/// result in unpredictable operations on `ZeroVec`, `VarZeroVec`, and `ZeroMap`.
pub unsafe trait VarULE: 'static {
    /// Validates a byte slice, `&[u8]`.
    ///
    /// If `Self` is not well-defined for all possible bit values, the bytes should be validated.
    /// If the bytes can be transmuted, *in their entirety*, to a valid `&Self`, then `Ok` should
    /// be returned; otherwise, an error should be returned.
    fn validate_bytes(_bytes: &[u8]) -> Result<(), UleError>;

    /// Parses a byte slice, `&[u8]`, and return it as `&Self` with the same lifetime.
    ///
    /// If `Self` is not well-defined for all possible bit values, the bytes should be validated,
    /// and an error should be returned in the same cases as [`Self::validate_bytes()`].
    ///
    /// The default implementation executes [`Self::validate_bytes()`] followed by
    /// [`Self::from_bytes_unchecked`].
    ///
    /// Note: The following equality should hold: `size_of_val(result) == size_of_val(bytes)`,
    /// where `result` is the successful return value of the method. This means that the return
    /// value spans the entire byte slice.
    fn parse_bytes(bytes: &[u8]) -> Result<&Self, UleError> {
        Self::validate_bytes(bytes)?;
        let result = unsafe { Self::from_bytes_unchecked(bytes) };
        debug_assert_eq!(mem::size_of_val(result), mem::size_of_val(bytes));
        Ok(result)
    }

    /// Takes a byte slice, `&[u8]`, and return it as `&Self` with the same lifetime, assuming
    /// that this byte slice has previously been run through [`Self::parse_bytes()`] with
    /// success.
    ///
    /// # Safety
    ///
    /// ## Callers
    ///
    /// Callers of this method must take care to ensure that `bytes` was previously passed through
    /// [`Self::validate_bytes()`] with success (and was not changed since then).
    ///
    /// ## Implementors
    ///
    /// Implementations of this method may call unsafe functions to cast the pointer to the correct
    /// type, assuming the "Callers" invariant above.
    ///
    /// Safety checklist:
    ///
    /// 1. This method *must* return the same result as [`Self::parse_bytes()`].
    /// 2. This method *must* return a slice to the same region of memory as the argument.
    unsafe fn from_bytes_unchecked(bytes: &[u8]) -> &Self;

    /// Given `&Self`, returns a `&[u8]` with the same lifetime.
    ///
    /// The default implementation performs a pointer cast to the same region of memory.
    ///
    /// # Safety
    ///
    /// Implementations of this method should call potentially unsafe functions to cast the
    /// pointer to the correct type.
    #[inline]
    fn as_bytes(&self) -> &[u8] {
        unsafe { slice::from_raw_parts(self as *const Self as *const u8, mem::size_of_val(self)) }
    }

    /// Allocate on the heap as a `Box<T>`
    #[inline]
    #[cfg(feature = "alloc")]
    fn to_boxed(&self) -> alloc::boxed::Box<Self> {
        use alloc::borrow::ToOwned;
        use alloc::boxed::Box;
        use core::alloc::Layout;
        // Copy the bytes to an owned buffer; ManuallyDrop prevents a double-free
        // once ownership is transferred to the Box below.
        let bytesvec = self.as_bytes().to_owned().into_boxed_slice();
        let bytesvec = mem::ManuallyDrop::new(bytesvec);
        unsafe {
            // Get the pointer representation
            let ptr: *mut Self = Self::from_bytes_unchecked(&bytesvec) as *const Self as *mut Self;
            assert_eq!(Layout::for_value(&*ptr), Layout::for_value(&**bytesvec));
            // Transmute the pointer to an owned pointer
            Box::from_raw(ptr)
        }
    }
}

// Proc macro reexports
//
// These exist so that our docs can use intra-doc links.
// Due to quirks of how rustdoc does documentation on reexports, these must be in this module and not reexported from
// a submodule

/// Custom derive for [`ULE`].
///
/// This can be attached to [`Copy`] structs containing only [`ULE`] types.
///
/// Most of the time, it is recommended one use [`#[make_ule]`](crate::make_ule) instead of defining
/// a custom ULE type.
#[cfg(feature = "derive")]
pub use zerovec_derive::ULE;

/// Custom derive for [`VarULE`]
///
/// This can be attached to structs containing only [`ULE`] types with one [`VarULE`] type at the end.
///
/// Most of the time, it is recommended one use [`#[make_varule]`](crate::make_varule) instead of defining
/// a custom [`VarULE`] type.
#[cfg(feature = "derive")]
pub use zerovec_derive::VarULE;

/// An error type to be used for decoding slices of ULE types
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
#[non_exhaustive]
pub enum UleError {
    /// Attempted to parse a buffer into a slice of the given ULE type but its
    /// length was not compatible.
    ///
    /// Typically created by a [`ULE`] impl via [`UleError::length()`].
    ///
    /// [`ULE`]: crate::ule::ULE
    InvalidLength { ty: &'static str, len: usize },
    /// The byte sequence provided for `ty` failed to parse correctly in the
    /// given ULE type.
    ///
    /// Typically created by a [`ULE`] impl via [`UleError::parse()`].
    ///
    /// [`ULE`]: crate::ule::ULE
    ParseError { ty: &'static str },
}

impl fmt::Display for UleError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
        match *self {
            UleError::InvalidLength { ty, len } => {
                write!(f, "Invalid length {len} for slice of type {ty}")
            }
            UleError::ParseError { ty } => {
                write!(f, "Could not parse bytes to slice of type {ty}")
            }
        }
    }
}

impl UleError {
    /// Construct a parse error for the given type
    pub fn parse<T: ?Sized + 'static>() -> UleError {
        UleError::ParseError {
            ty: any::type_name::<T>(),
        }
    }

    /// Construct an "invalid length" error for the given type and length
    pub fn length<T: ?Sized + 'static>(len: usize) -> UleError {
        UleError::InvalidLength {
            ty: any::type_name::<T>(),
            len,
        }
    }
}

impl core::error::Error for UleError {}
// This file is part of ICU4X. For terms of use, please see the file
// called LICENSE at the top level of the ICU4X source tree
// (online at: https://github.com/unicode-org/icu4x/blob/main/LICENSE ).

use super::*;
use crate::varzerovec::lengthless::VarZeroLengthlessSlice;
use crate::vecs::VarZeroVecFormat;
use core::{fmt, mem};

/// This type is used by the custom derive to represent multiple [`VarULE`]
/// fields packed into a single end-of-struct field. It is not recommended
/// to use this type directly, use [`Tuple2VarULE`](crate::ule::tuplevar::Tuple2VarULE) etc instead.
///
/// Logically, consider it to be `(V1, V2, V3, ..)`
/// where `V1` etc. are potentially different [`VarULE`] types.
///
/// Internally, it is represented by a VarZeroSlice without the length part.
#[derive(PartialEq, Eq)]
#[repr(transparent)]
pub struct MultiFieldsULE<const LEN: usize, Format: VarZeroVecFormat>(
    VarZeroLengthlessSlice<[u8], Format>,
);

impl<const LEN: usize, Format: VarZeroVecFormat> MultiFieldsULE<LEN, Format> {
    /// Compute the amount of bytes needed to support elements with lengths `lengths`
    #[inline]
    #[allow(clippy::expect_used)] // See #1410
    pub fn compute_encoded_len_for(lengths: [usize; LEN]) -> usize {
        // BlankSliceEncoder only reports lengths; no element data is needed
        // to compute the total serialized size.
        let lengths = lengths.map(BlankSliceEncoder);
        crate::varzerovec::components::compute_serializable_len_without_length::<_, _, Format>(
            &lengths,
        )
        .expect("Too many bytes to encode") as usize
    }

    /// Construct a partially initialized MultiFieldsULE backed by a mutable byte buffer
    pub fn new_from_lengths_partially_initialized<'a>(
        lengths: [usize; LEN],
        output: &'a mut [u8],
    ) -> &'a mut Self {
        // Write only the index/offset structure; the per-field payload bytes
        // are left untouched (to be filled in via set_field_at()).
        let lengths = lengths.map(BlankSliceEncoder);
        crate::varzerovec::components::write_serializable_bytes_without_length::<_, _, Format>(
            &lengths, output,
        );
        debug_assert!(
            <VarZeroLengthlessSlice<[u8], Format>>::parse_bytes(LEN as u32, output).is_ok(),
            "Encoded slice must be valid VarZeroSlice"
        );
        unsafe {
            // Safe since write_serializable_bytes produces a valid VarZeroLengthlessSlice buffer with the right format
            let slice = <VarZeroLengthlessSlice<[u8], Format>>::from_bytes_unchecked_mut(output);
            // safe since `Self` is transparent over VarZeroLengthlessSlice<[u8], Format>
            mem::transmute::<&mut VarZeroLengthlessSlice<[u8], Format>, &mut Self>(slice)
        }
    }

    /// Given a buffer of size obtained by [`Self::compute_encoded_len_for()`], write element A to index idx
    ///
    /// # Safety
    /// - `idx` must be in range
    /// - `T` must be the appropriate type expected by the custom derive in this usage of this type
    #[inline]
    pub unsafe fn set_field_at<T: VarULE + ?Sized, A: EncodeAsVarULE<T> + ?Sized>(
        &mut self,
        idx: usize,
        value: &A,
    ) {
        value.encode_var_ule_write(self.0.get_bytes_at_mut(LEN as u32, idx))
    }

    /// Validate field at `index` to see if it is a valid `T` VarULE type
    ///
    /// # Safety
    ///
    /// - `index` must be in range
    #[inline]
    pub unsafe fn validate_field<T: VarULE + ?Sized>(&self, index: usize) -> Result<(), UleError> {
        T::validate_bytes(self.0.get_unchecked(LEN as u32, index))
    }

    /// Get field at `index` as a value of type T
    ///
    /// # Safety
    ///
    /// - `index` must be in range
    /// - Element at `index` must have been created with the VarULE type T
    #[inline]
    pub unsafe fn get_field<T: VarULE + ?Sized>(&self, index: usize) -> &T {
        T::from_bytes_unchecked(self.0.get_unchecked(LEN as u32, index))
    }

    /// Construct from a byte slice
    ///
    /// # Safety
    /// - byte slice must be a valid VarZeroLengthlessSlice<[u8], Format> with length LEN
    #[inline]
    pub unsafe fn from_bytes_unchecked(bytes: &[u8]) -> &Self {
        // &Self is transparent over &VZS<..> with the right format
        mem::transmute(<VarZeroLengthlessSlice<[u8], Format>>::from_bytes_unchecked(bytes))
    }

    /// Get the bytes behind this value
    pub fn as_bytes(&self) -> &[u8] {
        self.0.as_bytes()
    }
}

impl<const LEN: usize, Format: VarZeroVecFormat> fmt::Debug for MultiFieldsULE<LEN, Format> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Fields are untyped here, so the best we can do is dump the raw bytes.
        write!(f, "MultiFieldsULE<{LEN}>({:?})", self.0.as_bytes())
    }
}

/// This lets us conveniently use the EncodeAsVarULE functionality to create
/// `VarZeroVec<[u8]>`s that have the right amount of space for elements
/// without having to duplicate any unsafe code
///
/// It reports a length (`self.0`) but writes no payload bytes, reserving space
/// that is filled in later.
#[repr(transparent)]
struct BlankSliceEncoder(usize);

unsafe impl EncodeAsVarULE<[u8]> for BlankSliceEncoder {
    fn encode_var_ule_as_slices<R>(&self, _: impl FnOnce(&[&[u8]]) -> R) -> R {
        // unnecessary if the other two are implemented
        unreachable!()
    }

    #[inline]
    fn encode_var_ule_len(&self) -> usize {
        self.0
    }

    #[inline]
    fn encode_var_ule_write(&self, _dst: &mut [u8]) {
        // do nothing
    }
}

// Safety (based on the safety checklist on the VarULE trait):
// 1. MultiFieldsULE does not include any uninitialized or padding bytes (achieved by being transparent over a VarULE type)
// 2. MultiFieldsULE is aligned to 1 byte (achieved by being transparent over a VarULE type)
// 3. The impl of `validate_bytes()` returns an error if any byte is not valid.
// 4. The impl of `validate_bytes()` returns an error if the slice cannot be used in its entirety
// 5. The impl of `from_bytes_unchecked()` returns a reference to the same data.
// 6. All other methods are defaulted
// 7. `MultiFieldsULE` byte equality is semantic equality (achieved by being transparent over a VarULE type)
unsafe impl<const LEN: usize, Format: VarZeroVecFormat> VarULE for MultiFieldsULE<LEN, Format> {
    /// Note: MultiFieldsULE is usually used in cases where one should be calling .validate_field() directly for
    /// each field, rather than using the regular VarULE impl.
    ///
    /// This impl exists so that EncodeAsVarULE can work.
    #[inline]
    fn validate_bytes(slice: &[u8]) -> Result<(), UleError> {
        <VarZeroLengthlessSlice<[u8], Format>>::parse_bytes(LEN as u32, slice).map(|_| ())
    }

    #[inline]
    unsafe fn from_bytes_unchecked(bytes: &[u8]) -> &Self {
        // &Self is transparent over &VZS<..>
        mem::transmute(<VarZeroLengthlessSlice<[u8], Format>>::from_bytes_unchecked(bytes))
    }
}
// This file is part of ICU4X. For terms of use, please see the file
// called LICENSE at the top level of the ICU4X source tree
// (online at: https://github.com/unicode-org/icu4x/blob/main/LICENSE ).

use core::{marker::Copy, mem::size_of};

#[cfg(feature = "alloc")]
use crate::map::ZeroMapKV;
#[cfg(feature = "alloc")]
use crate::{ZeroSlice, ZeroVec};

use super::{AsULE, ULE};

/// The [`ULE`] types implementing this trait guarantee that [`NicheBytes::NICHE_BIT_PATTERN`]
/// can never occur as a valid byte representation of the type.
///
/// Guarantees for a valid implementation.
/// 1. N must be equal to `core::mem::size_of::<Self>()` or else it will
///    cause panics.
/// 2. The bit pattern [`NicheBytes::NICHE_BIT_PATTERN`] must not be incorrect as it would lead to
///    weird behaviour.
/// 3. The abstractions built on top of this trait must panic on an invalid N.
/// 4. The abstractions built on this trait that use type punning must ensure that type being
///    punned is [`ULE`].
pub trait NicheBytes<const N: usize> {
    const NICHE_BIT_PATTERN: [u8; N];
}

/// [`ULE`] type for [`NichedOption<U,N>`] where U implements [`NicheBytes`].
/// The invalid bit pattern is used as the niche.
///
/// This uses 1 byte less than [`crate::ule::OptionULE<U>`] to represent [`NichedOption<U,N>`].
///
/// # Example
///
/// ```
/// use core::num::NonZeroI8;
/// use zerovec::ule::NichedOption;
/// use zerovec::ZeroVec;
///
/// let bytes = &[0x00, 0x01, 0x02, 0x00];
/// let zv_no: ZeroVec<NichedOption<NonZeroI8, 1>> =
///     ZeroVec::parse_bytes(bytes).expect("Unable to parse as NichedOption.");
///
/// assert_eq!(zv_no.get(0).map(|e| e.0), Some(None));
/// assert_eq!(zv_no.get(1).map(|e| e.0), Some(NonZeroI8::new(1)));
/// assert_eq!(zv_no.get(2).map(|e| e.0), Some(NonZeroI8::new(2)));
/// assert_eq!(zv_no.get(3).map(|e| e.0), Some(None));
/// ```
// Invariants:
// The union stores [`NicheBytes::NICHE_BIT_PATTERN`] when None.
// Any other bit pattern is a valid `U`.
#[repr(C)]
pub union NichedOptionULE<U: NicheBytes<N> + ULE, const N: usize> {
    /// Invariant: The value is `niche` only if the bytes equal NICHE_BIT_PATTERN.
    niche: [u8; N],
    /// Invariant: The value is `valid` if the `niche` field does not match NICHE_BIT_PATTERN.
    valid: U,
}

impl<U: NicheBytes<N> + ULE + core::fmt::Debug, const N: usize> core::fmt::Debug
    for NichedOptionULE<U, N>
{
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        self.get().fmt(f)
    }
}

impl<U: NicheBytes<N> + ULE, const N: usize> NichedOptionULE<U, N> {
    /// New `NichedOptionULE<U, N>` from `Option<U>`
    pub fn new(opt: Option<U>) -> Self {
        // Guarantee 1 of NicheBytes: N must equal the size of U, otherwise the
        // niche pattern would not cover the whole value.
        assert!(N == core::mem::size_of::<U>());
        match opt {
            Some(u) => Self { valid: u },
            None => Self {
                niche: <U as NicheBytes<N>>::NICHE_BIT_PATTERN,
            },
        }
    }

    /// Convert to an `Option<U>`
    pub fn get(self) -> Option<U> {
        // Safety: The union stores NICHE_BIT_PATTERN when None otherwise a valid U
        unsafe {
            if self.niche == <U as NicheBytes<N>>::NICHE_BIT_PATTERN {
                None
            } else {
                Some(self.valid)
            }
        }
    }

    /// Borrows as an `Option<&U>`.
    pub fn as_ref(&self) -> Option<&U> {
        // Safety: The union stores NICHE_BIT_PATTERN when None otherwise a valid U
        unsafe {
            if self.niche == <U as NicheBytes<N>>::NICHE_BIT_PATTERN {
                None
            } else {
                Some(&self.valid)
            }
        }
    }
}

impl<U: NicheBytes<N> + ULE, const N: usize> Copy for NichedOptionULE<U, N> {}

impl<U: NicheBytes<N> + ULE, const N: usize> Clone for NichedOptionULE<U, N> {
    fn clone(&self) -> Self {
        *self
    }
}

impl<U: NicheBytes<N> + ULE + PartialEq, const N: usize> PartialEq for NichedOptionULE<U, N> {
    fn eq(&self, other: &Self) -> bool {
        self.get().eq(&other.get())
    }
}

impl<U: NicheBytes<N> + ULE + Eq, const N: usize> Eq for NichedOptionULE<U, N> {}

/// Safety for ULE trait
/// 1. NichedOptionULE does not have any padding bytes: it is a `#[repr(C)]` union whose
///    fields ([u8; N] and U: ULE) are both padding-free and, per the assert in `new`,
///    the same size.
///    NichedOptionULE either contains NICHE_BIT_PATTERN or valid U byte sequences.
///    In both cases the data is initialized.
/// 2. NichedOptionULE is aligned to 1 byte: both union fields ([u8; N] and U: ULE) have
///    alignment 1, so the `#[repr(C)]` union does too.
/// 3. validate_bytes impl returns an error if invalid bytes are encountered.
/// 4. validate_bytes impl returns an error if there are extra bytes.
/// 5. The other ULE methods are left to their default impl.
/// 6. NichedOptionULE equality is based on ULE equality of the subfield, assuming that NicheBytes
///    has been implemented correctly (this is a correctness but not a safety guarantee).
unsafe impl<U: NicheBytes<N> + ULE, const N: usize> ULE for NichedOptionULE<U, N> {
    fn validate_bytes(bytes: &[u8]) -> Result<(), crate::ule::UleError> {
        let size = size_of::<Self>();
        // The implementation is only correct if NICHE_BIT_PATTERN has same number of bytes as the
        // type.
        debug_assert!(N == core::mem::size_of::<U>());

        // The bytes should fully transmute to a collection of Self
        if bytes.len() % size != 0 {
            return Err(crate::ule::UleError::length::<Self>(bytes.len()));
        }
        bytes.chunks(size).try_for_each(|chunk| {
            // Associated const cannot be referenced in a pattern
            // https://doc.rust-lang.org/error-index.html#E0158
            if chunk == <U as NicheBytes<N>>::NICHE_BIT_PATTERN {
                Ok(())
            } else {
                U::validate_bytes(chunk)
            }
        })
    }
}

/// Optional type which uses [`NichedOptionULE<U,N>`] as ULE type.
/// /// The implementors guarantee that `N == core::mem::size_of::<Self>()` /// [`repr(transparent)`] guarantees that the layout is same as [`Option<U>`] #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] #[repr(transparent)] #[allow(clippy::exhaustive_structs)] // newtype #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct NichedOption<U, const N: usize>(pub Option<U>); impl<U, const N: usize> Default for NichedOption<U, N> { fn default() -> Self { Self(None) } } impl<U: AsULE, const N: usize> AsULE for NichedOption<U, N> where U::ULE: NicheBytes<N>, { type ULE = NichedOptionULE<U::ULE, N>; fn to_unaligned(self) -> Self::ULE { NichedOptionULE::new(self.0.map(U::to_unaligned)) } fn from_unaligned(unaligned: Self::ULE) -> Self { Self(unaligned.get().map(U::from_unaligned)) } } #[cfg(feature = "alloc")] impl<'a, T: AsULE + 'static, const N: usize> ZeroMapKV<'a> for NichedOption<T, N> where T::ULE: NicheBytes<N>, { type Container = ZeroVec<'a, NichedOption<T, N>>; type Slice = ZeroSlice<NichedOption<T, N>>; type GetType = <NichedOption<T, N> as AsULE>::ULE; type OwnedType = Self; } impl<T, const N: usize> IntoIterator for NichedOption<T, N> { type IntoIter = <Option<T> as IntoIterator>::IntoIter; type Item = T; fn into_iter(self) -> Self::IntoIter { self.0.into_iter() } }
// This file is part of ICU4X. For terms of use, please see the file
// called LICENSE at the top level of the ICU4X source tree
// (online at: https://github.com/unicode-org/icu4x/blob/main/LICENSE ).

use super::*;
use core::cmp::Ordering;
use core::marker::PhantomData;
use core::mem::{self, MaybeUninit};

/// This type is the [`ULE`] type for `Option<U>` where `U` is a [`ULE`] type
///
/// # Example
///
/// ```rust
/// use zerovec::ZeroVec;
///
/// let z = ZeroVec::alloc_from_slice(&[
///     Some('a'),
///     Some('á'),
///     Some('ø'),
///     None,
///     Some('ł'),
/// ]);
///
/// assert_eq!(z.get(2), Some(Some('ø')));
/// assert_eq!(z.get(3), Some(None));
/// ```
// Invariants:
// The MaybeUninit is zeroed when None (bool = false),
// and is valid when Some (bool = true)
#[repr(C, packed)]
pub struct OptionULE<U>(bool, MaybeUninit<U>);

impl<U: Copy> OptionULE<U> {
    /// Obtain this as an `Option<T>`
    pub fn get(self) -> Option<U> {
        if self.0 {
            unsafe {
                // safety: self.0 is true so the MaybeUninit is valid
                Some(self.1.assume_init())
            }
        } else {
            None
        }
    }

    /// Construct an `OptionULE<U>` from an equivalent `Option<T>`
    pub fn new(opt: Option<U>) -> Self {
        if let Some(inner) = opt {
            Self(true, MaybeUninit::new(inner))
        } else {
            // Zeroed (not uninit) so that the payload bytes are fully
            // initialized, matching the all-zero pattern that
            // validate_bytes() requires for the None case.
            Self(false, MaybeUninit::zeroed())
        }
    }
}

impl<U: Copy + core::fmt::Debug> core::fmt::Debug for OptionULE<U> {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        self.get().fmt(f)
    }
}

// Safety (based on the safety checklist on the ULE trait):
// 1. OptionULE does not include any uninitialized or padding bytes.
//    (achieved by `#[repr(C, packed)]` on a struct containing only ULE fields,
//    in the context of this impl. The MaybeUninit is valid for all byte sequences, and we only generate
//    zeroed or valid-T byte sequences to fill it)
// 2. OptionULE is aligned to 1 byte.
//    (achieved by `#[repr(C, packed)]` on a struct containing only ULE fields, in the context of this impl)
// 3. The impl of validate_bytes() returns an error if any byte is not valid.
// 4. The impl of validate_bytes() returns an error if there are extra bytes.
// 5. The other ULE methods use the default impl.
// 6. OptionULE byte equality is semantic equality by relying on the ULE equality
//    invariant on the subfields
unsafe impl<U: ULE> ULE for OptionULE<U> {
    fn validate_bytes(bytes: &[u8]) -> Result<(), UleError> {
        let size = mem::size_of::<Self>();
        if bytes.len() % size != 0 {
            return Err(UleError::length::<Self>(bytes.len()));
        }
        for chunk in bytes.chunks(size) {
            #[allow(clippy::indexing_slicing)] // `chunk` will have enough bytes to fit Self
            match chunk[0] {
                // https://doc.rust-lang.org/reference/types/boolean.html
                // Rust booleans are always size 1, align 1 values with valid bit patterns 0x0 or 0x1
                0 => {
                    // None case: the payload must be all zeroes (see new()).
                    if !chunk[1..].iter().all(|x| *x == 0) {
                        return Err(UleError::parse::<Self>());
                    }
                }
                // Some case: the payload must be a valid U.
                1 => U::validate_bytes(&chunk[1..])?,
                _ => return Err(UleError::parse::<Self>()),
            }
        }
        Ok(())
    }
}

impl<T: AsULE> AsULE for Option<T> {
    type ULE = OptionULE<T::ULE>;
    fn to_unaligned(self) -> OptionULE<T::ULE> {
        OptionULE::new(self.map(T::to_unaligned))
    }

    fn from_unaligned(other: OptionULE<T::ULE>) -> Self {
        other.get().map(T::from_unaligned)
    }
}

impl<U: Copy> Copy for OptionULE<U> {}

impl<U: Copy> Clone for OptionULE<U> {
    fn clone(&self) -> Self {
        *self
    }
}

impl<U: Copy + PartialEq> PartialEq for OptionULE<U> {
    fn eq(&self, other: &Self) -> bool {
        self.get().eq(&other.get())
    }
}

impl<U: Copy + Eq> Eq for OptionULE<U> {}

/// A type allowing one to represent `Option<U>` for [`VarULE`] `U` types.
///
/// ```rust
/// use zerovec::ule::OptionVarULE;
/// use zerovec::VarZeroVec;
///
/// let mut zv: VarZeroVec<OptionVarULE<str>> = VarZeroVec::new();
///
/// zv.make_mut().push(&None::<&str>);
/// zv.make_mut().push(&Some("hello"));
/// zv.make_mut().push(&Some("world"));
/// zv.make_mut().push(&None::<&str>);
///
/// assert_eq!(zv.get(0).unwrap().as_ref(), None);
/// assert_eq!(zv.get(1).unwrap().as_ref(), Some("hello"));
/// ```
// Layout: one leading bool byte, then the (possibly empty) payload bytes.
// The slice field is empty when None (bool = false),
// and is a valid T when Some (bool = true)
#[repr(C, packed)]
pub struct OptionVarULE<U: VarULE + ?Sized>(PhantomData<U>, bool, [u8]);

impl<U: VarULE + ?Sized> OptionVarULE<U> {
    /// Obtain this as an `Option<&U>`
    pub fn as_ref(&self) -> Option<&U> {
        if self.1 {
            unsafe {
                // Safety: byte field is a valid T if boolean field is true
                Some(U::from_bytes_unchecked(&self.2))
            }
        } else {
            None
        }
    }
}

impl<U: VarULE + ?Sized + core::fmt::Debug> core::fmt::Debug for OptionVarULE<U> {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        self.as_ref().fmt(f)
    }
}

// Safety (based on the safety checklist on the VarULE trait):
// 1. OptionVarULE<T> does not include any uninitialized or padding bytes
//    (achieved by being repr(C, packed) on ULE types)
// 2. OptionVarULE<T> is aligned to 1 byte (achieved by being repr(C, packed) on ULE types)
// 3. The impl of `validate_bytes()` returns an error if any byte is not valid.
// 4. The impl of `validate_bytes()` returns an error if the slice cannot be used in its entirety
// 5. The impl of `from_bytes_unchecked()` returns a reference to the same data.
// 6. All other methods are defaulted
// 7. OptionVarULE<T> byte equality is semantic equality (achieved by being an aggregate)
unsafe impl<U: VarULE + ?Sized> VarULE for OptionVarULE<U> {
    #[inline]
    fn validate_bytes(slice: &[u8]) -> Result<(), UleError> {
        if slice.is_empty() {
            return Err(UleError::length::<Self>(slice.len()));
        }
        #[allow(clippy::indexing_slicing)] // slice already verified to be nonempty
        match slice[0] {
            // https://doc.rust-lang.org/reference/types/boolean.html
            // Rust booleans are always size 1, align 1 values with valid bit patterns 0x0 or 0x1
            0 => {
                // None must carry no payload: exactly the bool byte.
                if slice.len() != 1 {
                    Err(UleError::length::<Self>(slice.len()))
                } else {
                    Ok(())
                }
            }
            1 => U::validate_bytes(&slice[1..]),
            _ => Err(UleError::parse::<Self>()),
        }
    }

    #[inline]
    unsafe fn from_bytes_unchecked(bytes: &[u8]) -> &Self {
        // The DST metadata for OptionVarULE is the length of the trailing
        // `[u8]` field, which is one less than the total byte length because
        // the leading bool consumes one byte.
        let entire_struct_as_slice: *const [u8] =
            ::core::ptr::slice_from_raw_parts(bytes.as_ptr(), bytes.len() - 1);
        &*(entire_struct_as_slice as *const Self)
    }
}

unsafe impl<T, U> EncodeAsVarULE<OptionVarULE<U>> for Option<T>
where
    T: EncodeAsVarULE<U>,
    U: VarULE + ?Sized,
{
    fn encode_var_ule_as_slices<R>(&self, _: impl FnOnce(&[&[u8]]) -> R) -> R {
        // unnecessary if the other two are implemented
        unreachable!()
    }

    #[inline]
    fn encode_var_ule_len(&self) -> usize {
        if let Some(ref inner) = *self {
            // slice + boolean
            1 + inner.encode_var_ule_len()
        } else {
            // boolean + empty slice
            1
        }
    }

    #[allow(clippy::indexing_slicing)] // This method is allowed to panic when lengths are invalid
    fn encode_var_ule_write(&self, dst: &mut [u8]) {
        if let Some(ref inner) = *self {
            debug_assert!(
                !dst.is_empty(),
                "OptionVarULE must have at least one byte when Some"
            );
            dst[0] = 1;
            inner.encode_var_ule_write(&mut dst[1..]);
        } else {
            debug_assert!(
                dst.len() == 1,
                "OptionVarULE must have exactly one byte when None"
            );
            dst[0] = 0;
        }
    }
}

impl<U: VarULE + ?Sized + PartialEq> PartialEq for OptionVarULE<U> {
    fn eq(&self, other: &Self) -> bool {
        self.as_ref().eq(&other.as_ref())
    }
}

impl<U: VarULE + ?Sized + Eq> Eq for OptionVarULE<U> {}

impl<U: VarULE + ?Sized + PartialOrd> PartialOrd for OptionVarULE<U> {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        self.as_ref().partial_cmp(&other.as_ref())
    }
}

impl<U: VarULE + ?Sized + Ord> Ord for OptionVarULE<U> {
    fn cmp(&self, other: &Self) -> Ordering {
        self.as_ref().cmp(&other.as_ref())
    }
}
// This file is part of ICU4X. For terms of use, please see the file
// called LICENSE at the top level of the ICU4X source tree
// (online at: https://github.com/unicode-org/icu4x/blob/main/LICENSE ).

#![allow(clippy::upper_case_acronyms)]
//! ULE implementation for Plain Old Data types, including all sized integers.

use super::*;
use crate::impl_ule_from_array;
use crate::ZeroSlice;
use core::num::{NonZeroI8, NonZeroU8};

/// A u8 array of little-endian data with infallible conversions to and from &[u8].
#[repr(transparent)]
#[derive(Debug, PartialEq, Eq, Clone, Copy, PartialOrd, Ord, Hash)]
#[allow(clippy::exhaustive_structs)] // newtype
pub struct RawBytesULE<const N: usize>(pub [u8; N]);

impl<const N: usize> RawBytesULE<N> {
    // View the underlying little-endian bytes.
    #[inline]
    pub fn as_bytes(&self) -> &[u8] {
        &self.0
    }

    // Reinterpret a mutable byte slice as a mutable slice of Self.
    // Note: trailing bytes that do not fill a whole N-byte chunk are dropped
    // by the integer division below.
    #[inline]
    pub fn from_bytes_unchecked_mut(bytes: &mut [u8]) -> &mut [Self] {
        let data = bytes.as_mut_ptr();
        let len = bytes.len() / N;
        // Safe because Self is transparent over [u8; N]
        unsafe { core::slice::from_raw_parts_mut(data as *mut Self, len) }
    }
}

// Safety (based on the safety checklist on the ULE trait):
// 1. RawBytesULE does not include any uninitialized or padding bytes.
//    (achieved by `#[repr(transparent)]` on a type that satisfies this invariant)
// 2. RawBytesULE is aligned to 1 byte.
//    (achieved by `#[repr(transparent)]` on a type that satisfies this invariant)
// 3. The impl of validate_bytes() returns an error if any byte is not valid (never).
// 4. The impl of validate_bytes() returns an error if there are leftover bytes.
// 5. The other ULE methods use the default impl.
// 6. RawBytesULE byte equality is semantic equality
unsafe impl<const N: usize> ULE for RawBytesULE<N> {
    #[inline]
    fn validate_bytes(bytes: &[u8]) -> Result<(), UleError> {
        if bytes.len() % N == 0 {
            // Safe because Self is transparent over [u8; N]
            Ok(())
        } else {
            Err(UleError::length::<Self>(bytes.len()))
        }
    }
}

impl<const N: usize> From<[u8; N]> for RawBytesULE<N> {
    #[inline]
    fn from(le_bytes: [u8; N]) -> Self {
        Self(le_bytes)
    }
}

// Adds unsigned-integer accessors (and const array conversions) to the
// RawBytesULE of a given size.
macro_rules! impl_byte_slice_size {
    ($unsigned:ty, $size:literal) => {
        impl RawBytesULE<$size> {
            #[doc = concat!("Gets this `RawBytesULE` as a `", stringify!($unsigned), "`. This is equivalent to calling [`AsULE::from_unaligned()`] on the appropriately sized type.")]
            #[inline]
            pub fn as_unsigned_int(&self) -> $unsigned {
                <$unsigned as $crate::ule::AsULE>::from_unaligned(*self)
            }

            #[doc = concat!("Converts a `", stringify!($unsigned), "` to a `RawBytesULE`. This is equivalent to calling [`AsULE::to_unaligned()`] on the appropriately sized type.")]
            #[inline]
            pub const fn from_aligned(value: $unsigned) -> Self {
                Self(value.to_le_bytes())
            }

            impl_ule_from_array!(
                $unsigned,
                RawBytesULE<$size>,
                RawBytesULE([0; $size])
            );
        }
    };
}

// Adds a const `try_from_bytes` constructor to ZeroSlice<$base>.
macro_rules! impl_const_constructors {
    ($base:ty, $size:literal) => {
        impl ZeroSlice<$base> {
            /// This function can be used for constructing ZeroVecs in a const context, avoiding
            /// parsing checks.
            ///
            /// This cannot be generic over T because of current limitations in `const`, but if
            /// this method is needed in a non-const context, check out [`ZeroSlice::parse_bytes()`]
            /// instead.
            ///
            /// See [`ZeroSlice::cast()`] for an example.
            pub const fn try_from_bytes(bytes: &[u8]) -> Result<&Self, UleError> {
                let len = bytes.len();
                #[allow(clippy::modulo_one)]
                if len % $size == 0 {
                    Ok(unsafe { Self::from_bytes_unchecked(bytes) })
                } else {
                    Err(UleError::InvalidLength {
                        ty: concat!("<const construct: ", $size, ">"),
                        len,
                    })
                }
            }
        }
    };
}

// Implements From/AsULE/EqULE for an integer type whose ULE form is
// RawBytesULE of the matching size, plus a const single-value constructor.
macro_rules! impl_byte_slice_type {
    ($single_fn:ident, $type:ty, $size:literal) => {
        impl From<$type> for RawBytesULE<$size> {
            #[inline]
            fn from(value: $type) -> Self {
                Self(value.to_le_bytes())
            }
        }
        impl AsULE for $type {
            type ULE = RawBytesULE<$size>;
            #[inline]
            fn to_unaligned(self) -> Self::ULE {
                RawBytesULE(self.to_le_bytes())
            }
            #[inline]
            fn from_unaligned(unaligned: Self::ULE) -> Self {
                <$type>::from_le_bytes(unaligned.0)
            }
        }
        // EqULE is true because $type and RawBytesULE<$size>
        // have the same byte sequence on little-endian
        unsafe impl EqULE for $type {}

        impl RawBytesULE<$size> {
            pub const fn $single_fn(v: $type) -> Self {
                RawBytesULE(v.to_le_bytes())
            }
        }
    };
}

macro_rules! impl_byte_slice_unsigned_type {
    ($type:ty, $size:literal) => {
        impl_byte_slice_type!(from_unsigned, $type, $size);
    };
}

macro_rules! impl_byte_slice_signed_type {
    ($type:ty, $size:literal) => {
        impl_byte_slice_type!(from_signed, $type, $size);
    };
}

impl_byte_slice_size!(u16, 2);
impl_byte_slice_size!(u32, 4);
impl_byte_slice_size!(u64, 8);
impl_byte_slice_size!(u128, 16);

impl_byte_slice_unsigned_type!(u16, 2);
impl_byte_slice_unsigned_type!(u32, 4);
impl_byte_slice_unsigned_type!(u64, 8);
impl_byte_slice_unsigned_type!(u128, 16);

impl_byte_slice_signed_type!(i16, 2);
impl_byte_slice_signed_type!(i32, 4);
impl_byte_slice_signed_type!(i64, 8);
impl_byte_slice_signed_type!(i128, 16);

impl_const_constructors!(u8, 1);
impl_const_constructors!(u16, 2);
impl_const_constructors!(u32, 4);
impl_const_constructors!(u64, 8);
impl_const_constructors!(u128, 16);

// Note: The f32 and f64 const constructors currently have limited use because
// `f32::to_le_bytes` is not yet const.

impl_const_constructors!(bool, 1);

// Safety (based on the safety checklist on the ULE trait):
// 1. u8 does not include any uninitialized or padding bytes.
// 2. u8 is aligned to 1 byte.
// 3. The impl of validate_bytes() returns an error if any byte is not valid (never).
// 4. The impl of validate_bytes() returns an error if there are leftover bytes (never).
// 5. The other ULE methods use the default impl.
// 6. u8 byte equality is semantic equality
unsafe impl ULE for u8 {
    #[inline]
    fn validate_bytes(_bytes: &[u8]) -> Result<(), UleError> {
        Ok(())
    }
}

impl AsULE for u8 {
    type ULE = Self;
    #[inline]
    fn to_unaligned(self) -> Self::ULE {
        self
    }
    #[inline]
    fn from_unaligned(unaligned: Self::ULE) -> Self {
        unaligned
    }
}

// EqULE is true because u8 is its own ULE.
unsafe impl EqULE for u8 {}

// Safety (based on the safety checklist on the ULE trait):
// 1. NonZeroU8 does not include any uninitialized or padding bytes.
// 2. NonZeroU8 is aligned to 1 byte.
// 3. The impl of validate_bytes() returns an error if any byte is not valid (0x00).
// 4. The impl of validate_bytes() returns an error if there are leftover bytes (never).
// 5. The other ULE methods use the default impl.
// 6. NonZeroU8 byte equality is semantic equality
unsafe impl ULE for NonZeroU8 {
    #[inline]
    fn validate_bytes(bytes: &[u8]) -> Result<(), UleError> {
        // Every byte is one NonZeroU8; only 0x00 is invalid.
        bytes.iter().try_for_each(|b| {
            if *b == 0x00 {
                Err(UleError::parse::<Self>())
            } else {
                Ok(())
            }
        })
    }
}

impl AsULE for NonZeroU8 {
    type ULE = Self;
    #[inline]
    fn to_unaligned(self) -> Self::ULE {
        self
    }
    #[inline]
    fn from_unaligned(unaligned: Self::ULE) -> Self {
        unaligned
    }
}

unsafe impl EqULE for NonZeroU8 {}

// 0x00 never occurs in a NonZeroU8, so it can serve as the niche.
impl NicheBytes<1> for NonZeroU8 {
    const NICHE_BIT_PATTERN: [u8; 1] = [0x00];
}

// Safety (based on the safety checklist on the ULE trait):
// 1. i8 does not include any uninitialized or padding bytes.
// 2. i8 is aligned to 1 byte.
// 3. The impl of validate_bytes() returns an error if any byte is not valid (never).
// 4. The impl of validate_bytes() returns an error if there are leftover bytes (never).
// 5. The other ULE methods use the default impl.
// 6. i8 byte equality is semantic equality
unsafe impl ULE for i8 {
    #[inline]
    fn validate_bytes(_bytes: &[u8]) -> Result<(), UleError> {
        Ok(())
    }
}

impl AsULE for i8 {
    type ULE = Self;
    #[inline]
    fn to_unaligned(self) -> Self::ULE {
        self
    }
    #[inline]
    fn from_unaligned(unaligned: Self::ULE) -> Self {
        unaligned
    }
}

// EqULE is true because i8 is its own ULE.
unsafe impl EqULE for i8 {}

// NonZeroI8 reuses NonZeroU8 as its ULE type; both exclude the 0x00 pattern.
impl AsULE for NonZeroI8 {
    type ULE = NonZeroU8;
    #[inline]
    fn to_unaligned(self) -> Self::ULE {
        // Safety: NonZeroU8 and NonZeroI8 have same size
        unsafe { core::mem::transmute(self) }
    }

    #[inline]
    fn from_unaligned(unaligned: Self::ULE) -> Self {
        // Safety: NonZeroU8 and NonZeroI8 have same size
        unsafe { core::mem::transmute(unaligned) }
    }
}

// These impls are actually safe and portable due to Rust always using IEEE 754, see the documentation
// on f32::from_bits: https://doc.rust-lang.org/stable/std/primitive.f32.html#method.from_bits
//
// The only potential problem is that some older platforms treat signaling NaNs differently. This is
// still quite portable, signalingness is not typically super important.
impl AsULE for f32 { type ULE = RawBytesULE<4>; #[inline] fn to_unaligned(self) -> Self::ULE { self.to_bits().to_unaligned() } #[inline] fn from_unaligned(unaligned: Self::ULE) -> Self { Self::from_bits(u32::from_unaligned(unaligned)) } } impl AsULE for f64 { type ULE = RawBytesULE<8>; #[inline] fn to_unaligned(self) -> Self::ULE { self.to_bits().to_unaligned() } #[inline] fn from_unaligned(unaligned: Self::ULE) -> Self { Self::from_bits(u64::from_unaligned(unaligned)) } } // The from_bits documentation mentions that they have identical byte representations to integers // and EqULE only cares about LE systems unsafe impl EqULE for f32 {} unsafe impl EqULE for f64 {} // The bool impl is not as efficient as it could be // We can, in the future, have https://github.com/unicode-org/icu4x/blob/main/utils/zerovec/design_doc.md#bitpacking // for better bitpacking // Safety (based on the safety checklist on the ULE trait): // 1. bool does not include any uninitialized or padding bytes (the remaining 7 bytes in bool are by definition zero) // 2. bool is aligned to 1 byte. // 3. The impl of validate_bytes() returns an error if any byte is not valid (bytes that are not 0 or 1). // 4. The impl of validate_bytes() returns an error if there are leftover bytes (never). // 5. The other ULE methods use the default impl. // 6. bool byte equality is semantic equality unsafe impl ULE for bool { #[inline] fn validate_bytes(bytes: &[u8]) -> Result<(), UleError> { for byte in bytes { // https://doc.rust-lang.org/reference/types/boolean.html // Rust booleans are always size 1, align 1 values with valid bit patterns 0x0 or 0x1 if *byte > 1 { return Err(UleError::parse::<Self>()); } } Ok(()) } } impl AsULE for bool { type ULE = Self; #[inline] fn to_unaligned(self) -> Self::ULE { self } #[inline] fn from_unaligned(unaligned: Self::ULE) -> Self { unaligned } } // EqULE is true because bool is its own ULE. 
unsafe impl EqULE for bool {} // Safety (based on the safety checklist on the ULE trait): // 1. () does not include any uninitialized or padding bytes (it has no bytes) // 2. () is a ZST that is safe to construct // 3. The impl of validate_bytes() returns an error if any byte is not valid (any byte). // 4. The impl of validate_bytes() returns an error if there are leftover bytes (always). // 5. The other ULE methods use the default impl. // 6. () byte equality is semantic equality unsafe impl ULE for () { #[inline] fn validate_bytes(bytes: &[u8]) -> Result<(), UleError> { if bytes.is_empty() { Ok(()) } else { Err(UleError::length::<Self>(bytes.len())) } } } impl AsULE for () { type ULE = Self; #[inline] fn to_unaligned(self) -> Self::ULE { self } #[inline] fn from_unaligned(unaligned: Self::ULE) -> Self { unaligned } } // EqULE is true because () is its own ULE. unsafe impl EqULE for () {}
// This file is part of ICU4X. For terms of use, please see the file
// called LICENSE at the top level of the ICU4X source tree
// (online at: https://github.com/unicode-org/icu4x/blob/main/LICENSE ).

use crate::ule::*;

// Safety (based on the safety checklist on the ULE trait):
// 1. [T; N] does not include any uninitialized or padding bytes since T is ULE
// 2. [T; N] is aligned to 1 byte since T is ULE
// 3. The impl of validate_bytes() returns an error if any byte is not valid.
// 4. The impl of validate_bytes() returns an error if there are leftover bytes.
// 5. The other ULE methods use the default impl.
// 6. [T; N] byte equality is semantic equality since T is ULE
unsafe impl<T: ULE, const N: usize> ULE for [T; N] {
    #[inline]
    fn validate_bytes(bytes: &[u8]) -> Result<(), UleError> {
        // a slice of multiple Selfs is equivalent to just a larger slice of Ts
        T::validate_bytes(bytes)
    }
}

impl<T: AsULE, const N: usize> AsULE for [T; N] {
    type ULE = [T::ULE; N];
    #[inline]
    fn to_unaligned(self) -> Self::ULE {
        // Element-wise conversion; array layout is preserved.
        self.map(T::to_unaligned)
    }
    #[inline]
    fn from_unaligned(unaligned: Self::ULE) -> Self {
        unaligned.map(T::from_unaligned)
    }
}

unsafe impl<T: EqULE, const N: usize> EqULE for [T; N] {}

// Safety (based on the safety checklist on the VarULE trait):
// 1. str does not include any uninitialized or padding bytes.
// 2. str is aligned to 1 byte.
// 3. The impl of `validate_bytes()` returns an error if any byte is not valid.
// 4. The impl of `validate_bytes()` returns an error if the slice cannot be used in its entirety
// 5. The impl of `from_bytes_unchecked()` returns a reference to the same data.
// 6. `parse_bytes()` is equivalent to `validate_bytes()` followed by `from_bytes_unchecked()`
// 7. str byte equality is semantic equality
unsafe impl VarULE for str {
    #[inline]
    fn validate_bytes(bytes: &[u8]) -> Result<(), UleError> {
        // Valid str bytes are exactly valid UTF-8.
        core::str::from_utf8(bytes).map_err(|_| UleError::parse::<Self>())?;
        Ok(())
    }

    #[inline]
    fn parse_bytes(bytes: &[u8]) -> Result<&Self, UleError> {
        // Overridden so UTF-8 validation runs only once instead of
        // validate_bytes + from_bytes_unchecked.
        core::str::from_utf8(bytes).map_err(|_| UleError::parse::<Self>())
    }
    /// Invariant: must be safe to call when called on a slice that previously
    /// succeeded with `parse_bytes`
    #[inline]
    unsafe fn from_bytes_unchecked(bytes: &[u8]) -> &Self {
        core::str::from_utf8_unchecked(bytes)
    }
}

/// Note: VarULE is well-defined for all `[T]` where `T: ULE`, but [`ZeroSlice`] is more ergonomic
/// when `T` is a low-level ULE type. For example:
///
/// ```no_run
/// # use zerovec::ZeroSlice;
/// # use zerovec::VarZeroVec;
/// # use zerovec::ule::AsULE;
/// // OK: [u8] is a useful type
/// let _: VarZeroVec<[u8]> = unimplemented!();
///
/// // Technically works, but [u32::ULE] is not very useful
/// let _: VarZeroVec<[<u32 as AsULE>::ULE]> = unimplemented!();
///
/// // Better: ZeroSlice<u32>
/// let _: VarZeroVec<ZeroSlice<u32>> = unimplemented!();
/// ```
///
/// [`ZeroSlice`]: crate::ZeroSlice
// Safety (based on the safety checklist on the VarULE trait):
// 1. [T] does not include any uninitialized or padding bytes (achieved by being a slice of a ULE type)
// 2. [T] is aligned to 1 byte (achieved by being a slice of a ULE type)
// 3. The impl of `validate_bytes()` returns an error if any byte is not valid.
// 4. The impl of `validate_bytes()` returns an error if the slice cannot be used in its entirety
// 5. The impl of `from_bytes_unchecked()` returns a reference to the same data.
// 6. All other methods are defaulted
// 7. `[T]` byte equality is semantic equality (achieved by being a slice of a ULE type)
unsafe impl<T> VarULE for [T]
where
    T: ULE,
{
    #[inline]
    fn validate_bytes(slice: &[u8]) -> Result<(), UleError> {
        // T's ULE impl validates a whole byte slice at once (length + contents).
        T::validate_bytes(slice)
    }

    #[inline]
    unsafe fn from_bytes_unchecked(bytes: &[u8]) -> &Self {
        T::slice_from_bytes_unchecked(bytes)
    }
}
// This file is part of ICU4X. For terms of use, please see the file
// called LICENSE at the top level of the ICU4X source tree
// (online at: https://github.com/unicode-org/icu4x/blob/main/LICENSE ).

/// Take a VarULE type and serialize it both in human and machine readable contexts,
/// and ensure it roundtrips correctly
///
/// Note that the concrete type may need to be explicitly specified to prevent issues with
/// https://github.com/rust-lang/rust/issues/130180
#[cfg(feature = "serde")]
pub(crate) fn assert_serde_roundtrips<T>(var: &T)
where
    T: crate::ule::VarULE + ?Sized + serde::Serialize,
    for<'a> Box<T>: serde::Deserialize<'a>,
    for<'a> &'a T: serde::Deserialize<'a>,
    T: core::fmt::Debug + PartialEq,
{
    // Machine-readable path (bincode): check both the zero-copy (&T) and the
    // owned (Box<T>) deserialization routes against the original value.
    let encoded = bincode::serialize(var).unwrap();
    let zero_copy: &T = bincode::deserialize(&encoded).unwrap();
    let owned: Box<T> = bincode::deserialize(&encoded).unwrap();
    assert_eq!(var, zero_copy, "Single element roundtrips with bincode");
    assert_eq!(var, &*owned, "Single element roundtrips with bincode");

    // Human-readable path (JSON): only the owned route is expected to work.
    let text = serde_json::to_string(var).unwrap();
    let reparsed: Box<T> = serde_json::from_str(&text).unwrap();
    assert_eq!(var, &*reparsed, "Single element roundtrips with serde");
}
// This file is part of ICU4X. For terms of use, please see the file
// called LICENSE at the top level of the ICU4X source tree
// (online at: https://github.com/unicode-org/icu4x/blob/main/LICENSE ).

//! ULE impls for tuples.
//!
//! Rust does not guarantee the layout of tuples, so ZeroVec defines its own tuple ULE types.
//!
//! Impls are defined for tuples of up to 6 elements. For longer tuples, use a custom struct
//! with [`#[make_ule]`](crate::make_ule).
//!
//! # Examples
//!
//! ```
//! use zerovec::ZeroVec;
//!
//! // ZeroVec of tuples!
//! let zerovec: ZeroVec<(u32, char)> = [(1, 'a'), (1234901, '啊'), (100, 'अ')]
//!     .iter()
//!     .copied()
//!     .collect();
//!
//! assert_eq!(zerovec.get(1), Some((1234901, '啊')));
//! ```

use super::*;
use core::fmt;
use core::mem;

// Generates the ULE type plus all trait impls for one tuple arity.
// $name is the generated type, $len its arity (as a string literal for docs),
// and each `$t $i` pair is a generic parameter name with its tuple index.
macro_rules! tuple_ule {
    ($name:ident, $len:literal, [ $($t:ident $i:tt),+ ]) => {
        #[doc = concat!("ULE type for tuples with ", $len, " elements.")]
        #[repr(C, packed)]
        #[allow(clippy::exhaustive_structs)] // stable
        pub struct $name<$($t),+>($(pub $t),+);

        // Safety (based on the safety checklist on the ULE trait):
        // 1. TupleULE does not include any uninitialized or padding bytes.
        //    (achieved by `#[repr(C, packed)]` on a struct containing only ULE fields)
        // 2. TupleULE is aligned to 1 byte.
        //    (achieved by `#[repr(C, packed)]` on a struct containing only ULE fields)
        // 3. The impl of validate_bytes() returns an error if any byte is not valid.
        // 4. The impl of validate_bytes() returns an error if there are extra bytes.
        // 5. The other ULE methods use the default impl.
        // 6. TupleULE byte equality is semantic equality by relying on the ULE equality
        //    invariant on the subfields
        unsafe impl<$($t: ULE),+> ULE for $name<$($t),+> {
            fn validate_bytes(bytes: &[u8]) -> Result<(), UleError> {
                // expands to: 0size + mem::size_of::<A>() + mem::size_of::<B>();
                let ule_bytes = 0usize $(+ mem::size_of::<$t>())+;
                // Validates a *slice of tuples*: total length must be a whole
                // number of tuple-sized chunks.
                if bytes.len() % ule_bytes != 0 {
                    return Err(UleError::length::<Self>(bytes.len()));
                }
                for chunk in bytes.chunks(ule_bytes) {
                    // Walk the chunk field by field; `j..i` is field $t's byte range.
                    let mut i = 0;
                    $(
                        let j = i;
                        i += mem::size_of::<$t>();
                        #[allow(clippy::indexing_slicing)] // length checked
                        <$t>::validate_bytes(&chunk[j..i])?;
                    )+
                }
                Ok(())
            }
        }

        impl<$($t: AsULE),+> AsULE for ($($t),+) {
            type ULE = $name<$(<$t>::ULE),+>;

            #[inline]
            fn to_unaligned(self) -> Self::ULE {
                $name($(
                    self.$i.to_unaligned()
                ),+)
            }

            #[inline]
            fn from_unaligned(unaligned: Self::ULE) -> Self {
                (
                    $(
                        <$t>::from_unaligned(unaligned.$i)
                    ),+
                )
            }
        }

        impl<$($t: fmt::Debug + ULE),+> fmt::Debug for $name<$($t),+> {
            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
                // Copy out of the packed struct into a plain tuple, then
                // delegate to the tuple's Debug impl.
                ($(self.$i),+).fmt(f)
            }
        }

        // We need manual impls since `#[derive()]` is disallowed on packed types
        impl<$($t: PartialEq + ULE),+> PartialEq for $name<$($t),+> {
            fn eq(&self, other: &Self) -> bool {
                ($(self.$i),+).eq(&($(other.$i),+))
            }
        }

        impl<$($t: Eq + ULE),+> Eq for $name<$($t),+> {}

        impl<$($t: PartialOrd + ULE),+> PartialOrd for $name<$($t),+> {
            fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> {
                ($(self.$i),+).partial_cmp(&($(other.$i),+))
            }
        }

        impl<$($t: Ord + ULE),+> Ord for $name<$($t),+> {
            fn cmp(&self, other: &Self) -> core::cmp::Ordering {
                ($(self.$i),+).cmp(&($(other.$i),+))
            }
        }

        impl<$($t: ULE),+> Clone for $name<$($t),+> {
            fn clone(&self) -> Self {
                *self
            }
        }

        impl<$($t: ULE),+> Copy for $name<$($t),+> {}

        #[cfg(feature = "alloc")]
        impl<'a, $($t: Ord + AsULE + 'static),+> crate::map::ZeroMapKV<'a> for ($($t),+) {
            type Container = crate::ZeroVec<'a, ($($t),+)>;
            type Slice = crate::ZeroSlice<($($t),+)>;
            type GetType = $name<$(<$t>::ULE),+>;
            type OwnedType = ($($t),+);
        }
    };
}

tuple_ule!(Tuple2ULE, "2", [ A 0, B 1 ]);
tuple_ule!(Tuple3ULE, "3", [ A 0, B 1, C 2 ]);
tuple_ule!(Tuple4ULE, "4", [ A 0, B 1, C 2, D 3 ]);
tuple_ule!(Tuple5ULE, "5", [ A 0, B 1, C 2, D 3, E 4 ]);
tuple_ule!(Tuple6ULE, "6", [ A 0, B 1, C 2, D 3, E 4, F 5 ]);

#[test]
fn test_pairule_validate() {
    use crate::ZeroVec;
    let vec: Vec<(u32, char)> = vec![(1, 'a'), (1234901, '啊'), (100, 'अ')];
    let zerovec: ZeroVec<(u32, char)> = vec.iter().copied().collect();
    let bytes = zerovec.as_bytes();
    let zerovec2 = ZeroVec::parse_bytes(bytes).unwrap();
    assert_eq!(zerovec, zerovec2);

    // Test failed validation with a correctly sized but differently constrained tuple
    // Note: 1234901 is not a valid char
    let zerovec3 = ZeroVec::<(char, u32)>::parse_bytes(bytes);
    assert!(zerovec3.is_err());
}

#[test]
fn test_tripleule_validate() {
    use crate::ZeroVec;
    let vec: Vec<(u32, char, i8)> = vec![(1, 'a', -5), (1234901, '啊', 3), (100, 'अ', -127)];
    let zerovec: ZeroVec<(u32, char, i8)> = vec.iter().copied().collect();
    let bytes = zerovec.as_bytes();
    let zerovec2 = ZeroVec::parse_bytes(bytes).unwrap();
    assert_eq!(zerovec, zerovec2);

    // Test failed validation with a correctly sized but differently constrained tuple
    // Note: 1234901 is not a valid char
    let zerovec3 = ZeroVec::<(char, i8, u32)>::parse_bytes(bytes);
    assert!(zerovec3.is_err());
}

#[test]
fn test_quadule_validate() {
    use crate::ZeroVec;
    let vec: Vec<(u32, char, i8, u16)> =
        vec![(1, 'a', -5, 3), (1234901, '啊', 3, 11), (100, 'अ', -127, 0)];
    let zerovec: ZeroVec<(u32, char, i8, u16)> = vec.iter().copied().collect();
    let bytes = zerovec.as_bytes();
    let zerovec2 = ZeroVec::parse_bytes(bytes).unwrap();
    assert_eq!(zerovec, zerovec2);

    // Test failed validation with a correctly sized but differently constrained tuple
    // Note: 1234901 is not a valid char
    let zerovec3 = ZeroVec::<(char, i8, u16, u32)>::parse_bytes(bytes);
    assert!(zerovec3.is_err());
}
// This file is part of ICU4X. For terms of use, please see the file
// called LICENSE at the top level of the ICU4X source tree
// (online at: https://github.com/unicode-org/icu4x/blob/main/LICENSE ).

//! [`VarULE`] impls for tuples.
//!
//! This module exports [`Tuple2VarULE`], [`Tuple3VarULE`], ..., the corresponding [`VarULE`] types
//! of tuples containing purely [`VarULE`] types.
//!
//! This can be paired with [`VarTupleULE`] to make arbitrary combinations of [`ULE`] and [`VarULE`] types.
//!
//! [`VarTupleULE`]: crate::ule::vartuple::VarTupleULE

use super::*;
use crate::varzerovec::{Index16, VarZeroVecFormat};
use core::fmt;
use core::marker::PhantomData;
use core::mem;
use zerofrom::ZeroFrom;

macro_rules! tuple_varule {
    // Invocation: Should be called like `tuple_varule!(Tuple2VarULE, 2, [ A a AX 0, B b BX 1 ])`
    //
    // $T is a generic name, $t is a lowercase version of it, $T_alt is an "alternate" name to use when we need two types referring
    // to the same input field, $i is an index.
    //
    // $name is the name of the type, $len MUST be the total number of fields, and then $i must be an integer going from 0 to (n - 1) in sequence
    // (This macro code can rely on $i < $len)
    ($name:ident, $len:literal, [ $($T:ident $t:ident $T_alt: ident $i:tt),+ ]) => {
        #[doc = concat!("VarULE type for tuples with ", $len, " elements. See module docs for more information")]
        #[repr(transparent)]
        #[allow(clippy::exhaustive_structs)] // stable
        pub struct $name<$($T: ?Sized,)+ Format: VarZeroVecFormat = Index16> {
            $($t: PhantomData<$T>,)+
            // Safety invariant: Each "field" $i of the MultiFieldsULE is a valid instance of $t
            //
            // In other words, calling `.get_field::<$T>($i)` is always safe.
            //
            // This invariant is upheld when this type is constructed during VarULE parsing/validation
            multi: MultiFieldsULE<$len, Format>
        }

        impl<$($T: VarULE + ?Sized,)+ Format: VarZeroVecFormat> $name<$($T,)+ Format> {
            $(
                #[doc = concat!("Get field ", $i, " of this tuple")]
                pub fn $t(&self) -> &$T {
                    // Safety: See invariant of `multi`.
                    unsafe {
                        self.multi.get_field::<$T>($i)
                    }
                }
            )+
        }

        // # Safety
        //
        // ## Checklist
        //
        // Safety checklist for `VarULE`:
        //
        // 1. align(1): repr(transparent) around an align(1) VarULE type: MultiFieldsULE
        // 2. No padding: see previous point
        // 3. `validate_bytes` validates that this type is a valid MultiFieldsULE, and that each field is the correct type from the tuple.
        // 4. `validate_bytes` checks length by deferring to the inner ULEs
        // 5. `from_bytes_unchecked` returns a fat pointer to the bytes.
        // 6. All other methods are left at their default impl.
        // 7. The inner ULEs have byte equality, so this composition has byte equality.
        unsafe impl<$($T: VarULE + ?Sized,)+ Format: VarZeroVecFormat> VarULE for $name<$($T,)+ Format> {
            fn validate_bytes(bytes: &[u8]) -> Result<(), UleError> {
                // Safety: We validate that this type is the same kind of MultiFieldsULE (with $len, Format)
                // as in the type def
                let multi = <MultiFieldsULE<$len, Format> as VarULE>::parse_bytes(bytes)?;
                $(
                    // Safety invariant: $i < $len, from the macro invocation
                    unsafe {
                        multi.validate_field::<$T>($i)?;
                    }
                )+
                Ok(())
            }
            unsafe fn from_bytes_unchecked(bytes: &[u8]) -> &Self {
                // Safety: We validate that this type is the same kind of MultiFieldsULE (with $len, Format)
                // as in the type def
                let multi = <MultiFieldsULE<$len, Format> as VarULE>::from_bytes_unchecked(bytes);

                // This type is repr(transparent) over MultiFieldsULE<$len>, so its slices can be transmuted
                // Field invariant upheld here: validate_bytes above validates every field for being the right type
                mem::transmute::<&MultiFieldsULE<$len, Format>, &Self>(multi)
            }
        }

        impl<$($T: fmt::Debug + VarULE + ?Sized,)+ Format: VarZeroVecFormat> fmt::Debug for $name<$($T,)+ Format> {
            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
                // Delegate to the Debug impl of a tuple of field references.
                ($(self.$t(),)+).fmt(f)
            }
        }

        // We need manual impls since `#[derive()]` is disallowed on packed types
        impl<$($T: PartialEq + VarULE + ?Sized,)+ Format: VarZeroVecFormat> PartialEq for $name<$($T,)+ Format> {
            fn eq(&self, other: &Self) -> bool {
                ($(self.$t(),)+).eq(&($(other.$t(),)+))
            }
        }

        impl<$($T: Eq + VarULE + ?Sized,)+ Format: VarZeroVecFormat> Eq for $name<$($T,)+ Format> {}

        impl<$($T: PartialOrd + VarULE + ?Sized,)+ Format: VarZeroVecFormat> PartialOrd for $name<$($T,)+ Format> {
            fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> {
                ($(self.$t(),)+).partial_cmp(&($(other.$t(),)+))
            }
        }

        impl<$($T: Ord + VarULE + ?Sized,)+ Format: VarZeroVecFormat> Ord for $name<$($T,)+ Format> {
            fn cmp(&self, other: &Self) -> core::cmp::Ordering {
                ($(self.$t(),)+).cmp(&($(other.$t(),)+))
            }
        }

        // # Safety
        //
        // encode_var_ule_len: returns the length of the individual VarULEs together.
        //
        // encode_var_ule_write: writes bytes by deferring to the inner VarULE impls.
        unsafe impl<$($T,)+ $($T_alt,)+ Format> EncodeAsVarULE<$name<$($T,)+ Format>> for ( $($T_alt),+ )
        where
            $($T: VarULE + ?Sized,)+
            $($T_alt: EncodeAsVarULE<$T>,)+
            Format: VarZeroVecFormat,
        {
            fn encode_var_ule_as_slices<R>(&self, _: impl FnOnce(&[&[u8]]) -> R) -> R {
                // unnecessary if the other two are implemented
                unreachable!()
            }

            #[inline]
            fn encode_var_ule_len(&self) -> usize {
                // Safety: We validate that this type is the same kind of MultiFieldsULE (with $len, Format)
                // as in the type def
                MultiFieldsULE::<$len, Format>::compute_encoded_len_for([$(self.$i.encode_var_ule_len()),+])
            }

            #[inline]
            fn encode_var_ule_write(&self, dst: &mut [u8]) {
                let lengths = [$(self.$i.encode_var_ule_len()),+];
                // Safety: We validate that this type is the same kind of MultiFieldsULE (with $len, Format)
                // as in the type def
                let multi = MultiFieldsULE::<$len, Format>::new_from_lengths_partially_initialized(lengths, dst);
                $(
                    // Safety: $i < $len, from the macro invocation, and field $i is supposed to be of type $T
                    unsafe {
                        multi.set_field_at::<$T, $T_alt>($i, &self.$i);
                    }
                )+
            }
        }

        #[cfg(feature = "alloc")]
        impl<$($T: VarULE + ?Sized,)+ Format: VarZeroVecFormat> alloc::borrow::ToOwned for $name<$($T,)+ Format> {
            type Owned = alloc::boxed::Box<Self>;
            fn to_owned(&self) -> Self::Owned {
                encode_varule_to_box(self)
            }
        }

        impl<'a, $($T,)+ $($T_alt,)+ Format> ZeroFrom <'a, $name<$($T,)+ Format>> for ($($T_alt),+)
        where
            $($T: VarULE + ?Sized,)+
            $($T_alt: ZeroFrom<'a, $T>,)+
            Format: VarZeroVecFormat {
            fn zero_from(other: &'a $name<$($T,)+ Format>) -> Self {
                (
                    $($T_alt::zero_from(other.$t()),)+
                )
            }
        }

        #[cfg(feature = "serde")]
        impl<$($T: serde::Serialize,)+ Format> serde::Serialize for $name<$($T,)+ Format>
        where
            $($T: VarULE + ?Sized,)+
            // This impl should be present on almost all VarULE types. if it isn't, that is a bug
            $(for<'a> &'a $T: ZeroFrom<'a, $T>,)+
            Format: VarZeroVecFormat {
            fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
            where
                S: serde::Serializer {
                if serializer.is_human_readable() {
                    // Human-readable formats get a structured tuple of references.
                    let this = (
                        $(self.$t()),+
                    );
                    <($(&$T),+) as serde::Serialize>::serialize(&this, serializer)
                } else {
                    // Machine-readable formats get the raw ULE byte representation.
                    serializer.serialize_bytes(self.multi.as_bytes())
                }
            }
        }

        #[cfg(feature = "serde")]
        impl<'de, $($T: VarULE + ?Sized,)+ Format> serde::Deserialize<'de> for alloc::boxed::Box<$name<$($T,)+ Format>>
        where
            // This impl should be present on almost all deserializable VarULE types
            $( alloc::boxed::Box<$T>: serde::Deserialize<'de>,)+
            Format: VarZeroVecFormat {
            fn deserialize<Des>(deserializer: Des) -> Result<Self, Des::Error>
            where
                Des: serde::Deserializer<'de> {
                if deserializer.is_human_readable() {
                    let this = <( $(alloc::boxed::Box<$T>),+) as serde::Deserialize>::deserialize(deserializer)?;
                    let this_ref = (
                        $(&*this.$i),+
                    );
                    Ok(crate::ule::encode_varule_to_box(&this_ref))
                } else {
                    // This branch should usually not be hit, since Cow-like use cases will hit the Deserialize impl for &'a TupleNVarULE instead.
                    let deserialized = <&$name<$($T,)+ Format>>::deserialize(deserializer)?;
                    Ok(deserialized.to_boxed())
                }
            }
        }

        #[cfg(feature = "serde")]
        impl<'a, 'de: 'a, $($T: VarULE + ?Sized,)+ Format: VarZeroVecFormat> serde::Deserialize<'de> for &'a $name<$($T,)+ Format> {
            fn deserialize<Des>(deserializer: Des) -> Result<Self, Des::Error>
            where
                Des: serde::Deserializer<'de> {
                if deserializer.is_human_readable() {
                    Err(serde::de::Error::custom(
                        concat!("&", stringify!($name), " can only deserialize in zero-copy ways"),
                    ))
                } else {
                    // Zero-copy: borrow the byte slice and validate it in place.
                    let bytes = <&[u8]>::deserialize(deserializer)?;
                    $name::<$($T,)+ Format>::parse_bytes(bytes).map_err(serde::de::Error::custom)
                }
            }
        }
    };
}

tuple_varule!(Tuple2VarULE, 2, [ A a AE 0, B b BE 1 ]);
tuple_varule!(Tuple3VarULE, 3, [ A a AE 0, B b BE 1, C c CE 2 ]);
tuple_varule!(Tuple4VarULE, 4, [ A a AE 0, B b BE 1, C c CE 2, D d DE 3 ]);
tuple_varule!(Tuple5VarULE, 5, [ A a AE 0, B b BE 1, C c CE 2, D d DE 3, E e EE 4 ]);
tuple_varule!(Tuple6VarULE, 6, [ A a AE 0, B b BE 1, C c CE 2, D d DE 3, E e EE 4, F f FE 5 ]);

#[cfg(test)]
mod tests {
    use super::*;
    use crate::varzerovec::{Index16, Index32, Index8, VarZeroVecFormat};
    use crate::VarZeroSlice;
    use crate::VarZeroVec;

    #[test]
    fn test_pairvarule_validate() {
        let vec: Vec<(&str, &[u8])> = vec![("a", b"b"), ("foo", b"bar"), ("lorem", b"ipsum\xFF")];
        let zerovec: VarZeroVec<Tuple2VarULE<str, [u8]>> = (&vec).into();
        let bytes = zerovec.as_bytes();
        let zerovec2 = VarZeroVec::parse_bytes(bytes).unwrap();
        assert_eq!(zerovec, zerovec2);

        // Test failed validation with a correctly sized but differently constrained tuple
        // Note: ipsum\xFF is not a valid str
        let zerovec3 = VarZeroVec::<Tuple2VarULE<str, str>>::parse_bytes(bytes);
        assert!(zerovec3.is_err());
        #[cfg(feature = "serde")]
        for val in zerovec.iter() {
            // Can't use inference due to https://github.com/rust-lang/rust/issues/130180
            crate::ule::test_utils::assert_serde_roundtrips::<Tuple2VarULE<str, [u8]>>(val);
        }
    }

    fn test_tripleule_validate_inner<Format: VarZeroVecFormat>() {
        let vec: Vec<(&str, &[u8], VarZeroVec<str>)> = vec![
            ("a", b"b", (&vec!["a", "b", "c"]).into()),
            ("foo", b"bar", (&vec!["baz", "quux"]).into()),
            (
                "lorem",
                b"ipsum\xFF",
                (&vec!["dolor", "sit", "amet"]).into(),
            ),
        ];
        let zerovec: VarZeroVec<Tuple3VarULE<str, [u8], VarZeroSlice<str>, Format>> = (&vec).into();
        let bytes = zerovec.as_bytes();
        let zerovec2 = VarZeroVec::parse_bytes(bytes).unwrap();
        assert_eq!(zerovec, zerovec2);

        // Test failed validation with a correctly sized but differently constrained tuple
        // Note: the str is unlikely to be a valid varzerovec
        let zerovec3 = VarZeroVec::<Tuple3VarULE<VarZeroSlice<str>, [u8], VarZeroSlice<str>, Format>>::parse_bytes(bytes);
        assert!(zerovec3.is_err());

        #[cfg(feature = "serde")]
        for val in zerovec.iter() {
            // Can't use inference due to https://github.com/rust-lang/rust/issues/130180
            crate::ule::test_utils::assert_serde_roundtrips::<
                Tuple3VarULE<str, [u8], VarZeroSlice<str>, Format>,
            >(val);
        }
    }

    #[test]
    fn test_tripleule_validate() {
        test_tripleule_validate_inner::<Index8>();
        test_tripleule_validate_inner::<Index16>();
        test_tripleule_validate_inner::<Index32>();
    }
}
// This file is part of ICU4X. For terms of use, please see the file
// called LICENSE at the top level of the ICU4X source tree
// (online at: https://github.com/unicode-org/icu4x/blob/main/LICENSE ).

//! Types to help compose fixed-size [`ULE`] and variable-size [`VarULE`] primitives.
//!
//! This module exports [`VarTuple`] and [`VarTupleULE`], which allow a single sized type and
//! a single unsized type to be stored together as a [`VarULE`].
//!
//! # Examples
//!
//! ```
//! use zerovec::ule::vartuple::{VarTuple, VarTupleULE};
//! use zerovec::VarZeroVec;
//!
//! struct Employee<'a> {
//!     id: u32,
//!     name: &'a str,
//! };
//!
//! let employees = [
//!     Employee {
//!         id: 12345,
//!         name: "Jane Doe",
//!     },
//!     Employee {
//!         id: 67890,
//!         name: "John Doe",
//!     },
//! ];
//!
//! let employees_as_var_tuples = employees
//!     .into_iter()
//!     .map(|x| VarTuple {
//!         sized: x.id,
//!         variable: x.name,
//!     })
//!     .collect::<Vec<_>>();
//!
//! let employees_vzv: VarZeroVec<VarTupleULE<u32, str>> =
//!     employees_as_var_tuples.as_slice().into();
//!
//! assert_eq!(employees_vzv.len(), 2);
//!
//! assert_eq!(employees_vzv.get(0).unwrap().sized.as_unsigned_int(), 12345);
//! assert_eq!(&employees_vzv.get(0).unwrap().variable, "Jane Doe");
//!
//! assert_eq!(employees_vzv.get(1).unwrap().sized.as_unsigned_int(), 67890);
//! assert_eq!(&employees_vzv.get(1).unwrap().variable, "John Doe");
//! ```

use core::mem::{size_of, transmute_copy};
use zerofrom::ZeroFrom;

use super::{AsULE, EncodeAsVarULE, UleError, VarULE, ULE};

/// A sized type that can be converted to a [`VarTupleULE`].
///
/// See the module for examples.
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone)]
#[allow(clippy::exhaustive_structs)] // well-defined type
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct VarTuple<A, B> {
    pub sized: A,
    pub variable: B,
}

/// A dynamically-sized type combining a sized and an unsized type.
///
/// See the module for examples.
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord)]
#[allow(clippy::exhaustive_structs)] // well-defined type
#[repr(C)]
pub struct VarTupleULE<A: AsULE, V: VarULE + ?Sized> {
    pub sized: A::ULE,
    pub variable: V,
}

// # Safety
//
// ## Representation
//
// The type `VarTupleULE` is align(1) because it is repr(C) and its fields
// are all align(1), since they are themselves ULE and VarULE, which have
// this same safety constraint. Further, there is no padding, because repr(C)
// does not add padding when all fields are align(1).
//
// <https://doc.rust-lang.org/reference/type-layout.html#the-c-representation>
//
// Pointers to `VarTupleULE` are fat pointers with metadata equal to the
// metadata of the inner DST field V.
//
// <https://doc.rust-lang.org/stable/std/ptr/trait.Pointee.html>
//
// ## Checklist
//
// Safety checklist for `VarULE`:
//
// 1. align(1): see "Representation" above.
// 2. No padding: see "Representation" above.
// 3. `validate_bytes` checks length and defers to the inner ULEs.
// 4. `validate_bytes` checks length and defers to the inner ULEs.
// 5. `from_bytes_unchecked` returns a fat pointer to the bytes.
// 6. All other methods are left at their default impl.
// 7. The two ULEs have byte equality, so this composition has byte equality.
unsafe impl<A, V> VarULE for VarTupleULE<A, V>
where
    A: AsULE + 'static,
    V: VarULE + ?Sized,
{
    fn validate_bytes(bytes: &[u8]) -> Result<(), UleError> {
        // The first size_of::<A::ULE>() bytes are the sized field; everything
        // after belongs to the variable-length tail.
        let (sized_chunk, variable_chunk) = bytes
            .split_at_checked(size_of::<A::ULE>())
            .ok_or(UleError::length::<Self>(bytes.len()))?;
        A::ULE::validate_bytes(sized_chunk)?;
        V::validate_bytes(variable_chunk)?;
        Ok(())
    }

    unsafe fn from_bytes_unchecked(bytes: &[u8]) -> &Self {
        let (_sized_chunk, variable_chunk) = bytes.split_at_unchecked(size_of::<A::ULE>());
        // Safety: variable_chunk is a valid V because of this function's precondition: bytes is a valid Self,
        // and a valid Self contains a valid V after the space needed for A::ULE.
        let variable_ref = V::from_bytes_unchecked(variable_chunk);
        let variable_ptr: *const V = variable_ref;

        // Safety: The DST of VarTupleULE is a pointer to the `sized` element and has a metadata
        // equal to the metadata of the `variable` field (see "Representation" comments on the impl).

        // We should use the pointer metadata APIs here when they are stable: https://github.com/rust-lang/rust/issues/81513
        // For now we rely on all DST metadata being a usize.

        // Extract metadata from V's DST
        // Rust doesn't know that `&V` is a fat pointer so we have to use transmute_copy
        assert_eq!(size_of::<*const V>(), size_of::<(*const u8, usize)>());
        // Safety: We have asserted that the transmute Src and Dst are the same size. Furthermore,
        // DST pointers are a pointer and usize length metadata
        let (_v_ptr, metadata) = transmute_copy::<*const V, (*const u8, usize)>(&variable_ptr);

        // Construct a new DST with the same metadata as V
        assert_eq!(size_of::<*const Self>(), size_of::<(*const u8, usize)>());
        // Safety: Same as above but in the other direction.
        let composed_ptr =
            transmute_copy::<(*const u8, usize), *const Self>(&(bytes.as_ptr(), metadata));
        &*(composed_ptr)
    }
}

// # Safety
//
// encode_var_ule_len: returns the length of the two ULEs together.
//
// encode_var_ule_write: writes bytes by deferring to the inner ULE impls.
unsafe impl<A, B, V> EncodeAsVarULE<VarTupleULE<A, V>> for VarTuple<A, B>
where
    A: AsULE + 'static,
    B: EncodeAsVarULE<V>,
    V: VarULE + ?Sized,
{
    fn encode_var_ule_as_slices<R>(&self, _: impl FnOnce(&[&[u8]]) -> R) -> R {
        // unnecessary if the other two are implemented
        unreachable!()
    }

    #[inline]
    fn encode_var_ule_len(&self) -> usize {
        size_of::<A::ULE>() + self.variable.encode_var_ule_len()
    }

    #[inline]
    fn encode_var_ule_write(&self, dst: &mut [u8]) {
        // TODO: use split_first_chunk_mut in 1.77
        let (sized_chunk, variable_chunk) = dst.split_at_mut(size_of::<A::ULE>());
        // Write the sized field's ULE bytes, then let the variable field
        // encode itself into the rest of the buffer.
        sized_chunk.clone_from_slice([self.sized.to_unaligned()].as_bytes());
        self.variable.encode_var_ule_write(variable_chunk);
    }
}

#[cfg(feature = "alloc")]
impl<A, V> alloc::borrow::ToOwned for VarTupleULE<A, V>
where
    A: AsULE + 'static,
    V: VarULE + ?Sized,
{
    type Owned = alloc::boxed::Box<Self>;
    fn to_owned(&self) -> Self::Owned {
        crate::ule::encode_varule_to_box(self)
    }
}

impl<'a, A, B, V> ZeroFrom<'a, VarTupleULE<A, V>> for VarTuple<A, B>
where
    A: AsULE + 'static,
    V: VarULE + ?Sized,
    B: ZeroFrom<'a, V>,
{
    fn zero_from(other: &'a VarTupleULE<A, V>) -> Self {
        VarTuple {
            sized: AsULE::from_unaligned(other.sized),
            variable: B::zero_from(&other.variable),
        }
    }
}

#[cfg(feature = "serde")]
impl<A, V> serde::Serialize for VarTupleULE<A, V>
where
    A: AsULE + 'static,
    V: VarULE + ?Sized,
    A: serde::Serialize,
    V: serde::Serialize,
{
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        if serializer.is_human_readable() {
            // Human-readable: serialize as a structured VarTuple.
            let this = VarTuple {
                sized: A::from_unaligned(self.sized),
                variable: &self.variable,
            };
            this.serialize(serializer)
        } else {
            // Machine-readable: serialize the raw ULE bytes.
            serializer.serialize_bytes(self.as_bytes())
        }
    }
}

#[cfg(feature = "serde")]
impl<'a, 'de: 'a, A, V> serde::Deserialize<'de> for &'a VarTupleULE<A, V>
where
    A: AsULE + 'static,
    V: VarULE + ?Sized,
    A: serde::Deserialize<'de>,
{
    fn deserialize<Des>(deserializer: Des) -> Result<Self, Des::Error>
    where
        Des: serde::Deserializer<'de>,
    {
        if !deserializer.is_human_readable() {
            // Zero-copy: borrow the byte slice and validate it in place.
            let bytes = <&[u8]>::deserialize(deserializer)?;
            VarTupleULE::<A, V>::parse_bytes(bytes).map_err(serde::de::Error::custom)
        } else {
            Err(serde::de::Error::custom(
                "&VarTupleULE can only deserialize in zero-copy ways",
            ))
        }
    }
}

#[cfg(feature = "serde")]
impl<'de, A, V> serde::Deserialize<'de> for alloc::boxed::Box<VarTupleULE<A, V>>
where
    A: AsULE + 'static,
    V: VarULE + ?Sized,
    A: serde::Deserialize<'de>,
    alloc::boxed::Box<V>: serde::Deserialize<'de>,
{
    fn deserialize<Des>(deserializer: Des) -> Result<Self, Des::Error>
    where
        Des: serde::Deserializer<'de>,
    {
        if deserializer.is_human_readable() {
            let this = VarTuple::<A, alloc::boxed::Box<V>>::deserialize(deserializer)?;
            Ok(crate::ule::encode_varule_to_box(&this))
        } else {
            // This branch should usually not be hit, since Cow-like use cases will hit the Deserialize impl for &'a TupleNVarULE instead.
            let deserialized = <&VarTupleULE<A, V>>::deserialize(deserializer)?;
            Ok(deserialized.to_boxed())
        }
    }
}

#[test]
fn test_simple() {
    let var_tuple = VarTuple {
        sized: 1500u16,
        variable: "hello",
    };
    let var_tuple_ule = super::encode_varule_to_box(&var_tuple);
    assert_eq!(var_tuple_ule.sized.as_unsigned_int(), 1500);
    assert_eq!(&var_tuple_ule.variable, "hello");

    // Can't use inference due to https://github.com/rust-lang/rust/issues/130180
    #[cfg(feature = "serde")]
    crate::ule::test_utils::assert_serde_roundtrips::<VarTupleULE<u16, str>>(&var_tuple_ule);
}

#[test]
fn test_nested() {
    use crate::{ZeroSlice, ZeroVec};
    let var_tuple = VarTuple {
        sized: 2000u16,
        variable: VarTuple {
            sized: '🦙',
            variable: ZeroVec::alloc_from_slice(b"ICU"),
        },
    };
    let var_tuple_ule = super::encode_varule_to_box(&var_tuple);
    assert_eq!(var_tuple_ule.sized.as_unsigned_int(), 2000u16);
    assert_eq!(var_tuple_ule.variable.sized.to_char(), '🦙');
    assert_eq!(
        &var_tuple_ule.variable.variable,
        ZeroSlice::from_ule_slice(b"ICU")
    );

    // Can't use inference due to https://github.com/rust-lang/rust/issues/130180
    #[cfg(feature = "serde")]
    crate::ule::test_utils::assert_serde_roundtrips::<
        VarTupleULE<u16, VarTupleULE<char, ZeroSlice<_>>>,
    >(&var_tuple_ule);
}
// This file is part of ICU4X. For terms of use, please see the file
// called LICENSE at the top level of the ICU4X source tree
// (online at: https://github.com/unicode-org/icu4x/blob/main/LICENSE ).

use super::VarZeroVecFormatError;
use crate::ule::*;
use core::cmp::Ordering;
use core::convert::TryFrom;
use core::marker::PhantomData;
use core::mem;
use core::ops::Range;

/// This trait allows switching between different possible internal
/// representations of VarZeroVec.
///
/// Currently this crate supports three formats: [`Index8`], [`Index16`] and [`Index32`],
/// with [`Index16`] being the default for all [`VarZeroVec`](super::VarZeroVec)
/// types unless explicitly specified otherwise.
///
/// Do not implement this trait, its internals may be changed in the future,
/// and all of its associated items are hidden from the docs.
pub trait VarZeroVecFormat: 'static + Sized {
    /// The type to use for the indexing array
    ///
    /// Safety: must be a ULE for which all byte sequences are allowed
    #[doc(hidden)]
    type Index: IntegerULE;
    /// The type to use for the length segment
    ///
    /// Safety: must be a ULE for which all byte sequences are allowed
    #[doc(hidden)]
    type Len: IntegerULE;
}

/// This trait represents various ULE types that can be used to represent an integer
///
/// Do not implement this trait, its internals may be changed in the future,
/// and all of its associated items are hidden from the docs.
#[allow(clippy::missing_safety_doc)] // no safety section for you, don't implement this trait period
#[doc(hidden)]
pub unsafe trait IntegerULE: ULE {
    /// The error to show when unable to construct a vec
    #[doc(hidden)]
    const TOO_LARGE_ERROR: &'static str;

    /// Safety: must be sizeof(self)
    #[doc(hidden)]
    const SIZE: usize;

    /// Safety: must be maximum integral value represented here
    #[doc(hidden)]
    const MAX_VALUE: u32;

    /// Safety: Must roundtrip with from_usize and represent the correct
    /// integral value
    #[doc(hidden)]
    fn iule_to_usize(self) -> usize;

    /// Fallible conversion from usize; returns `None` when the value does not
    /// fit in this integer type.
    #[doc(hidden)]
    fn iule_from_usize(x: usize) -> Option<Self>;

    /// Safety: Should always convert a buffer into an array of Self with the correct length
    #[doc(hidden)]
    #[cfg(feature = "alloc")]
    fn iule_from_bytes_unchecked_mut(bytes: &mut [u8]) -> &mut [Self];
}

/// This is a [`VarZeroVecFormat`] that stores u8s in the index array, and a u8 for a length.
///
/// Will have a smaller data size, but it's *extremely* likely for larger arrays
/// to be unrepresentable (and error on construction). Should probably be used
/// for known-small arrays, where all but the last field are known-small.
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, PartialOrd, Ord)]
#[allow(clippy::exhaustive_structs)] // marker
pub struct Index8;

/// This is a [`VarZeroVecFormat`] that stores u16s in the index array, and a u16 for a length.
///
/// Will have a smaller data size, but it's more likely for larger arrays
/// to be unrepresentable (and error on construction)
///
/// This is the default index size used by all [`VarZeroVec`](super::VarZeroVec) types.
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, PartialOrd, Ord)]
#[allow(clippy::exhaustive_structs)] // marker
pub struct Index16;

/// This is a [`VarZeroVecFormat`] that stores u32s in the index array, and a u32 for a length.
/// Will have a larger data size, but will support large arrays without
/// problems.
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, PartialOrd, Ord)]
#[allow(clippy::exhaustive_structs)] // marker
pub struct Index32;

impl VarZeroVecFormat for Index8 {
    type Index = u8;
    type Len = u8;
}

impl VarZeroVecFormat for Index16 {
    type Index = RawBytesULE<2>;
    type Len = RawBytesULE<2>;
}

impl VarZeroVecFormat for Index32 {
    type Index = RawBytesULE<4>;
    type Len = RawBytesULE<4>;
}

// Safety (IntegerULE contract): u8 is a single-byte ULE where every byte
// pattern is valid, SIZE is its size, and usize conversions roundtrip.
unsafe impl IntegerULE for u8 {
    const TOO_LARGE_ERROR: &'static str = "Attempted to build VarZeroVec out of elements that \
                                           cumulatively are larger than a u8 in size";
    const SIZE: usize = mem::size_of::<Self>();
    const MAX_VALUE: u32 = u8::MAX as u32;
    #[inline]
    fn iule_to_usize(self) -> usize {
        self as usize
    }
    #[inline]
    fn iule_from_usize(u: usize) -> Option<Self> {
        u8::try_from(u).ok()
    }
    #[inline]
    #[cfg(feature = "alloc")]
    fn iule_from_bytes_unchecked_mut(bytes: &mut [u8]) -> &mut [Self] {
        // A &mut [u8] already is a &mut [Self]
        bytes
    }
}

// Safety (IntegerULE contract): RawBytesULE<2> is an unaligned little-endian
// u16; all byte patterns are valid and usize conversions roundtrip.
unsafe impl IntegerULE for RawBytesULE<2> {
    const TOO_LARGE_ERROR: &'static str = "Attempted to build VarZeroVec out of elements that \
                                           cumulatively are larger than a u16 in size";
    const SIZE: usize = mem::size_of::<Self>();
    const MAX_VALUE: u32 = u16::MAX as u32;
    #[inline]
    fn iule_to_usize(self) -> usize {
        self.as_unsigned_int() as usize
    }
    #[inline]
    fn iule_from_usize(u: usize) -> Option<Self> {
        u16::try_from(u).ok().map(u16::to_unaligned)
    }
    #[inline]
    #[cfg(feature = "alloc")]
    fn iule_from_bytes_unchecked_mut(bytes: &mut [u8]) -> &mut [Self] {
        Self::from_bytes_unchecked_mut(bytes)
    }
}

// Safety (IntegerULE contract): RawBytesULE<4> is an unaligned little-endian
// u32; all byte patterns are valid and usize conversions roundtrip.
unsafe impl IntegerULE for RawBytesULE<4> {
    const TOO_LARGE_ERROR: &'static str = "Attempted to build VarZeroVec out of elements that \
                                           cumulatively are larger than a u32 in size";
    const SIZE: usize = mem::size_of::<Self>();
    const MAX_VALUE: u32 = u32::MAX;
    #[inline]
    fn iule_to_usize(self) -> usize {
        self.as_unsigned_int() as usize
    }
    #[inline]
    fn iule_from_usize(u: usize) -> Option<Self> {
        u32::try_from(u).ok().map(u32::to_unaligned)
    }
    #[inline]
    #[cfg(feature = "alloc")]
    fn iule_from_bytes_unchecked_mut(bytes: &mut [u8]) -> &mut [Self] {
        Self::from_bytes_unchecked_mut(bytes)
    }
}

/// A more parsed version of `VarZeroSlice`. This type is where most of the VarZeroVec
/// internal representation code lies.
///
/// This is *basically* an `&'a [u8]` to a zero copy buffer, but split out into
/// the buffer components. Logically this is capable of behaving as
/// a `&'a [T::VarULE]`, but since `T::VarULE` is unsized that type does not actually
/// exist.
///
/// See [`VarZeroVecComponents::parse_bytes()`] for information on the internal invariants involved
#[derive(Debug)]
pub struct VarZeroVecComponents<'a, T: ?Sized, F> {
    /// The number of elements
    len: u32,
    /// The list of indices into the `things` slice
    /// Since the first element is always at things[0], the first element of the indices array is for the *second* element
    indices: &'a [u8],
    /// The contiguous list of `T::VarULE`s
    things: &'a [u8],
    // Zero-sized: ties the element type and format to this value without storing them
    marker: PhantomData<(&'a T, F)>,
}

// #[derive()] won't work here since we do not want it to be
// bound on T: Copy
impl<'a, T: ?Sized, F> Copy for VarZeroVecComponents<'a, T, F> {}
impl<'a, T: ?Sized, F> Clone for VarZeroVecComponents<'a, T, F> {
    fn clone(&self) -> Self {
        *self
    }
}

impl<'a, T: VarULE + ?Sized, F> Default for VarZeroVecComponents<'a, T, F> {
    #[inline]
    fn default() -> Self {
        Self::new()
    }
}

impl<'a, T: VarULE + ?Sized, F> VarZeroVecComponents<'a, T, F> {
    /// Construct an empty VarZeroVecComponents (no elements, empty buffers).
    #[inline]
    pub fn new() -> Self {
        Self {
            len: 0,
            indices: &[],
            things: &[],
            marker: PhantomData,
        }
    }
}

impl<'a, T: VarULE + ?Sized, F: VarZeroVecFormat> VarZeroVecComponents<'a, T, F> {
    /// Construct a new VarZeroVecComponents, checking invariants about the overall buffer size:
    ///
    /// - There must be either zero or at least four bytes (if four, this is the "length" parsed as a usize)
    /// - There must be at least `4*(length - 1) + 4` bytes total, to form the array `indices` of indices
    /// - `0..indices[0]` must index into a valid section of
    ///   `things` (the data after `indices`),
    /// such that it parses to a `T::VarULE`
    /// - `indices[i - 1]..indices[i]` must index into a valid section of
    ///   `things` (the data after `indices`), such that it parses to a `T::VarULE`
    /// - `indices[len - 2]..things.len()` must index into a valid section of
    ///   `things`, such that it parses to a `T::VarULE`
    #[inline]
    pub fn parse_bytes(slice: &'a [u8]) -> Result<Self, VarZeroVecFormatError> {
        // The empty VZV is special-cased to the empty slice
        if slice.is_empty() {
            return Ok(VarZeroVecComponents {
                len: 0,
                indices: &[],
                things: &[],
                marker: PhantomData,
            });
        }
        // Read the length header (F::Len::SIZE bytes at the front of the buffer)
        let len_bytes = slice
            .get(0..F::Len::SIZE)
            .ok_or(VarZeroVecFormatError::Metadata)?;
        let len_ule =
            F::Len::parse_bytes_to_slice(len_bytes).map_err(|_| VarZeroVecFormatError::Metadata)?;

        let len = len_ule
            .first()
            .ok_or(VarZeroVecFormatError::Metadata)?
            .iule_to_usize();

        // Everything after the length header: indices array followed by data
        let rest = slice
            .get(F::Len::SIZE..)
            .ok_or(VarZeroVecFormatError::Metadata)?;
        let len_u32 = u32::try_from(len).map_err(|_| VarZeroVecFormatError::Metadata);

        // We pass down the rest of the invariants
        Self::parse_bytes_with_length(len_u32?, rest)
    }

    /// Construct a new VarZeroVecComponents, checking invariants about the overall buffer size:
    ///
    /// - There must be at least `4*len` bytes total, to form the array `indices` of indices.
    /// - `indices[i]..indices[i+1]` must index into a valid section of
    ///   `things` (the data after `indices`), such that it parses to a `T::VarULE`
    /// - `indices[len - 1]..things.len()` must index into a valid section of
    ///   `things`, such that it parses to a `T::VarULE`
    #[inline]
    pub fn parse_bytes_with_length(
        len: u32,
        slice: &'a [u8],
    ) -> Result<Self, VarZeroVecFormatError> {
        let len_minus_one = len.checked_sub(1);
        // The empty VZV is special-cased to the empty slice
        let Some(len_minus_one) = len_minus_one else {
            return Ok(VarZeroVecComponents {
                len: 0,
                indices: &[],
                things: &[],
                marker: PhantomData,
            });
        };
        // The indices array is one element shorter since the first index is always 0,
        // so we use len_minus_one
        let indices_bytes = slice
            .get(..F::Index::SIZE * (len_minus_one as usize))
            .ok_or(VarZeroVecFormatError::Metadata)?;
        let things = slice
            .get(F::Index::SIZE * (len_minus_one as usize)..)
            .ok_or(VarZeroVecFormatError::Metadata)?;

        let borrowed = VarZeroVecComponents {
            len,
            indices: indices_bytes,
            things,
            marker: PhantomData,
        };

        // Validate that every index range parses as a T; establishes the safety
        // invariant relied upon by the unchecked accessors below.
        borrowed.check_indices_and_things()?;

        Ok(borrowed)
    }

    /// Construct a [`VarZeroVecComponents`] from a byte slice that has previously
    /// successfully returned a [`VarZeroVecComponents`] when passed to
    /// [`VarZeroVecComponents::parse_bytes()`]. Will return the same
    /// object as one would get from calling [`VarZeroVecComponents::parse_bytes()`].
    ///
    /// # Safety
    /// The bytes must have previously successfully run through
    /// [`VarZeroVecComponents::parse_bytes()`]
    pub unsafe fn from_bytes_unchecked(slice: &'a [u8]) -> Self {
        // The empty VZV is special-cased to the empty slice
        if slice.is_empty() {
            return VarZeroVecComponents {
                len: 0,
                indices: &[],
                things: &[],
                marker: PhantomData,
            };
        }
        // Safety: the caller guarantees the slice parsed, so it is at least
        // F::Len::SIZE bytes long
        let (len_bytes, data_bytes) = unsafe { slice.split_at_unchecked(F::Len::SIZE) };
        // Safety: F::Len allows all byte sequences
        let len_ule = F::Len::slice_from_bytes_unchecked(len_bytes);

        let len = len_ule.get_unchecked(0).iule_to_usize();
        let len_u32 = len as u32;

        // Safety: This method requires the bytes to have passed through `parse_bytes()`
        // whereas we're calling something that asks for `parse_bytes_with_length()`.
        // The two methods perform similar validation, with parse_bytes() validating an additional
        // 4-byte `length` header.
        Self::from_bytes_unchecked_with_length(len_u32, data_bytes)
    }

    /// Construct a [`VarZeroVecComponents`] from a byte slice that has previously
    /// successfully returned a [`VarZeroVecComponents`] when passed to
    /// [`VarZeroVecComponents::parse_bytes()`]. Will return the same
    /// object as one would get from calling [`VarZeroVecComponents::parse_bytes()`].
    ///
    /// # Safety
    /// The len,bytes must have previously successfully run through
    /// [`VarZeroVecComponents::parse_bytes_with_length()`]
    pub unsafe fn from_bytes_unchecked_with_length(len: u32, slice: &'a [u8]) -> Self {
        let len_minus_one = len.checked_sub(1);
        // The empty VZV is special-cased to the empty slice
        let Some(len_minus_one) = len_minus_one else {
            return VarZeroVecComponents {
                len: 0,
                indices: &[],
                things: &[],
                marker: PhantomData,
            };
        };
        // The indices array is one element shorter since the first index is always 0,
        // so we use len_minus_one
        let indices_bytes = slice.get_unchecked(..F::Index::SIZE * (len_minus_one as usize));
        let things = slice.get_unchecked(F::Index::SIZE * (len_minus_one as usize)..);

        VarZeroVecComponents {
            len,
            indices: indices_bytes,
            things,
            marker: PhantomData,
        }
    }

    /// Get the number of elements in this vector
    #[inline]
    pub fn len(self) -> usize {
        self.len as usize
    }

    /// Returns `true` if the vector contains no elements.
    #[inline]
    pub fn is_empty(self) -> bool {
        self.len == 0
    }

    /// Get the idx'th element out of this slice. Returns `None` if out of bounds.
    #[inline]
    pub fn get(self, idx: usize) -> Option<&'a T> {
        if idx >= self.len() {
            return None;
        }
        // Safety: bounds checked directly above
        Some(unsafe { self.get_unchecked(idx) })
    }

    /// Get the idx'th element out of this slice. Does not bounds check.
    ///
    /// Safety:
    /// - `idx` must be in bounds (`idx < self.len()`)
    #[inline]
    pub(crate) unsafe fn get_unchecked(self, idx: usize) -> &'a T {
        let range = self.get_things_range(idx);
        let things_slice = self.things.get_unchecked(range);
        T::from_bytes_unchecked(things_slice)
    }

    /// Get the range in `things` for the element at `idx`. Does not bounds check.
    ///
    /// Safety:
    /// - `idx` must be in bounds (`idx < self.len()`)
    #[inline]
    pub(crate) unsafe fn get_things_range(self, idx: usize) -> Range<usize> {
        // The start of element idx is indices[idx - 1]; element 0 starts at 0
        // (the first offset is implicit and never stored)
        let start = if let Some(idx_minus_one) = idx.checked_sub(1) {
            self.indices_slice()
                .get_unchecked(idx_minus_one)
                .iule_to_usize()
        } else {
            0
        };
        // The end of the last element is the end of `things`; otherwise it is
        // the start of the next element, indices[idx]
        let end = if idx + 1 == self.len() {
            self.things.len()
        } else {
            self.indices_slice().get_unchecked(idx).iule_to_usize()
        };
        debug_assert!(start <= end);
        start..end
    }

    /// Get the size, in bytes, of the indices array
    pub(crate) unsafe fn get_indices_size(self) -> usize {
        self.indices.len()
    }

    /// Check the internal invariants of VarZeroVecComponents:
    ///
    /// - `indices[i]..indices[i+1]` must index into a valid section of
    ///   `things`, such that it parses to a `T::VarULE`
    /// - `indices[len - 1]..things.len()` must index into a valid section of
    ///   `things`, such that it parses to a `T::VarULE`
    /// - `indices` is monotonically increasing
    ///
    /// This method is NOT allowed to call any other methods on VarZeroVecComponents since all other methods
    /// assume that the slice has been passed through check_indices_and_things
    #[inline]
    #[allow(clippy::len_zero)] // more explicit to enforce safety invariants
    fn check_indices_and_things(self) -> Result<(), VarZeroVecFormatError> {
        if self.len() == 0 {
            // An empty vector must have no trailing data bytes
            if self.things.len() > 0 {
                return Err(VarZeroVecFormatError::Metadata);
            } else {
                return Ok(());
            }
        }
        let indices_slice = self.indices_slice();
        assert_eq!(self.len(), indices_slice.len() + 1);
        // Safety: i is in bounds (assertion above)
        let mut start = 0;
        for i in 0..self.len() {
            // The indices array is offset by 1: indices[0] is the end of the first
            // element and the start of the next, since the start of the first element
            // is always things[0]. So to get the end we get element `i`.
            let end = if let Some(end) = indices_slice.get(i) {
                end.iule_to_usize()
            } else {
                // This only happens at i = self.len() - 1 = indices_slice.len() + 1 - 1
                // = indices_slice.len().
                // This is the last `end`, which is always the size of
                // `things` and thus never stored in the array
                self.things.len()
            };
            // Monotonicity check: each element range must be non-negative…
            if start > end {
                return Err(VarZeroVecFormatError::Metadata);
            }
            // …and lie within the data segment
            if end > self.things.len() {
                return Err(VarZeroVecFormatError::Metadata);
            }
            // Safety: start..end is a valid range in self.things
            let bytes = unsafe { self.things.get_unchecked(start..end) };
            T::parse_bytes(bytes).map_err(VarZeroVecFormatError::Values)?;
            start = end;
        }
        Ok(())
    }

    /// Create an iterator over the Ts contained in VarZeroVecComponents
    #[inline]
    pub fn iter(self) -> VarZeroSliceIter<'a, T, F> {
        VarZeroSliceIter::new(self)
    }

    /// Collect every element into an owned `Vec` of boxed `T`s.
    #[cfg(feature = "alloc")]
    pub fn to_vec(self) -> alloc::vec::Vec<alloc::boxed::Box<T>> {
        self.iter().map(T::to_boxed).collect()
    }

    /// Reinterpret the raw `indices` bytes as a typed slice of index integers.
    #[inline]
    fn indices_slice(&self) -> &'a [F::Index] {
        // Safety (per VarZeroVecFormat): F::Index accepts all byte sequences
        unsafe { F::Index::slice_from_bytes_unchecked(self.indices) }
    }

    // Dump a debuggable representation of this type
    #[allow(unused)] // useful for debugging
    #[cfg(feature = "alloc")]
    pub(crate) fn dump(&self) -> alloc::string::String {
        let indices = self
            .indices_slice()
            .iter()
            .copied()
            .map(IntegerULE::iule_to_usize)
            .collect::<alloc::vec::Vec<_>>();
        alloc::format!("VarZeroVecComponents {{ indices: {indices:?} }}")
    }
}

/// An iterator over VarZeroSlice
#[derive(Debug)]
pub struct VarZeroSliceIter<'a, T: ?Sized, F = Index16> {
    components: VarZeroVecComponents<'a, T, F>,
    // Position of the next element to yield
    index: usize,
    // Safety invariant: must be a valid index into the data segment of `components`, or an index at the end
    // i.e.
    // start_index <= components.things.len()
    //
    // It must be a valid index into the `things` array of components, coming from `components.indices_slice()`
    start_index: usize,
}

impl<'a, T: VarULE + ?Sized, F: VarZeroVecFormat> VarZeroSliceIter<'a, T, F> {
    /// Construct an iterator positioned at the first element.
    fn new(c: VarZeroVecComponents<'a, T, F>) -> Self {
        Self {
            components: c,
            index: 0,
            // Invariant upheld, 0 is always a valid index-or-end
            start_index: 0,
        }
    }
}
impl<'a, T: VarULE + ?Sized, F: VarZeroVecFormat> Iterator for VarZeroSliceIter<'a, T, F> {
    type Item = &'a T;

    fn next(&mut self) -> Option<Self::Item> {
        // Note: the indices array doesn't contain 0 or len, we need to specially handle those edges. The 0 is handled
        // by start_index, and the len is handled by the code for `end`.

        if self.index >= self.components.len() {
            return None;
        }
        // Invariant established: self.index is in bounds for self.components.len(),
        // which means it is in bounds for self.components.indices_slice() since that has the same length

        let end = if self.index + 1 == self.components.len() {
            // We don't store the end index since it is computable, so the last element should use self.components.things.len()
            self.components.things.len()
        } else {
            // Safety: self.index was known to be in bounds from the bounds check above.
            unsafe {
                self.components
                    .indices_slice()
                    .get_unchecked(self.index)
                    .iule_to_usize()
            }
        };
        // Invariant established: end has the same invariant as self.start_index since it comes from indices_slice, which is guaranteed
        // to only contain valid indexes

        let item = unsafe {
            // Safety: self.start_index and end both have in-range invariants, plus they are valid indices from indices_slice
            // which means we can treat this data as a T
            T::from_bytes_unchecked(self.components.things.get_unchecked(self.start_index..end))
        };
        self.index += 1;
        // Invariant upheld: end has the same invariant as self.start_index
        self.start_index = end;
        Some(item)
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        // Exact: the element count is known up front
        let remainder = self.components.len() - self.index;
        (remainder, Some(remainder))
    }
}

impl<'a, T: VarULE + ?Sized, F: VarZeroVecFormat> ExactSizeIterator for VarZeroSliceIter<'a, T, F> {
    fn len(&self) -> usize {
        self.components.len() - self.index
    }
}

impl<'a, T, F> VarZeroVecComponents<'a, T, F>
where
    T: VarULE,
    T: ?Sized,
    T: Ord,
    F: VarZeroVecFormat,
{
    /// Binary searches a sorted `VarZeroVecComponents<T>` for the given element. For more information, see
    /// the primitive function [`binary_search`](slice::binary_search).
    pub fn binary_search(&self, needle: &T) -> Result<usize, usize> {
        self.binary_search_by(|probe| probe.cmp(needle))
    }

    /// Binary searches a sorted `VarZeroVecComponents<T>` for `needle` within
    /// `range`. Returns `None` if the range is out of bounds; returned indices
    /// are relative to the range start.
    pub fn binary_search_in_range(
        &self,
        needle: &T,
        range: Range<usize>,
    ) -> Option<Result<usize, usize>> {
        self.binary_search_in_range_by(|probe| probe.cmp(needle), range)
    }
}

impl<'a, T, F> VarZeroVecComponents<'a, T, F>
where
    T: VarULE,
    T: ?Sized,
    F: VarZeroVecFormat,
{
    /// Binary searches a sorted `VarZeroVecComponents<T>` for the given predicate. For more information, see
    /// the primitive function [`binary_search_by`](slice::binary_search_by).
    pub fn binary_search_by(&self, predicate: impl FnMut(&T) -> Ordering) -> Result<usize, usize> {
        // Safety: 0 and len are in range
        unsafe { self.binary_search_in_range_unchecked(predicate, 0..self.len()) }
    }

    // Binary search within a range.
    // Values returned are relative to the range start!
    pub fn binary_search_in_range_by(
        &self,
        predicate: impl FnMut(&T) -> Ordering,
        range: Range<usize>,
    ) -> Option<Result<usize, usize>> {
        if range.end > self.len() {
            return None;
        }
        if range.end < range.start {
            return None;
        }
        // Safety: We bounds checked above: end is in-bounds or len, and start is <= end
        let range_absolute =
            unsafe { self.binary_search_in_range_unchecked(predicate, range.clone()) };
        // The values returned are relative to the range start
        Some(
            range_absolute
                .map(|o| o - range.start)
                .map_err(|e| e - range.start),
        )
    }

    /// Safety: range must be in range for the slice (start <= len, end <= len, start <= end)
    unsafe fn binary_search_in_range_unchecked(
        &self,
        mut predicate: impl FnMut(&T) -> Ordering,
        range: Range<usize>,
    ) -> Result<usize, usize> {
        // Function invariant: size is always end - start
        let mut start = range.start;
        let mut end = range.end;
        let mut size;

        // Loop invariant: 0 <= start < end <= len
        // This invariant is initialized by the function safety invariants and the loop condition
        while start < end {
            size = end - start;
            // This establishes mid < end (which implies mid < len)
            // size is end - start. start + size is end (which is <= len).
            // mid = start + size/2 will be less than end
            let mid = start + size / 2;

            // Safety: mid is < end <= len, so in-range
            let cmp = predicate(self.get_unchecked(mid));

            match cmp {
                Ordering::Less => {
                    // This retains the loop invariant since it
                    // increments start, and we already have 0 <= start
                    // start < end is enforced by the loop condition
                    start = mid + 1;
                }
                Ordering::Greater => {
                    // mid < end, so this decreases end.
                    // This means end <= len is still true, and
                    // end > start is enforced by the loop condition
                    end = mid;
                }
                Ordering::Equal => return Ok(mid),
            }
        }
        // Not found: `start` is the insertion point that keeps the slice sorted
        Err(start)
    }
}

/// Collects the bytes for a VarZeroSlice into a Vec.
#[cfg(feature = "alloc")]
pub fn get_serializable_bytes_non_empty<T, A, F>(elements: &[A]) -> Option<alloc::vec::Vec<u8>>
where
    T: VarULE + ?Sized,
    A: EncodeAsVarULE<T>,
    F: VarZeroVecFormat,
{
    debug_assert!(!elements.is_empty());
    // Returns None when the encoded size would overflow the index type
    let len = compute_serializable_len::<T, A, F>(elements)?;
    debug_assert!(
        len >= F::Len::SIZE as u32,
        "Must have at least F::Len::SIZE bytes to hold the length of the vector"
    );
    let mut output = alloc::vec![0u8; len as usize];
    write_serializable_bytes::<T, A, F>(elements, &mut output);
    Some(output)
}

/// Writes the bytes for a VarZeroLengthlessSlice into an output buffer.
/// Usable for a VarZeroSlice if you first write the length bytes.
///
/// Every byte in the buffer will be initialized after calling this function.
///
/// # Panics
///
/// Panics if the buffer is not exactly the correct length.
pub fn write_serializable_bytes_without_length<T, A, F>(elements: &[A], output: &mut [u8])
where
    T: VarULE + ?Sized,
    A: EncodeAsVarULE<T>,
    F: VarZeroVecFormat,
{
    assert!(elements.len() <= F::Len::MAX_VALUE as usize);
    if elements.is_empty() {
        return;
    }

    // idx_offset = offset from the start of the buffer for the next index
    let mut idx_offset: usize = 0;
    // first_dat_offset = offset from the start of the buffer of the first data block
    let first_dat_offset: usize = idx_offset + (elements.len() - 1) * F::Index::SIZE;
    // dat_offset = offset from the start of the buffer of the next data block
    let mut dat_offset: usize = first_dat_offset;

    for (i, element) in elements.iter().enumerate() {
        let element_len = element.encode_var_ule_len();

        // The first index is always 0. We don't write it, or update the idx offset.
        if i != 0 {
            let idx_limit = idx_offset + F::Index::SIZE;
            #[allow(clippy::indexing_slicing)] // Function contract allows panicky behavior
            let idx_slice = &mut output[idx_offset..idx_limit];
            // VZV expects data offsets to be stored relative to the first data block
            let idx = dat_offset - first_dat_offset;
            assert!(idx <= F::Index::MAX_VALUE as usize);
            #[allow(clippy::expect_used)] // this function is explicitly panicky
            let bytes_to_write = F::Index::iule_from_usize(idx).expect(F::Index::TOO_LARGE_ERROR);
            idx_slice.copy_from_slice(ULE::slice_as_bytes(&[bytes_to_write]));

            idx_offset = idx_limit;
        }

        // Encode this element's payload immediately after the previous one
        let dat_limit = dat_offset + element_len;
        #[allow(clippy::indexing_slicing)] // Function contract allows panicky behavior
        let dat_slice = &mut output[dat_offset..dat_limit];
        element.encode_var_ule_write(dat_slice);
        debug_assert_eq!(T::validate_bytes(dat_slice), Ok(()));

        dat_offset = dat_limit;
    }

    // Postconditions: all indices written, and the data exactly fills the buffer
    debug_assert_eq!(idx_offset, F::Index::SIZE * (elements.len() - 1));
    assert_eq!(dat_offset, output.len());
}

/// Writes the bytes for a VarZeroSlice into an output buffer.
///
/// Every byte in the buffer will be initialized after calling this function.
///
/// # Panics
///
/// Panics if the buffer is not exactly the correct length.
pub fn write_serializable_bytes<T, A, F>(elements: &[A], output: &mut [u8])
where
    T: VarULE + ?Sized,
    A: EncodeAsVarULE<T>,
    F: VarZeroVecFormat,
{
    // The empty vector is represented as an empty buffer: nothing to write
    if elements.is_empty() {
        return;
    }
    assert!(elements.len() <= F::Len::MAX_VALUE as usize);
    #[allow(clippy::expect_used)] // This function is explicitly panicky
    let num_elements_ule = F::Len::iule_from_usize(elements.len()).expect(F::Len::TOO_LARGE_ERROR);
    // Write the length header, then delegate the indices + data segments
    #[allow(clippy::indexing_slicing)] // Function contract allows panicky behavior
    output[0..F::Len::SIZE].copy_from_slice(ULE::slice_as_bytes(&[num_elements_ule]));

    #[allow(clippy::indexing_slicing)] // Function contract allows panicky behavior
    write_serializable_bytes_without_length::<T, A, F>(elements, &mut output[F::Len::SIZE..]);
}

/// Computes the encoded byte length of `elements` excluding the length header.
/// Returns `None` on arithmetic overflow or when the total exceeds what
/// `F::Index` can address.
pub fn compute_serializable_len_without_length<T, A, F>(elements: &[A]) -> Option<u32>
where
    T: VarULE + ?Sized,
    A: EncodeAsVarULE<T>,
    F: VarZeroVecFormat,
{
    let elements_len = elements.len();
    let Some(elements_len_minus_one) = elements_len.checked_sub(1) else {
        // Empty vec is optimized to an empty byte representation
        return Some(0);
    };
    // One stored index per element except the first (whose offset is implicit)
    let idx_len: u32 = u32::try_from(elements_len_minus_one)
        .ok()?
        .checked_mul(F::Index::SIZE as u32)?;
    let data_len: u32 = elements
        .iter()
        .map(|v| u32::try_from(v.encode_var_ule_len()).ok())
        .try_fold(0u32, |s, v| s.checked_add(v?))?;
    let ret = idx_len.checked_add(data_len);
    // Reject totals the index type could not represent as an offset
    if let Some(r) = ret {
        if r >= F::Index::MAX_VALUE {
            return None;
        }
    }
    ret
}

/// Computes the full encoded byte length of `elements` including the length
/// header. Returns `None` on overflow.
pub fn compute_serializable_len<T, A, F>(elements: &[A]) -> Option<u32>
where
    T: VarULE + ?Sized,
    A: EncodeAsVarULE<T>,
    F: VarZeroVecFormat,
{
    compute_serializable_len_without_length::<T, A, F>(elements).map(|x| x + F::Len::SIZE as u32)
}
// This file is part of ICU4X. For terms of use, please see the file
// called LICENSE at the top level of the ICU4X source tree
// (online at: https://github.com/unicode-org/icu4x/blob/main/LICENSE ).

use crate::vecs::{Index16, Index32};
use crate::{ule::VarULE, VarZeroSlice, VarZeroVec};
use databake::*;

/// Bakes a VarZeroVec16 into Rust source: `new()` when empty, otherwise an
/// `unsafe from_bytes_unchecked` over the already-validated byte buffer.
impl<T: VarULE + ?Sized> Bake for VarZeroVec<'_, T, Index16> {
    fn bake(&self, env: &CrateEnv) -> TokenStream {
        env.insert("zerovec");
        if self.is_empty() {
            quote! { zerovec::vecs::VarZeroVec16::new() }
        } else {
            let bytes = databake::Bake::bake(&self.as_bytes(), env);
            // Safe because self.as_bytes is a safe input
            quote! { unsafe { zerovec::vecs::VarZeroVec16::from_bytes_unchecked(#bytes) } }
        }
    }
}

/// Same as the Index16 impl above, for the 32-bit index format.
impl<T: VarULE + ?Sized> Bake for VarZeroVec<'_, T, Index32> {
    fn bake(&self, env: &CrateEnv) -> TokenStream {
        env.insert("zerovec");
        if self.is_empty() {
            quote! { zerovec::vecs::VarZeroVec32::new() }
        } else {
            let bytes = databake::Bake::bake(&self.as_bytes(), env);
            // Safe because self.as_bytes is a safe input
            quote! { unsafe { zerovec::vecs::VarZeroVec32::from_bytes_unchecked(#bytes) } }
        }
    }
}

impl<T: VarULE + ?Sized> BakeSize for VarZeroVec<'_, T, Index16> {
    fn borrows_size(&self) -> usize {
        self.as_bytes().len()
    }
}

impl<T: VarULE + ?Sized> BakeSize for VarZeroVec<'_, T, Index32> {
    fn borrows_size(&self) -> usize {
        self.as_bytes().len()
    }
}

/// Bakes a borrowed VarZeroSlice16: `new_empty()` when empty, otherwise an
/// `unsafe from_bytes_unchecked` over the byte buffer.
impl<T: VarULE + ?Sized> Bake for &VarZeroSlice<T, Index16> {
    fn bake(&self, env: &CrateEnv) -> TokenStream {
        env.insert("zerovec");
        if self.is_empty() {
            quote! { zerovec::vecs::VarZeroSlice16::new_empty() }
        } else {
            let bytes = databake::Bake::bake(&self.as_bytes(), env);
            // Safe because self.as_bytes is a safe input
            quote! { unsafe { zerovec::vecs::VarZeroSlice16::from_bytes_unchecked(#bytes) } }
        }
    }
}

/// Same as the Index16 impl above, for the 32-bit index format.
impl<T: VarULE + ?Sized> Bake for &VarZeroSlice<T, Index32> {
    fn bake(&self, env: &CrateEnv) -> TokenStream {
        env.insert("zerovec");
        if self.is_empty() {
            quote! { zerovec::vecs::VarZeroSlice32::new_empty() }
        } else {
            let bytes = databake::Bake::bake(&self.as_bytes(), env);
            // Safe because self.as_bytes is a safe input
            quote! { unsafe { zerovec::vecs::VarZeroSlice32::from_bytes_unchecked(#bytes) } }
        }
    }
}

impl<T: VarULE + ?Sized> BakeSize for &VarZeroSlice<T, Index16> {
    fn borrows_size(&self) -> usize {
        // An empty slice bakes to `new_empty()` and borrows nothing
        if self.is_empty() {
            0
        } else {
            self.as_bytes().len()
        }
    }
}

impl<T: VarULE + ?Sized> BakeSize for &VarZeroSlice<T, Index32> {
    fn borrows_size(&self) -> usize {
        // An empty slice bakes to `new_empty()` and borrows nothing
        if self.is_empty() {
            0
        } else {
            self.as_bytes().len()
        }
    }
}

#[test]
fn test_baked_vec() {
    test_bake!(
        VarZeroVec<str>,
        const,
        crate::vecs::VarZeroVec16::new(),
        zerovec
    );
    test_bake!(
        VarZeroVec<str>,
        const,
        unsafe {
            crate::vecs::VarZeroVec16::from_bytes_unchecked(b"\x02\0\0\0\0\0\x05\0helloworld")
        },
        zerovec
    );
}

#[test]
fn test_baked_slice() {
    test_bake!(
        &VarZeroSlice<str>,
        const,
        crate::vecs::VarZeroSlice16::new_empty(),
        zerovec
    );
    test_bake!(
        &VarZeroSlice<str>,
        const,
        unsafe {
            crate::vecs::VarZeroSlice16::from_bytes_unchecked(b"\x02\0\0\0\0\0\x05\0helloworld")
        },
        zerovec
    );
}
// This file is part of ICU4X. For terms of use, please see the file // called LICENSE at the top level of the ICU4X source tree // (online at: https://github.com/unicode-org/icu4x/blob/main/LICENSE ). use core::fmt::Display; #[derive(Debug)] pub enum VarZeroVecFormatError { /// The byte buffer was not in the appropriate format for VarZeroVec. Metadata, /// One of the values could not be decoded. Values(crate::ule::UleError), } impl core::error::Error for VarZeroVecFormatError {} impl Display for VarZeroVecFormatError { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { match self { Self::Metadata => write!(f, "VarZeroVecFormatError: metadata"), Self::Values(e) => write!(f, "VarZeroVecFormatError: {e}"), } } }
// This file is part of ICU4X. For terms of use, please see the file
// called LICENSE at the top level of the ICU4X source tree
// (online at: https://github.com/unicode-org/icu4x/blob/main/LICENSE ).

use super::components::VarZeroVecComponents;
use super::*;
use crate::ule::*;
use core::marker::PhantomData;
use core::mem;

/// A slice representing the index and data tables of a VarZeroVec,
/// *without* any length fields. The length field is expected to be stored elsewhere.
///
/// Without knowing the length this is of course unsafe to use directly.
#[repr(transparent)]
#[derive(PartialEq, Eq)]
pub(crate) struct VarZeroLengthlessSlice<T: ?Sized, F> {
    /// Zero-sized marker tying this slice to its element type `T` and index format `F`.
    marker: PhantomData<(F, T)>,
    /// The original slice this was constructed from
    // Safety invariant: This field must have successfully passed through
    // VarZeroVecComponents::parse_bytes_with_length() with the length
    // associated with this value.
    entire_slice: [u8],
}

impl<T: VarULE + ?Sized, F: VarZeroVecFormat> VarZeroLengthlessSlice<T, F> {
    /// Obtain a [`VarZeroVecComponents`] borrowing from the internal buffer
    ///
    /// Safety: `len` must be the length associated with this value
    #[inline]
    pub(crate) unsafe fn as_components<'a>(&'a self, len: u32) -> VarZeroVecComponents<'a, T, F> {
        unsafe {
            // safety: VarZeroSlice is guaranteed to parse here
            // (entire_slice's safety invariant plus the caller-supplied `len`)
            VarZeroVecComponents::from_bytes_unchecked_with_length(len, &self.entire_slice)
        }
    }

    /// Parse a VarZeroLengthlessSlice from a slice of the appropriate format
    ///
    /// Slices of the right format can be obtained via [`VarZeroSlice::as_bytes()`]
    pub fn parse_bytes<'a>(len: u32, slice: &'a [u8]) -> Result<&'a Self, UleError> {
        // Validate up front so that the unchecked constructor below (and the
        // safety invariant on `entire_slice`) is satisfied.
        let _ = VarZeroVecComponents::<T, F>::parse_bytes_with_length(len, slice)
            .map_err(|_| UleError::parse::<Self>())?;
        unsafe {
            // Safety: We just verified that it is of the correct format.
            Ok(Self::from_bytes_unchecked(slice))
        }
    }

    /// Uses a `&[u8]` buffer as a `VarZeroLengthlessSlice<T>` without any verification.
    ///
    /// # Safety
    ///
    /// `bytes` need to be an output from [`VarZeroLengthlessSlice::as_bytes()`], or alternatively
    /// successfully pass through `parse_bytes` (with `len`)
    ///
    /// The length associated with this value will be the length associated with the original slice.
    pub(crate) const unsafe fn from_bytes_unchecked(bytes: &[u8]) -> &Self {
        // self is really just a wrapper around a byte slice
        // (the cast is layout-sound because of #[repr(transparent)] on the struct)
        mem::transmute(bytes)
    }

    /// Uses a `&mut [u8]` buffer as a `VarZeroLengthlessSlice<T>` without any verification.
    ///
    /// # Safety
    ///
    /// `bytes` need to be an output from [`VarZeroLengthlessSlice::as_bytes()`], or alternatively
    /// be valid to be passed to `from_bytes_unchecked_with_length`
    ///
    /// The length associated with this value will be the length associated with the original slice.
    pub(crate) unsafe fn from_bytes_unchecked_mut(bytes: &mut [u8]) -> &mut Self {
        // self is really just a wrapper around a byte slice
        // (the cast is layout-sound because of #[repr(transparent)] on the struct)
        mem::transmute(bytes)
    }

    /// Get one of this slice's elements
    ///
    /// # Safety
    ///
    /// `index` must be in range, and `len` must be the length associated with this
    /// instance of VarZeroLengthlessSlice.
    pub(crate) unsafe fn get_unchecked(&self, len: u32, idx: usize) -> &T {
        self.as_components(len).get_unchecked(idx)
    }

    /// Get a reference to the entire encoded backing buffer of this slice
    ///
    /// The bytes can be passed back to [`Self::parse_bytes()`].
    ///
    /// To take the bytes as a vector, see [`VarZeroVec::into_bytes()`].
    #[inline]
    pub(crate) const fn as_bytes(&self) -> &[u8] {
        &self.entire_slice
    }

    /// Get the bytes behind this as a mutable slice
    ///
    /// # Safety
    ///
    /// - `len` is the length associated with this VarZeroLengthlessSlice
    /// - The resultant slice is only mutated in a way such that it remains a valid `T`
    ///
    /// # Panics
    ///
    /// Panics when idx is not in bounds for this slice
    pub(crate) unsafe fn get_bytes_at_mut(&mut self, len: u32, idx: usize) -> &mut [u8] {
        let components = self.as_components(len);
        let range = components.get_things_range(idx);
        let offset = components.get_indices_size();

        // get_indices_size() returns the start of the things slice, and get_things_range()
        // returns a range in-bounds of the things slice
        #[allow(clippy::indexing_slicing)]
        &mut self.entire_slice[offset..][range]
    }
}
// This file is part of ICU4X. For terms of use, please see the file // called LICENSE at the top level of the ICU4X source tree // (online at: https://github.com/unicode-org/icu4x/blob/main/LICENSE ). //! See [`VarZeroVec`](crate::VarZeroVec) for details pub(crate) mod components; pub(crate) mod error; pub(crate) mod lengthless; #[cfg(feature = "alloc")] pub(crate) mod owned; pub(crate) mod slice; pub(crate) mod vec; #[cfg(feature = "databake")] mod databake; #[cfg(feature = "serde")] mod serde; pub use crate::{VarZeroSlice, VarZeroVec}; #[doc(hidden)] pub use components::VarZeroVecComponents; pub use components::{Index16, Index32, Index8, VarZeroSliceIter, VarZeroVecFormat}; #[cfg(feature = "alloc")] pub use owned::VarZeroVecOwned; pub use error::VarZeroVecFormatError;
// This file is part of ICU4X. For terms of use, please see the file // called LICENSE at the top level of the ICU4X source tree // (online at: https://github.com/unicode-org/icu4x/blob/main/LICENSE ). // The mutation operations in this file should panic to prevent undefined behavior #![allow(clippy::unwrap_used)] #![allow(clippy::expect_used)] #![allow(clippy::indexing_slicing)] #![allow(clippy::panic)] use super::*; use crate::ule::*; use alloc::vec::Vec; use core::any; use core::convert::TryInto; use core::marker::PhantomData; use core::ops::Deref; use core::ops::Range; use core::{fmt, ptr, slice}; use super::components::IntegerULE; /// A fully-owned [`VarZeroVec`]. This type has no lifetime but has the same /// internal buffer representation of [`VarZeroVec`], making it cheaply convertible to /// [`VarZeroVec`] and [`VarZeroSlice`]. /// /// The `F` type parameter is a [`VarZeroVecFormat`] (see its docs for more details), which can be used to select the /// precise format of the backing buffer with various size and performance tradeoffs. It defaults to [`Index16`]. pub struct VarZeroVecOwned<T: ?Sized, F = Index16> { marker1: PhantomData<T>, marker2: PhantomData<F>, // safety invariant: must parse into a valid VarZeroVecComponents entire_slice: Vec<u8>, } impl<T: ?Sized, F> Clone for VarZeroVecOwned<T, F> { fn clone(&self) -> Self { VarZeroVecOwned { marker1: PhantomData, marker2: PhantomData, entire_slice: self.entire_slice.clone(), } } } // The effect of a shift on the indices in the varzerovec. 
#[derive(PartialEq)]
enum ShiftType {
    /// A new element (and a new index slot) is being added.
    Insert,
    /// An existing element is being overwritten; the index count is unchanged.
    Replace,
    /// An element (and its index slot) is being removed.
    Remove,
}

impl<T: VarULE + ?Sized, F: VarZeroVecFormat> Deref for VarZeroVecOwned<T, F> {
    type Target = VarZeroSlice<T, F>;
    fn deref(&self) -> &VarZeroSlice<T, F> {
        self.as_slice()
    }
}

impl<T: VarULE + ?Sized, F> VarZeroVecOwned<T, F> {
    /// Construct an empty VarZeroVecOwned
    pub fn new() -> Self {
        Self {
            marker1: PhantomData,
            marker2: PhantomData,
            entire_slice: Vec::new(),
        }
    }
}

impl<T: VarULE + ?Sized, F: VarZeroVecFormat> VarZeroVecOwned<T, F> {
    /// Construct a VarZeroVecOwned from a [`VarZeroSlice`] by cloning the internal data
    pub fn from_slice(slice: &VarZeroSlice<T, F>) -> Self {
        Self {
            marker1: PhantomData,
            marker2: PhantomData,
            entire_slice: slice.as_bytes().into(),
        }
    }

    /// Construct a VarZeroVecOwned from a list of elements
    ///
    /// Returns an error string when the encoded buffer would exceed what the
    /// index format `F` can address.
    pub fn try_from_elements<A>(elements: &[A]) -> Result<Self, &'static str>
    where
        A: EncodeAsVarULE<T>,
    {
        Ok(if elements.is_empty() {
            // Canonical empty representation: an empty byte buffer.
            Self::from_slice(VarZeroSlice::new_empty())
        } else {
            Self {
                marker1: PhantomData,
                marker2: PhantomData,
                // TODO(#1410): Rethink length errors in VZV.
                entire_slice: components::get_serializable_bytes_non_empty::<T, A, F>(elements)
                    .ok_or(F::Index::TOO_LARGE_ERROR)?,
            }
        })
    }

    /// Obtain this `VarZeroVec` as a [`VarZeroSlice`]
    pub fn as_slice(&self) -> &VarZeroSlice<T, F> {
        let slice: &[u8] = &self.entire_slice;
        unsafe {
            // safety: the slice is known to come from a valid parsed VZV
            VarZeroSlice::from_bytes_unchecked(slice)
        }
    }

    /// Try to allocate a buffer with enough capacity for `capacity`
    /// elements. Since `T` can take up an arbitrary size this will
    /// just allocate enough space for 4-byte Ts
    pub(crate) fn with_capacity(capacity: usize) -> Self {
        Self {
            marker1: PhantomData,
            marker2: PhantomData,
            // One index slot plus a guessed 4 bytes of data per element.
            entire_slice: Vec::with_capacity(capacity * (F::Index::SIZE + 4)),
        }
    }

    /// Try to reserve space for `capacity`
    /// elements. Since `T` can take up an arbitrary size this will
    /// just allocate enough space for 4-byte Ts
    pub(crate) fn reserve(&mut self, capacity: usize) {
        self.entire_slice.reserve(capacity * (F::Index::SIZE + 4))
    }

    /// Get the position of a specific element in the data segment.
    ///
    /// If `idx == self.len()`, it will return the size of the data segment (where a new element would go).
    ///
    /// ## Safety
    /// `idx <= self.len()` and `self.as_encoded_bytes()` is well-formed.
    unsafe fn element_position_unchecked(&self, idx: usize) -> usize {
        let len = self.len();
        let out = if idx == len {
            // One-past-the-end: the full size of the data segment
            // (total buffer minus length field minus the len-1 stored indices).
            self.entire_slice.len() - F::Len::SIZE - (F::Index::SIZE * (len - 1))
        } else if let Some(idx) = self.index_data(idx) {
            idx.iule_to_usize()
        } else {
            // Index 0 has no stored index entry; its data always starts at offset 0.
            0
        };
        debug_assert!(out + F::Len::SIZE + (len - 1) * F::Index::SIZE <= self.entire_slice.len());
        out
    }

    /// Get the range of a specific element in the data segment.
    ///
    /// ## Safety
    /// `idx < self.len()` and `self.as_encoded_bytes()` is well-formed.
    unsafe fn element_range_unchecked(&self, idx: usize) -> core::ops::Range<usize> {
        let start = self.element_position_unchecked(idx);
        let end = self.element_position_unchecked(idx + 1);
        debug_assert!(start <= end, "{start} > {end}");
        start..end
    }

    /// Set the number of elements in the list without any checks.
    ///
    /// ## Safety
    /// No safe functions may be called until `self.as_encoded_bytes()` is well-formed.
    unsafe fn set_len(&mut self, len: usize) {
        assert!(len <= F::Len::MAX_VALUE as usize);
        let len_bytes = len.to_le_bytes();
        let len_ule = F::Len::iule_from_usize(len).expect(F::Len::TOO_LARGE_ERROR);
        self.entire_slice[0..F::Len::SIZE].copy_from_slice(ULE::slice_as_bytes(&[len_ule]));
        // Double-check that the length fits in the length field
        // (all little-endian bytes beyond the field must be zero).
        assert_eq!(len_bytes[F::Len::SIZE..].iter().sum::<u8>(), 0);
    }

    /// Get the range in the full data for a given index. Returns None for index 0
    /// since there is no stored index for it.
    fn index_range(index: usize) -> Option<Range<usize>> {
        let index_minus_one = index.checked_sub(1)?;
        let pos = F::Len::SIZE + F::Index::SIZE * index_minus_one;
        Some(pos..pos + F::Index::SIZE)
    }

    /// Return the raw bytes representing the given `index`. Returns None when given index 0
    ///
    /// ## Safety
    /// The index must be valid, and self.as_encoded_bytes() must be well-formed
    unsafe fn index_data(&self, index: usize) -> Option<&F::Index> {
        let index_range = Self::index_range(index)?;
        Some(&F::Index::slice_from_bytes_unchecked(&self.entire_slice[index_range])[0])
    }

    /// Return the mutable slice representing the given `index`. Returns None when given index 0
    ///
    /// ## Safety
    /// The index must be valid. self.as_encoded_bytes() must have allocated space
    /// for this index, but need not have its length appropriately set.
    unsafe fn index_data_mut(&mut self, index: usize) -> Option<&mut F::Index> {
        let ptr = self.entire_slice.as_mut_ptr();
        let range = Self::index_range(index)?;

        // Doing this instead of just `get_unchecked_mut()` because it's unclear
        // if `get_unchecked_mut()` can be called out of bounds on a slice even
        // if we know the buffer is larger.
        let data = slice::from_raw_parts_mut(ptr.add(range.start), F::Index::SIZE);
        Some(&mut F::Index::iule_from_bytes_unchecked_mut(data)[0])
    }

    /// Shift the indices starting with and after `starting_index` by the provided `amount`.
    ///
    /// ## Panics
    /// Should never be called with a starting index of 0, since that index cannot be shifted.
    ///
    /// ## Safety
    /// Adding `amount` to each index after `starting_index` must not result in the slice becoming malformed.
    /// The length of the slice must be correctly set.
    unsafe fn shift_indices(&mut self, starting_index: usize, amount: i32) {
        // Stored indices start at element 1, so subtract one to address the array.
        let normalized_idx = starting_index
            .checked_sub(1)
            .expect("shift_indices called with a 0 starting index");
        let len = self.len();
        let indices = F::Index::iule_from_bytes_unchecked_mut(
            &mut self.entire_slice[F::Len::SIZE..F::Len::SIZE + F::Index::SIZE * (len - 1)],
        );
        for idx in &mut indices[normalized_idx..] {
            let mut new_idx = idx.iule_to_usize();
            // checked arithmetic: any overflow/underflow here means a broken
            // invariant, so panic rather than corrupt the buffer.
            if amount > 0 {
                new_idx = new_idx.checked_add(amount.try_into().unwrap()).unwrap();
            } else {
                new_idx = new_idx.checked_sub((-amount).try_into().unwrap()).unwrap();
            }
            *idx = F::Index::iule_from_usize(new_idx).expect(F::Index::TOO_LARGE_ERROR);
        }
    }

    /// Get this [`VarZeroVecOwned`] as a borrowed [`VarZeroVec`]
    ///
    /// If you wish to repeatedly call methods on this [`VarZeroVecOwned`],
    /// it is more efficient to perform this conversion first
    pub fn as_varzerovec<'a>(&'a self) -> VarZeroVec<'a, T, F> {
        self.as_slice().into()
    }

    /// Empty the vector
    pub fn clear(&mut self) {
        self.entire_slice.clear()
    }

    /// Consume this vector and return the backing buffer
    #[inline]
    pub fn into_bytes(self) -> Vec<u8> {
        self.entire_slice
    }

    /// Invalidate and resize the data at an index, optionally inserting or removing the index.
    /// Also updates affected indices and the length.
    ///
    /// `new_size` is the encoded byte size of the element that is going to be inserted
    ///
    /// Returns a slice to the new element data - it doesn't contain uninitialized data but its value is indeterminate.
    ///
    /// ## Safety
    /// - `index` must be a valid index, or, if `shift_type == ShiftType::Insert`, `index == self.len()` is allowed.
    /// - `new_size` mustn't result in the data segment growing larger than `F::Index::MAX_VALUE`.
    unsafe fn shift(&mut self, index: usize, new_size: usize, shift_type: ShiftType) -> &mut [u8] {
        // The format of the encoded data is:
        //  - `F::Len::SIZE` bytes of "len"
        //  - (len - 1) * `F::Index::SIZE` bytes for an array of indices
        //  - the actual data to which the indices point
        //
        // When inserting or removing an element, the size of the indices segment must be changed,
        // so the data before the target element must be shifted by `F::Index::SIZE` bytes in
        // addition to the shifting needed for the new element size.
        let len = self.len();
        let slice_len = self.entire_slice.len();

        let prev_element = match shift_type {
            ShiftType::Insert => {
                let pos = self.element_position_unchecked(index);
                // In the case of an insert, there's no previous element,
                // so it's an empty range at the new position.
                pos..pos
            }
            _ => self.element_range_unchecked(index),
        };

        // How much shifting must be done in bytes due to removal/insertion of an index.
        let index_shift: i64 = match shift_type {
            ShiftType::Insert => F::Index::SIZE as i64,
            ShiftType::Replace => 0,
            ShiftType::Remove => -(F::Index::SIZE as i64),
        };
        // The total shift in byte size of the owned slice.
        let shift: i64 =
            new_size as i64 - (prev_element.end - prev_element.start) as i64 + index_shift;
        let new_slice_len = slice_len.wrapping_add(shift as usize);
        if shift > 0 {
            if new_slice_len > F::Index::MAX_VALUE as usize {
                panic!(
                    "Attempted to grow VarZeroVec to an encoded size that does not fit within the length size used by {}",
                    any::type_name::<F>()
                );
            }
            // Grow first so all subsequent pointer arithmetic stays in-bounds.
            self.entire_slice.resize(new_slice_len, 0);
        }

        // Now that we've ensured there's enough space, we can shift the data around.
        {
            // Note: There are no references introduced between pointer creation and pointer use, and all
            //       raw pointers are derived from a single &mut. This preserves pointer provenance.
            let slice_range = self.entire_slice.as_mut_ptr_range();
            // The start of the indices buffer
            let indices_start = slice_range.start.add(F::Len::SIZE);
            let old_slice_end = slice_range.start.add(slice_len);
            let data_start = indices_start.add((len - 1) * F::Index::SIZE);
            let prev_element_p =
                data_start.add(prev_element.start)..data_start.add(prev_element.end);

            // The memory range of the affected index.
            // When inserting: where the new index goes.
            // When removing: where the index being removed is.
            // When replacing: unused.
            // Will be None when the affected index is index 0, which is special
            let index_range = if let Some(index_minus_one) = index.checked_sub(1) {
                let index_start = indices_start.add(F::Index::SIZE * index_minus_one);
                Some(index_start..index_start.add(F::Index::SIZE))
            } else {
                None
            };

            // memmove-style copy of `block` to `to`; ranges may overlap.
            unsafe fn shift_bytes(block: Range<*const u8>, to: *mut u8) {
                debug_assert!(block.end >= block.start);
                ptr::copy(block.start, to, block.end.offset_from(block.start) as usize);
            }

            if shift_type == ShiftType::Remove {
                if let Some(ref index_range) = index_range {
                    shift_bytes(index_range.end..prev_element_p.start, index_range.start);
                } else {
                    // We are removing the first index, so we skip the second index and copy it over. The second index
                    // is now zero and unnecessary.
                    shift_bytes(
                        indices_start.add(F::Index::SIZE)..prev_element_p.start,
                        indices_start,
                    )
                }
            }

            // Shift data after the element to its new position.
            shift_bytes(
                prev_element_p.end..old_slice_end,
                prev_element_p
                    .start
                    .offset((new_size as i64 + index_shift) as isize),
            );

            let first_affected_index = match shift_type {
                ShiftType::Insert => {
                    if let Some(index_range) = index_range {
                        // Move data before the element forward by `F::Index::SIZE` to make
                        // space for a new index.
                        shift_bytes(index_range.start..prev_element_p.start, index_range.end);
                        let index_data = self
                            .index_data_mut(index)
                            .expect("If index_range is some, index is > 0 and should not panic in index_data_mut");
                        *index_data = F::Index::iule_from_usize(prev_element.start)
                            .expect(F::Index::TOO_LARGE_ERROR);
                    } else {
                        // We are adding a new index 0. There's nothing in the indices array for index 0, but the element
                        // that is currently at index 0 will become index 1 and need a value
                        // We first shift bytes to make space
                        shift_bytes(
                            indices_start..prev_element_p.start,
                            indices_start.add(F::Index::SIZE),
                        );
                        // And then we write a temporary zero to the zeroeth index, which will get shifted later
                        let index_data = self
                            .index_data_mut(1)
                            .expect("Should be able to write to index 1");
                        *index_data = F::Index::iule_from_usize(0).expect("0 is always valid!");
                    }
                    self.set_len(len + 1);
                    index + 1
                }
                ShiftType::Remove => {
                    self.set_len(len - 1);
                    if index == 0 {
                        // We don't need to shift index 0 since index 0 is not stored in the indices buffer
                        index + 1
                    } else {
                        index
                    }
                }
                ShiftType::Replace => index + 1,
            };
            // No raw pointer use should occur after this point (because of self.index_data and self.set_len).

            // Set the new slice length. This must be done after shifting data around to avoid uninitialized data.
            self.entire_slice.set_len(new_slice_len);

            // Shift the affected indices.
            self.shift_indices(first_affected_index, (shift - index_shift) as i32);
        };

        debug_assert!(self.verify_integrity());

        // Return a mut slice to the new element data.
        let element_pos = F::Len::SIZE
            + (self.len() - 1) * F::Index::SIZE
            + self.element_position_unchecked(index);
        &mut self.entire_slice[element_pos..element_pos + new_size]
    }

    /// Checks the internal invariants of the vec to ensure safe code will not cause UB.
    /// Returns whether integrity was verified.
    ///
    /// Note: an index is valid if it doesn't point to data past the end of the slice and is
    /// less than or equal to all future indices. The length of the index segment is not part of each index.
    fn verify_integrity(&self) -> bool {
        if self.is_empty() {
            if self.entire_slice.is_empty() {
                return true;
            } else {
                panic!(
                    "VarZeroVecOwned integrity: Found empty VarZeroVecOwned with a nonempty slice"
                );
            }
        }
        let len = unsafe {
            <F::Len as ULE>::slice_from_bytes_unchecked(&self.entire_slice[..F::Len::SIZE])[0]
                .iule_to_usize()
        };
        if len == 0 {
            // An empty vec must have an empty slice: there is only a single valid byte representation.
            panic!("VarZeroVecOwned integrity: Found empty VarZeroVecOwned with a nonempty slice");
        }
        if self.entire_slice.len() < F::Len::SIZE + (len - 1) * F::Index::SIZE {
            panic!("VarZeroVecOwned integrity: Not enough room for the indices");
        }
        let data_len = self.entire_slice.len() - F::Len::SIZE - (len - 1) * F::Index::SIZE;
        if data_len > F::Index::MAX_VALUE as usize {
            panic!("VarZeroVecOwned integrity: Data segment is too long");
        }

        // Test index validity.
        let indices = unsafe {
            F::Index::slice_from_bytes_unchecked(
                &self.entire_slice[F::Len::SIZE..F::Len::SIZE + (len - 1) * F::Index::SIZE],
            )
        };
        for idx in indices {
            if idx.iule_to_usize() > data_len {
                panic!("VarZeroVecOwned integrity: Indices must not point past the data segment");
            }
        }
        for window in indices.windows(2) {
            if window[0].iule_to_usize() > window[1].iule_to_usize() {
                panic!("VarZeroVecOwned integrity: Indices must be in non-decreasing order");
            }
        }
        true
    }

    /// Insert an element at the end of this vector
    pub fn push<A: EncodeAsVarULE<T> + ?Sized>(&mut self, element: &A) {
        self.insert(self.len(), element)
    }

    /// Insert an element at index `idx`
    pub fn insert<A: EncodeAsVarULE<T> + ?Sized>(&mut self, index: usize, element: &A) {
        let len = self.len();
        if index > len {
            panic!("Called out-of-bounds insert() on VarZeroVec, index {index} len {len}");
        }

        let value_len = element.encode_var_ule_len();

        if len == 0 {
            // First element: build the buffer from scratch (length field + data,
            // no index entries needed for a one-element vec).
            let header_len = F::Len::SIZE; // Index array is size 0 for len = 1
            let cap = header_len + value_len;
            self.entire_slice.resize(cap, 0);
            self.entire_slice[0] = 1; // set length
            element.encode_var_ule_write(&mut self.entire_slice[header_len..]);
            return;
        }

        assert!(value_len < F::Index::MAX_VALUE as usize);
        unsafe {
            let place = self.shift(index, value_len, ShiftType::Insert);
            element.encode_var_ule_write(place);
        }
    }

    /// Remove the element at index `idx`
    pub fn remove(&mut self, index: usize) {
        let len = self.len();
        if index >= len {
            panic!("Called out-of-bounds remove() on VarZeroVec, index {index} len {len}");
        }
        if len == 1 {
            // This is removing the last element. Set the slice to empty to ensure all empty vecs have empty data slices.
            self.entire_slice.clear();
            return;
        }
        unsafe {
            self.shift(index, 0, ShiftType::Remove);
        }
    }

    /// Replace the element at index `idx` with another
    pub fn replace<A: EncodeAsVarULE<T> + ?Sized>(&mut self, index: usize, element: &A) {
        let len = self.len();
        if index >= len {
            panic!("Called out-of-bounds replace() on VarZeroVec, index {index} len {len}");
        }

        let value_len = element.encode_var_ule_len();

        assert!(value_len < F::Index::MAX_VALUE as usize);
        unsafe {
            let place = self.shift(index, value_len, ShiftType::Replace);
            element.encode_var_ule_write(place);
        }
    }
}

impl<T: VarULE + ?Sized, F: VarZeroVecFormat> fmt::Debug for VarZeroVecOwned<T, F>
where
    T: fmt::Debug,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Delegate to the slice's Debug via Deref.
        VarZeroSlice::fmt(self, f)
    }
}

impl<T: VarULE + ?Sized, F> Default for VarZeroVecOwned<T, F> {
    fn default() -> Self {
        Self::new()
    }
}

impl<T, A, F> PartialEq<&'_ [A]> for VarZeroVecOwned<T, F>
where
    T: VarULE + ?Sized,
    T: PartialEq,
    A: AsRef<T>,
    F: VarZeroVecFormat,
{
    #[inline]
    fn eq(&self, other: &&[A]) -> bool {
        // Element-wise comparison against any slice of AsRef<T> values.
        self.iter().eq(other.iter().map(|t| t.as_ref()))
    }
}

impl<'a, T: ?Sized + VarULE, F: VarZeroVecFormat> From<&'a VarZeroSlice<T, F>>
    for VarZeroVecOwned<T, F>
{
    fn from(other: &'a VarZeroSlice<T, F>) -> Self {
        Self::from_slice(other)
    }
}

#[cfg(test)]
mod test {
    use super::VarZeroVecOwned;

    #[test]
    fn test_insert_integrity() {
        let mut items: Vec<String> = Vec::new();
        let mut zerovec = VarZeroVecOwned::<str>::new();

        // Insert into an empty vec.
        items.insert(0, "1234567890".into());
        zerovec.insert(0, "1234567890");
        assert_eq!(zerovec, &*items);

        zerovec.insert(1, "foo3");
        items.insert(1, "foo3".into());
        assert_eq!(zerovec, &*items);

        // Insert at the end.
        items.insert(items.len(), "qwertyuiop".into());
        zerovec.insert(zerovec.len(), "qwertyuiop");
        assert_eq!(zerovec, &*items);

        items.insert(0, "asdfghjkl;".into());
        zerovec.insert(0, "asdfghjkl;");
        assert_eq!(zerovec, &*items);

        items.insert(2, "".into());
        zerovec.insert(2, "");
        assert_eq!(zerovec, &*items);
    }

    #[test]
    // ensure that inserting empty items works
    fn test_empty_inserts() {
        let mut items: Vec<String> = Vec::new();
        let mut zerovec = VarZeroVecOwned::<str>::new();

        // Insert into an empty vec.
        items.insert(0, "".into());
        zerovec.insert(0, "");
        assert_eq!(zerovec, &*items);

        items.insert(0, "".into());
        zerovec.insert(0, "");
        assert_eq!(zerovec, &*items);

        items.insert(0, "1234567890".into());
        zerovec.insert(0, "1234567890");
        assert_eq!(zerovec, &*items);

        items.insert(0, "".into());
        zerovec.insert(0, "");
        assert_eq!(zerovec, &*items);
    }

    #[test]
    fn test_small_insert_integrity() {
        // Tests that insert() works even when there
        // is not enough space for the new index in entire_slice.len()
        let mut items: Vec<String> = Vec::new();
        let mut zerovec = VarZeroVecOwned::<str>::new();

        // Insert into an empty vec.
        items.insert(0, "abc".into());
        zerovec.insert(0, "abc");
        assert_eq!(zerovec, &*items);

        zerovec.insert(1, "def");
        items.insert(1, "def".into());
        assert_eq!(zerovec, &*items);
    }

    #[test]
    #[should_panic]
    fn test_insert_past_end() {
        VarZeroVecOwned::<str>::new().insert(1, "");
    }

    #[test]
    fn test_remove_integrity() {
        let mut items: Vec<&str> = vec!["apples", "bananas", "eeples", "", "baneenees", "five", ""];
        let mut zerovec = VarZeroVecOwned::<str>::try_from_elements(&items).unwrap();

        for index in [0, 2, 4, 0, 1, 1, 0] {
            items.remove(index);
            zerovec.remove(index);
            assert_eq!(zerovec, &*items, "index {}, len {}", index, items.len());
        }
    }

    #[test]
    fn test_removing_last_element_clears() {
        let mut zerovec = VarZeroVecOwned::<str>::try_from_elements(&["buy some apples"]).unwrap();
        assert!(!zerovec.as_bytes().is_empty());
        zerovec.remove(0);
        assert!(zerovec.as_bytes().is_empty());
    }

    #[test]
    #[should_panic]
    fn test_remove_past_end() {
        VarZeroVecOwned::<str>::new().remove(0);
    }

    #[test]
    fn test_replace_integrity() {
        let mut items: Vec<&str> = vec!["apples", "bananas", "eeples", "", "baneenees", "five", ""];
        let mut zerovec = VarZeroVecOwned::<str>::try_from_elements(&items).unwrap();

        // Replace with an element of the same size (and the first element)
        items[0] = "blablah";
        zerovec.replace(0, "blablah");
        assert_eq!(zerovec, &*items);

        // Replace with a smaller element
        items[1] = "twily";
        zerovec.replace(1, "twily");
        assert_eq!(zerovec, &*items);

        // Replace an empty element
        items[3] = "aoeuidhtns";
        zerovec.replace(3, "aoeuidhtns");
        assert_eq!(zerovec, &*items);

        // Replace the last element
        items[6] = "0123456789";
        zerovec.replace(6, "0123456789");
        assert_eq!(zerovec, &*items);

        // Replace with an empty element
        items[2] = "";
        zerovec.replace(2, "");
        assert_eq!(zerovec, &*items);
    }

    #[test]
    #[should_panic]
    fn test_replace_past_end() {
        VarZeroVecOwned::<str>::new().replace(0, "");
    }
}
// This file is part of ICU4X. For terms of use, please see the file
// called LICENSE at the top level of the ICU4X source tree
// (online at: https://github.com/unicode-org/icu4x/blob/main/LICENSE ).

use super::{VarZeroSlice, VarZeroVec, VarZeroVecFormat};
use crate::ule::*;
use alloc::boxed::Box;
use alloc::vec::Vec;
use core::fmt;
use core::marker::PhantomData;
use serde::de::{self, Deserialize, Deserializer, SeqAccess, Visitor};
#[cfg(feature = "serde")]
use serde::ser::{Serialize, SerializeSeq, Serializer};

/// Serde visitor for [`VarZeroVec`]: accepts a borrowed byte buffer
/// (binary formats, zero-copy) or a sequence of elements (human-readable formats).
struct VarZeroVecVisitor<T: ?Sized, F: VarZeroVecFormat> {
    #[allow(clippy::type_complexity)] // this is a private marker type, who cares
    // `fn() -> Box<T>` avoids making the visitor pretend to own a `T`.
    marker: PhantomData<(fn() -> Box<T>, F)>,
}

impl<T: ?Sized, F: VarZeroVecFormat> Default for VarZeroVecVisitor<T, F> {
    fn default() -> Self {
        Self {
            marker: PhantomData,
        }
    }
}

impl<'de, T, F> Visitor<'de> for VarZeroVecVisitor<T, F>
where
    T: VarULE + ?Sized,
    Box<T>: Deserialize<'de>,
    F: VarZeroVecFormat,
{
    type Value = VarZeroVec<'de, T, F>;

    fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
        formatter.write_str("a sequence or borrowed buffer of bytes")
    }

    fn visit_borrowed_bytes<E>(self, bytes: &'de [u8]) -> Result<Self::Value, E>
    where
        E: de::Error,
    {
        // Zero-copy path: validate the buffer and borrow from it directly.
        VarZeroVec::parse_bytes(bytes).map_err(de::Error::custom)
    }

    fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
    where
        A: SeqAccess<'de>,
    {
        // Owned path: deserialize each element, then re-encode into a VarZeroVec.
        let mut vec: Vec<Box<T>> = if let Some(capacity) = seq.size_hint() {
            Vec::with_capacity(capacity)
        } else {
            Vec::new()
        };
        while let Some(value) = seq.next_element::<Box<T>>()? {
            vec.push(value);
        }
        Ok(VarZeroVec::from(&vec))
    }
}

/// This impl requires enabling the optional `serde` Cargo feature of the `zerovec` crate
impl<'de, 'a, T, F> Deserialize<'de> for VarZeroVec<'a, T, F>
where
    T: VarULE + ?Sized,
    Box<T>: Deserialize<'de>,
    F: VarZeroVecFormat,
    'de: 'a,
{
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        let visitor = VarZeroVecVisitor::<T, F>::default();
        // Human-readable formats (e.g. JSON) use a sequence of elements;
        // binary formats use the raw backing byte buffer.
        if deserializer.is_human_readable() {
            deserializer.deserialize_seq(visitor)
        } else {
            deserializer.deserialize_bytes(visitor)
        }
    }
}

/// This impl requires enabling the optional `serde` Cargo feature of the `zerovec` crate
impl<'de, 'a, T, F> Deserialize<'de> for &'a VarZeroSlice<T, F>
where
    T: VarULE + ?Sized,
    F: VarZeroVecFormat,
    'de: 'a,
{
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        if deserializer.is_human_readable() {
            // A bare reference must borrow from the input; human-readable
            // formats cannot provide borrowed bytes, so reject them.
            Err(de::Error::custom(
                "&VarZeroSlice cannot be deserialized from human-readable formats",
            ))
        } else {
            let bytes = <&[u8]>::deserialize(deserializer)?;
            VarZeroSlice::<T, F>::parse_bytes(bytes).map_err(de::Error::custom)
        }
    }
}

/// This impl requires enabling the optional `serde` Cargo feature of the `zerovec` crate
impl<'de, T, F> Deserialize<'de> for Box<VarZeroSlice<T, F>>
where
    T: VarULE + ?Sized,
    Box<T>: Deserialize<'de>,
    F: VarZeroVecFormat,
{
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        // Deserialize as a (possibly borrowed) VarZeroVec, then box the data.
        let deserialized = VarZeroVec::<T, F>::deserialize(deserializer)?;
        Ok(deserialized.to_boxed())
    }
}

/// This impl requires enabling the optional `serde` Cargo feature of the `zerovec` crate
#[cfg(feature = "serde")]
impl<T, F> Serialize for VarZeroVec<'_, T, F>
where
    T: Serialize + VarULE + ?Sized,
    F: VarZeroVecFormat,
{
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        if serializer.is_human_readable() {
            // Human-readable formats get a sequence of elements...
            let mut seq = serializer.serialize_seq(Some(self.len()))?;
            for value in self.iter() {
                seq.serialize_element(value)?;
            }
            seq.end()
        } else {
            // ...while binary formats get the raw backing buffer.
            serializer.serialize_bytes(self.as_bytes())
        }
    }
}

/// This impl requires enabling the optional `serde` Cargo feature of the `zerovec` crate
#[cfg(feature = "serde")]
impl<T, F> Serialize for VarZeroSlice<T, F>
where
    T: Serialize + VarULE + ?Sized,
    F: VarZeroVecFormat,
{
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        // Delegate to the VarZeroVec impl via a borrowed view.
        self.as_varzerovec().serialize(serializer)
    }
}

#[cfg(test)]
#[allow(non_camel_case_types)]
mod test {
    use crate::{VarZeroSlice, VarZeroVec};

    #[derive(serde::Serialize, serde::Deserialize)]
    struct DeriveTest_VarZeroVec<'data> {
        #[serde(borrow)]
        _data: VarZeroVec<'data, str>,
    }

    #[derive(serde::Serialize, serde::Deserialize)]
    struct DeriveTest_VarZeroSlice<'data> {
        #[serde(borrow)]
        _data: &'data VarZeroSlice<str>,
    }

    #[derive(serde::Serialize, serde::Deserialize)]
    struct DeriveTest_VarZeroVec_of_VarZeroSlice<'data> {
        #[serde(borrow)]
        _data: VarZeroVec<'data, VarZeroSlice<str>>,
    }

    // ["foo", "bar", "baz", "dolor", "quux", "lorem ipsum"];
    const BYTES: &[u8] = &[
        6, 0, 3, 0, 6, 0, 9, 0, 14, 0, 18, 0, 102, 111, 111, 98, 97, 114, 98, 97, 122, 100, 111,
        108, 111, 114, 113, 117, 117, 120, 108, 111, 114, 101, 109, 32, 105, 112, 115, 117, 109,
    ];
    const JSON_STR: &str = "[\"foo\",\"bar\",\"baz\",\"dolor\",\"quux\",\"lorem ipsum\"]";
    const BINCODE_BUF: &[u8] = &[
        41, 0, 0, 0, 0, 0, 0, 0, 6, 0, 3, 0, 6, 0, 9, 0, 14, 0, 18, 0, 102, 111, 111, 98, 97,
        114, 98, 97, 122, 100, 111, 108, 111, 114, 113, 117, 117, 120, 108, 111, 114, 101, 109,
        32, 105, 112, 115, 117, 109,
    ];

    // ["w", "ω", "文", "𑄃"]
    const NONASCII_STR: &[&str] = &["w", "ω", "文", "𑄃"];
    const NONASCII_BYTES: &[u8] = &[
        4, 0, 1, 0, 3, 0, 6, 0, 119, 207, 137, 230, 150, 135, 240, 145, 132, 131,
    ];

    #[test]
    fn test_serde_json() {
        let zerovec_orig: VarZeroVec<str> = VarZeroVec::parse_bytes(BYTES).expect("parse");
        let json_str = serde_json::to_string(&zerovec_orig).expect("serialize");
        assert_eq!(JSON_STR, json_str);
        // VarZeroVec should deserialize from JSON to either Vec or VarZeroVec
        let vec_new: Vec<Box<str>> =
            serde_json::from_str(&json_str).expect("deserialize from buffer to Vec");
        assert_eq!(zerovec_orig.to_vec(), vec_new);
        let zerovec_new: VarZeroVec<str> =
            serde_json::from_str(&json_str).expect("deserialize from buffer to VarZeroVec");
        assert_eq!(zerovec_orig.to_vec(), zerovec_new.to_vec());
        assert!(zerovec_new.is_owned());
    }

    #[test]
    fn test_serde_bincode() {
        let zerovec_orig: VarZeroVec<str> = VarZeroVec::parse_bytes(BYTES).expect("parse");
        let bincode_buf = bincode::serialize(&zerovec_orig).expect("serialize");
        assert_eq!(BINCODE_BUF, bincode_buf);
        let zerovec_new: VarZeroVec<str> =
            bincode::deserialize(&bincode_buf).expect("deserialize from buffer to VarZeroVec");
        assert_eq!(zerovec_orig.to_vec(), zerovec_new.to_vec());
        assert!(!zerovec_new.is_owned());
    }

    #[test]
    fn test_vzv_borrowed() {
        let zerovec_orig: &VarZeroSlice<str> = VarZeroSlice::parse_bytes(BYTES).expect("parse");
        let bincode_buf = bincode::serialize(&zerovec_orig).expect("serialize");
        assert_eq!(BINCODE_BUF, bincode_buf);
        let zerovec_new: &VarZeroSlice<str> =
            bincode::deserialize(&bincode_buf).expect("deserialize from buffer to VarZeroSlice");
        assert_eq!(zerovec_orig.to_vec(), zerovec_new.to_vec());
    }

    #[test]
    fn test_nonascii_bincode() {
        let src_vec = NONASCII_STR
            .iter()
            .copied()
            .map(Box::<str>::from)
            .collect::<Vec<_>>();
        let mut zerovec: VarZeroVec<str> =
            VarZeroVec::parse_bytes(NONASCII_BYTES).expect("parse");
        assert_eq!(zerovec.to_vec(), src_vec);
        let bincode_buf = bincode::serialize(&zerovec).expect("serialize");
        let zerovec_result =
            bincode::deserialize::<VarZeroVec<str>>(&bincode_buf).expect("deserialize");
        assert_eq!(zerovec_result.to_vec(), src_vec);

        // try again with owned zerovec
        zerovec.make_mut();
        let bincode_buf = bincode::serialize(&zerovec).expect("serialize");
        let zerovec_result =
            bincode::deserialize::<VarZeroVec<str>>(&bincode_buf).expect("deserialize");
        assert_eq!(zerovec_result.to_vec(), src_vec);
    }
}
// This file is part of ICU4X. For terms of use, please see the file
// called LICENSE at the top level of the ICU4X source tree
// (online at: https://github.com/unicode-org/icu4x/blob/main/LICENSE ).

use super::components::{VarZeroSliceIter, VarZeroVecComponents};
use super::vec::VarZeroVecInner;
use super::*;
use crate::ule::*;
use core::cmp::{Ord, Ordering, PartialOrd};
use core::fmt;
use core::marker::PhantomData;
use core::mem;
use core::ops::Index;
use core::ops::Range;

/// A zero-copy "slice", that works for unsized types, i.e. the zero-copy version of `[T]`
/// where `T` is not `Sized`.
///
/// This behaves similarly to [`VarZeroVec<T>`], however [`VarZeroVec<T>`] is allowed to contain
/// owned data and as such is ideal for deserialization since most human readable
/// serialization formats cannot unconditionally deserialize zero-copy.
///
/// This type can be used inside [`VarZeroVec<T>`](crate::VarZeroVec) and [`ZeroMap`](crate::ZeroMap):
/// This essentially allows for the construction of zero-copy types isomorphic to `Vec<Vec<T>>` by instead
/// using `VarZeroVec<ZeroSlice<T>>`.
///
/// The `F` type parameter is a [`VarZeroVecFormat`] (see its docs for more details), which can be used to select the
/// precise format of the backing buffer with various size and performance tradeoffs. It defaults to [`Index16`].
///
/// This type can be nested within itself to allow for multi-level nested `Vec`s.
///
/// # Examples
///
/// ## Nested Slices
///
/// The following code constructs the conceptual zero-copy equivalent of `Vec<Vec<Vec<str>>>`
///
/// ```rust
/// use zerovec::{VarZeroSlice, VarZeroVec};
/// let strings_1: Vec<&str> = vec!["foo", "bar", "baz"];
/// let strings_2: Vec<&str> = vec!["twelve", "seventeen", "forty two"];
/// let strings_3: Vec<&str> = vec!["我", "喜歡", "烏龍茶"];
/// let strings_4: Vec<&str> = vec!["w", "ω", "文", "𑄃"];
/// let strings_12 = vec![&*strings_1, &*strings_2];
/// let strings_34 = vec![&*strings_3, &*strings_4];
/// let all_strings = vec![strings_12, strings_34];
///
/// let vzv_1: VarZeroVec<str> = VarZeroVec::from(&strings_1);
/// let vzv_2: VarZeroVec<str> = VarZeroVec::from(&strings_2);
/// let vzv_3: VarZeroVec<str> = VarZeroVec::from(&strings_3);
/// let vzv_4: VarZeroVec<str> = VarZeroVec::from(&strings_4);
/// let vzv_12 = VarZeroVec::from(&[vzv_1.as_slice(), vzv_2.as_slice()]);
/// let vzv_34 = VarZeroVec::from(&[vzv_3.as_slice(), vzv_4.as_slice()]);
/// let vzv_all = VarZeroVec::from(&[vzv_12.as_slice(), vzv_34.as_slice()]);
///
/// let reconstructed: Vec<Vec<Vec<String>>> = vzv_all
///     .iter()
///     .map(|v: &VarZeroSlice<VarZeroSlice<str>>| {
///         v.iter()
///             .map(|x: &VarZeroSlice<_>| {
///                 x.as_varzerovec()
///                     .iter()
///                     .map(|s| s.to_owned())
///                     .collect::<Vec<String>>()
///             })
///             .collect::<Vec<_>>()
///     })
///     .collect::<Vec<_>>();
/// assert_eq!(reconstructed, all_strings);
///
/// let bytes = vzv_all.as_bytes();
/// let vzv_from_bytes: VarZeroVec<VarZeroSlice<VarZeroSlice<str>>> =
///     VarZeroVec::parse_bytes(bytes).unwrap();
/// assert_eq!(vzv_from_bytes, vzv_all);
/// ```
///
/// ## Iterate over Windows
///
/// Although [`VarZeroSlice`] does not itself have a `.windows` iterator like
/// [core::slice::Windows], this behavior can be easily modeled using an iterator:
///
/// ```
/// use zerovec::VarZeroVec;
///
/// let vzv = VarZeroVec::<str>::from(&["a", "b", "c", "d"]);
/// # let mut pairs: Vec<(&str, &str)> = Vec::new();
///
/// let mut it = vzv.iter().peekable();
/// while let (Some(x), Some(y)) = (it.next(), it.peek()) {
///     // Evaluate (x, y) here.
/// #     pairs.push((x, y));
/// }
/// # assert_eq!(pairs, &[("a", "b"), ("b", "c"), ("c", "d")]);
/// ```
//
// safety invariant: The slice MUST be one which parses to
// a valid VarZeroVecComponents<T>
#[repr(transparent)]
pub struct VarZeroSlice<T: ?Sized, F = Index16> {
    marker: PhantomData<(F, T)>,
    /// The original slice this was constructed from
    entire_slice: [u8],
}

impl<T: VarULE + ?Sized, F: VarZeroVecFormat> VarZeroSlice<T, F> {
    /// Construct a new empty VarZeroSlice
    pub const fn new_empty() -> &'static Self {
        // The empty VZV is special-cased to the empty slice
        unsafe { mem::transmute(&[] as &[u8]) }
    }

    /// Obtain a [`VarZeroVecComponents`] borrowing from the internal buffer
    #[inline]
    pub(crate) fn as_components<'a>(&'a self) -> VarZeroVecComponents<'a, T, F> {
        unsafe {
            // safety: VarZeroSlice is guaranteed to parse here
            VarZeroVecComponents::from_bytes_unchecked(&self.entire_slice)
        }
    }

    /// Uses a `&[u8]` buffer as a `VarZeroSlice<T>` without any verification.
    ///
    /// # Safety
    ///
    /// `bytes` need to be an output from [`VarZeroSlice::as_bytes()`].
    pub const unsafe fn from_bytes_unchecked(bytes: &[u8]) -> &Self {
        // self is really just a wrapper around a byte slice
        // (`#[repr(transparent)]` over `[u8]`, so this transmute is layout-compatible)
        mem::transmute(bytes)
    }

    /// Get the number of elements in this slice
    ///
    /// # Example
    ///
    /// ```rust
    /// # use zerovec::VarZeroVec;
    ///
    /// let strings = vec!["foo", "bar", "baz", "quux"];
    /// let vec = VarZeroVec::<str>::from(&strings);
    ///
    /// assert_eq!(vec.len(), 4);
    /// ```
    pub fn len(&self) -> usize {
        self.as_components().len()
    }

    /// Returns `true` if the slice contains no elements.
    ///
    /// # Examples
    ///
    /// ```
    /// # use zerovec::VarZeroVec;
    ///
    /// let strings: Vec<String> = vec![];
    /// let vec = VarZeroVec::<str>::from(&strings);
    ///
    /// assert!(vec.is_empty());
    /// ```
    pub fn is_empty(&self) -> bool {
        self.as_components().is_empty()
    }

    /// Obtain an iterator over this slice's elements
    ///
    /// # Example
    ///
    /// ```rust
    /// # use zerovec::VarZeroVec;
    ///
    /// let strings = vec!["foo", "bar", "baz", "quux"];
    /// let vec = VarZeroVec::<str>::from(&strings);
    ///
    /// let mut iter_results: Vec<&str> = vec.iter().collect();
    /// assert_eq!(iter_results[0], "foo");
    /// assert_eq!(iter_results[1], "bar");
    /// assert_eq!(iter_results[2], "baz");
    /// assert_eq!(iter_results[3], "quux");
    /// ```
    pub fn iter<'b>(&'b self) -> VarZeroSliceIter<'b, T, F> {
        self.as_components().iter()
    }

    /// Get one of this slice's elements, returning `None` if the index is out of bounds
    ///
    /// # Example
    ///
    /// ```rust
    /// # use zerovec::VarZeroVec;
    ///
    /// let strings = vec!["foo", "bar", "baz", "quux"];
    /// let vec = VarZeroVec::<str>::from(&strings);
    ///
    /// let mut iter_results: Vec<&str> = vec.iter().collect();
    /// assert_eq!(vec.get(0), Some("foo"));
    /// assert_eq!(vec.get(1), Some("bar"));
    /// assert_eq!(vec.get(2), Some("baz"));
    /// assert_eq!(vec.get(3), Some("quux"));
    /// assert_eq!(vec.get(4), None);
    /// ```
    pub fn get(&self, idx: usize) -> Option<&T> {
        self.as_components().get(idx)
    }

    /// Get one of this slice's elements
    ///
    /// # Safety
    ///
    /// `index` must be in range
    ///
    /// # Example
    ///
    /// ```rust
    /// # use zerovec::VarZeroVec;
    ///
    /// let strings = vec!["foo", "bar", "baz", "quux"];
    /// let vec = VarZeroVec::<str>::from(&strings);
    ///
    /// let mut iter_results: Vec<&str> = vec.iter().collect();
    /// unsafe {
    ///     assert_eq!(vec.get_unchecked(0), "foo");
    ///     assert_eq!(vec.get_unchecked(1), "bar");
    ///     assert_eq!(vec.get_unchecked(2), "baz");
    ///     assert_eq!(vec.get_unchecked(3), "quux");
    /// }
    /// ```
    pub unsafe fn get_unchecked(&self, idx: usize) -> &T {
        self.as_components().get_unchecked(idx)
    }

    /// Obtain an owned `Vec<Box<T>>` out of this
    #[cfg(feature = "alloc")]
    pub fn to_vec(&self) -> alloc::vec::Vec<alloc::boxed::Box<T>> {
        self.as_components().to_vec()
    }

    /// Get a reference to the entire encoded backing buffer of this slice
    ///
    /// The bytes can be passed back to [`Self::parse_bytes()`].
    ///
    /// To take the bytes as a vector, see [`VarZeroVec::into_bytes()`].
    ///
    /// # Example
    ///
    /// ```rust
    /// # use zerovec::VarZeroVec;
    ///
    /// let strings = vec!["foo", "bar", "baz"];
    /// let vzv = VarZeroVec::<str>::from(&strings);
    ///
    /// assert_eq!(vzv, VarZeroVec::parse_bytes(vzv.as_bytes()).unwrap());
    /// ```
    #[inline]
    pub const fn as_bytes(&self) -> &[u8] {
        &self.entire_slice
    }

    /// Get this [`VarZeroSlice`] as a borrowed [`VarZeroVec`]
    ///
    /// If you wish to repeatedly call methods on this [`VarZeroSlice`],
    /// it is more efficient to perform this conversion first
    pub const fn as_varzerovec<'a>(&'a self) -> VarZeroVec<'a, T, F> {
        VarZeroVec(VarZeroVecInner::Borrowed(self))
    }

    /// Parse a VarZeroSlice from a slice of the appropriate format
    ///
    /// Slices of the right format can be obtained via [`VarZeroSlice::as_bytes()`]
    pub fn parse_bytes<'a>(slice: &'a [u8]) -> Result<&'a Self, UleError> {
        <Self as VarULE>::parse_bytes(slice)
    }
}

impl<T, F> VarZeroSlice<T, F>
where
    T: VarULE,
    T: ?Sized,
    T: Ord,
    F: VarZeroVecFormat,
{
    /// Binary searches a sorted `VarZeroVec<T>` for the given element. For more information, see
    /// the standard library function [`binary_search`].
    ///
    /// # Example
    ///
    /// ```
    /// # use zerovec::VarZeroVec;
    ///
    /// let strings = vec!["a", "b", "f", "g"];
    /// let vec = VarZeroVec::<str>::from(&strings);
    ///
    /// assert_eq!(vec.binary_search("f"), Ok(2));
    /// assert_eq!(vec.binary_search("e"), Err(2));
    /// ```
    ///
    /// [`binary_search`]: https://doc.rust-lang.org/std/primitive.slice.html#method.binary_search
    #[inline]
    pub fn binary_search(&self, x: &T) -> Result<usize, usize> {
        self.as_components().binary_search(x)
    }

    /// Binary searches a `VarZeroVec<T>` for the given element within a certain sorted range.
    ///
    /// If the range is out of bounds, returns `None`. Otherwise, returns a `Result` according
    /// to the behavior of the standard library function [`binary_search`].
    ///
    /// The index is returned relative to the start of the range.
    ///
    /// # Example
    ///
    /// ```
    /// # use zerovec::VarZeroVec;
    /// let strings = vec!["a", "b", "f", "g", "m", "n", "q"];
    /// let vec = VarZeroVec::<str>::from(&strings);
    ///
    /// // Same behavior as binary_search when the range covers the whole slice:
    /// assert_eq!(vec.binary_search_in_range("g", 0..7), Some(Ok(3)));
    /// assert_eq!(vec.binary_search_in_range("h", 0..7), Some(Err(4)));
    ///
    /// // Will not look outside of the range:
    /// assert_eq!(vec.binary_search_in_range("g", 0..1), Some(Err(1)));
    /// assert_eq!(vec.binary_search_in_range("g", 6..7), Some(Err(0)));
    ///
    /// // Will return indices relative to the start of the range:
    /// assert_eq!(vec.binary_search_in_range("g", 1..6), Some(Ok(2)));
    /// assert_eq!(vec.binary_search_in_range("h", 1..6), Some(Err(3)));
    ///
    /// // Will return `None` if the range is out of bounds:
    /// assert_eq!(vec.binary_search_in_range("x", 100..200), None);
    /// assert_eq!(vec.binary_search_in_range("x", 0..200), None);
    /// ```
    ///
    /// [`binary_search`]: https://doc.rust-lang.org/std/primitive.slice.html#method.binary_search
    #[inline]
    pub fn binary_search_in_range(
        &self,
        x: &T,
        range: Range<usize>,
    ) -> Option<Result<usize, usize>> {
        self.as_components().binary_search_in_range(x, range)
    }
}

impl<T, F> VarZeroSlice<T, F>
where
    T: VarULE,
    T: ?Sized,
    F: VarZeroVecFormat,
{
    /// Binary searches a sorted `VarZeroVec<T>` for the given predicate. For more information, see
    /// the standard library function [`binary_search_by`].
    ///
    /// # Example
    ///
    /// ```
    /// # use zerovec::VarZeroVec;
    /// let strings = vec!["a", "b", "f", "g"];
    /// let vec = VarZeroVec::<str>::from(&strings);
    ///
    /// assert_eq!(vec.binary_search_by(|probe| probe.cmp("f")), Ok(2));
    /// assert_eq!(vec.binary_search_by(|probe| probe.cmp("e")), Err(2));
    /// ```
    ///
    /// [`binary_search_by`]: https://doc.rust-lang.org/std/primitive.slice.html#method.binary_search_by
    #[inline]
    pub fn binary_search_by(&self, predicate: impl FnMut(&T) -> Ordering) -> Result<usize, usize> {
        self.as_components().binary_search_by(predicate)
    }

    /// Binary searches a `VarZeroVec<T>` for the given predicate within a certain sorted range.
    ///
    /// If the range is out of bounds, returns `None`. Otherwise, returns a `Result` according
    /// to the behavior of the standard library function [`binary_search`].
    ///
    /// The index is returned relative to the start of the range.
    ///
    /// # Example
    ///
    /// ```
    /// # use zerovec::VarZeroVec;
    /// let strings = vec!["a", "b", "f", "g", "m", "n", "q"];
    /// let vec = VarZeroVec::<str>::from(&strings);
    ///
    /// // Same behavior as binary_search when the range covers the whole slice:
    /// assert_eq!(
    ///     vec.binary_search_in_range_by(|v| v.cmp("g"), 0..7),
    ///     Some(Ok(3))
    /// );
    /// assert_eq!(
    ///     vec.binary_search_in_range_by(|v| v.cmp("h"), 0..7),
    ///     Some(Err(4))
    /// );
    ///
    /// // Will not look outside of the range:
    /// assert_eq!(
    ///     vec.binary_search_in_range_by(|v| v.cmp("g"), 0..1),
    ///     Some(Err(1))
    /// );
    /// assert_eq!(
    ///     vec.binary_search_in_range_by(|v| v.cmp("g"), 6..7),
    ///     Some(Err(0))
    /// );
    ///
    /// // Will return indices relative to the start of the range:
    /// assert_eq!(
    ///     vec.binary_search_in_range_by(|v| v.cmp("g"), 1..6),
    ///     Some(Ok(2))
    /// );
    /// assert_eq!(
    ///     vec.binary_search_in_range_by(|v| v.cmp("h"), 1..6),
    ///     Some(Err(3))
    /// );
    ///
    /// // Will return `None` if the range is out of bounds:
    /// assert_eq!(
    ///     vec.binary_search_in_range_by(|v| v.cmp("x"), 100..200),
    ///     None
    /// );
    /// assert_eq!(vec.binary_search_in_range_by(|v| v.cmp("x"), 0..200), None);
    /// ```
    ///
    /// [`binary_search`]: https://doc.rust-lang.org/std/primitive.slice.html#method.binary_search
    pub fn binary_search_in_range_by(
        &self,
        predicate: impl FnMut(&T) -> Ordering,
        range: Range<usize>,
    ) -> Option<Result<usize, usize>> {
        self.as_components()
            .binary_search_in_range_by(predicate, range)
    }
}

// Safety (based on the safety checklist on the VarULE trait):
//  1. VarZeroSlice does not include any uninitialized or padding bytes (achieved by `#[repr(transparent)]` on a
//     `[u8]` slice which satisfies this invariant)
//  2. VarZeroSlice is aligned to 1 byte (achieved by `#[repr(transparent)]` on a
//     `[u8]` slice which satisfies this invariant)
//  3. The impl of `validate_bytes()` returns an error if any byte is not valid.
//  4. The impl of `validate_bytes()` returns an error if the slice cannot be used in its entirety
//  5. The impl of `from_bytes_unchecked()` returns a reference to the same data.
//  6. `as_bytes()` is equivalent to a regular transmute of the underlying data
//  7. VarZeroSlice byte equality is semantic equality (relying on the guideline of the underlying VarULE type)
unsafe impl<T: VarULE + ?Sized + 'static, F: VarZeroVecFormat> VarULE for VarZeroSlice<T, F> {
    fn validate_bytes(bytes: &[u8]) -> Result<(), UleError> {
        // Parsing the components validates the whole buffer; the parsed value is discarded
        let _: VarZeroVecComponents<T, F> =
            VarZeroVecComponents::parse_bytes(bytes).map_err(|_| UleError::parse::<Self>())?;
        Ok(())
    }

    unsafe fn from_bytes_unchecked(bytes: &[u8]) -> &Self {
        // self is really just a wrapper around a byte slice
        mem::transmute(bytes)
    }

    fn as_bytes(&self) -> &[u8] {
        &self.entire_slice
    }
}

impl<T: VarULE + ?Sized, F: VarZeroVecFormat> Index<usize> for VarZeroSlice<T, F> {
    type Output = T;
    fn index(&self, index: usize) -> &Self::Output {
        #[allow(clippy::panic)] // documented
        match self.get(index) {
            Some(x) => x,
            None => panic!(
                "index out of bounds: the len is {} but the index is {index}",
                self.len()
            ),
        }
    }
}

impl<T, F> PartialEq<VarZeroSlice<T, F>> for VarZeroSlice<T, F>
where
    T: VarULE,
    T: ?Sized,
    T: PartialEq,
    F: VarZeroVecFormat,
{
    #[inline]
    fn eq(&self, other: &VarZeroSlice<T, F>) -> bool {
        // VarULE has an API guarantee that this is equivalent
        // to `T::VarULE::eq()`
        self.entire_slice.eq(&other.entire_slice)
    }
}

impl<T, F> Eq for VarZeroSlice<T, F>
where
    T: VarULE,
    T: ?Sized,
    T: Eq,
    F: VarZeroVecFormat,
{
}

impl<T: VarULE + ?Sized + PartialOrd, F: VarZeroVecFormat> PartialOrd for VarZeroSlice<T, F> {
    #[inline]
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        self.iter().partial_cmp(other.iter())
    }
}

impl<T: VarULE + ?Sized + Ord, F: VarZeroVecFormat> Ord for VarZeroSlice<T, F> {
    #[inline]
    fn cmp(&self, other: &Self) -> Ordering {
        self.iter().cmp(other.iter())
    }
}

impl<T: VarULE + ?Sized, F: VarZeroVecFormat> fmt::Debug for VarZeroSlice<T, F>
where
    T: fmt::Debug,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_list().entries(self.iter()).finish()
    }
}

impl<T: ?Sized, F: VarZeroVecFormat> AsRef<VarZeroSlice<T, F>> for VarZeroSlice<T, F> {
    fn as_ref(&self) -> &VarZeroSlice<T, F> {
        self
    }
}
// This file is part of ICU4X. For terms of use, please see the file
// called LICENSE at the top level of the ICU4X source tree
// (online at: https://github.com/unicode-org/icu4x/blob/main/LICENSE ).

use crate::ule::*;
use core::cmp::{Ord, Ordering, PartialOrd};
use core::fmt;
use core::ops::Deref;

use super::*;

/// A zero-copy, byte-aligned vector for variable-width types.
///
/// `VarZeroVec<T>` is designed as a drop-in replacement for `Vec<T>` in situations where it is
/// desirable to borrow data from an unaligned byte slice, such as zero-copy deserialization, and
/// where `T`'s data is variable-length (e.g. `String`)
///
/// `T` must implement [`VarULE`], which is already implemented for [`str`] and `[u8]`. For storing more
/// complicated series of elements, it is implemented on `ZeroSlice<T>` as well as `VarZeroSlice<T>`
/// for nesting. [`zerovec::make_varule`](crate::make_varule) may be used to generate
/// a dynamically-sized [`VarULE`] type and conversions to and from a custom type.
///
/// For example, here are some owned types and their zero-copy equivalents:
///
/// - `Vec<String>`: `VarZeroVec<'a, str>`
/// - `Vec<Vec<u8>>>`: `VarZeroVec<'a, [u8]>`
/// - `Vec<Vec<u32>>`: `VarZeroVec<'a, ZeroSlice<u32>>`
/// - `Vec<Vec<String>>`: `VarZeroVec<'a, VarZeroSlice<str>>`
///
/// Most of the methods on `VarZeroVec<'a, T>` come from its [`Deref`] implementation to [`VarZeroSlice<T>`](VarZeroSlice).
///
/// For creating zero-copy vectors of fixed-size types, see [`ZeroVec`](crate::ZeroVec).
///
/// `VarZeroVec<T>` behaves much like [`Cow`](alloc::borrow::Cow), where it can be constructed from
/// owned data (and then mutated!) but can also borrow from some buffer.
///
/// The `F` type parameter is a [`VarZeroVecFormat`] (see its docs for more details), which can be used to select the
/// precise format of the backing buffer with various size and performance tradeoffs. It defaults to [`Index16`].
///
/// # Bytes and Equality
///
/// Two [`VarZeroVec`]s are equal if and only if their bytes are equal, as described in the trait
/// [`VarULE`]. However, we do not guarantee stability of byte equality or serialization format
/// across major SemVer releases.
///
/// To compare a [`Vec<T>`] to a [`VarZeroVec<T>`], it is generally recommended to use
/// [`Iterator::eq`], since it is somewhat expensive at runtime to convert from a [`Vec<T>`] to a
/// [`VarZeroVec<T>`] or vice-versa.
///
/// Prior to zerovec reaching 1.0, the precise byte representation of [`VarZeroVec`] is still
/// under consideration, with different options along the space-time spectrum. See
/// [#1410](https://github.com/unicode-org/icu4x/issues/1410).
///
/// # Example
///
/// ```rust
/// use zerovec::VarZeroVec;
///
/// // The little-endian bytes correspond to the list of strings.
/// let strings = vec!["w", "ω", "文", "𑄃"];
///
/// #[derive(serde::Serialize, serde::Deserialize)]
/// struct Data<'a> {
///     #[serde(borrow)]
///     strings: VarZeroVec<'a, str>,
/// }
///
/// let data = Data {
///     strings: VarZeroVec::from(&strings),
/// };
///
/// let bincode_bytes =
///     bincode::serialize(&data).expect("Serialization should be successful");
///
/// // Will deserialize without allocations
/// let deserialized: Data = bincode::deserialize(&bincode_bytes)
///     .expect("Deserialization should be successful");
///
/// assert_eq!(deserialized.strings.get(2), Some("文"));
/// assert_eq!(deserialized.strings, &*strings);
/// ```
///
/// Here's another example with `ZeroSlice<T>` (similar to `[T]`):
///
/// ```rust
/// use zerovec::VarZeroVec;
/// use zerovec::ZeroSlice;
///
/// // The structured list correspond to the list of integers.
/// let numbers: &[&[u32]] = &[
///     &[12, 25, 38],
///     &[39179, 100],
///     &[42, 55555],
///     &[12345, 54321, 9],
/// ];
///
/// #[derive(serde::Serialize, serde::Deserialize)]
/// struct Data<'a> {
///     #[serde(borrow)]
///     vecs: VarZeroVec<'a, ZeroSlice<u32>>,
/// }
///
/// let data = Data {
///     vecs: VarZeroVec::from(numbers),
/// };
///
/// let bincode_bytes =
///     bincode::serialize(&data).expect("Serialization should be successful");
///
/// let deserialized: Data = bincode::deserialize(&bincode_bytes)
///     .expect("Deserialization should be successful");
///
/// assert_eq!(deserialized.vecs[0].get(1).unwrap(), 25);
/// assert_eq!(deserialized.vecs[1], *numbers[1]);
/// ```
///
/// [`VarZeroVec`]s can be nested infinitely via a similar mechanism, see the docs of [`VarZeroSlice`]
/// for more information.
///
/// # How it Works
///
/// `VarZeroVec<T>`, when used with non-human-readable serializers (like `bincode`), will
/// serialize to a specially formatted list of bytes. The format is:
///
/// - 2 bytes for `length` (interpreted as a little-endian u16)
/// - `2 * (length - 1)` bytes of `indices` (interpreted as little-endian u16s)
/// - Remaining bytes for actual `data`
///
/// The format is tweakable by setting the `F` parameter, by default it uses u16 indices and lengths but other
/// `VarZeroVecFormat` types can set other sizes.
///
/// Each element in the `indices` array points to the ending index of its corresponding
/// data part in the `data` list. The starting index can be calculated from the ending index
/// of the next element (or 0 for the first element). The last ending index, not stored in the array, is
/// the length of the `data` segment.
///
/// See [the design doc](https://github.com/unicode-org/icu4x/blob/main/utils/zerovec/design_doc.md) for more details.
///
/// [`ule`]: crate::ule
pub struct VarZeroVec<'a, T: ?Sized, F = Index16>(pub(crate) VarZeroVecInner<'a, T, F>);

// Cow-like internal representation: either an owned byte buffer or a borrowed slice
pub(crate) enum VarZeroVecInner<'a, T: ?Sized, F = Index16> {
    #[cfg(feature = "alloc")]
    Owned(VarZeroVecOwned<T, F>),
    Borrowed(&'a VarZeroSlice<T, F>),
}

impl<'a, T: ?Sized, F> Clone for VarZeroVec<'a, T, F> {
    fn clone(&self) -> Self {
        match self.0 {
            #[cfg(feature = "alloc")]
            VarZeroVecInner::Owned(ref o) => o.clone().into(),
            // Borrowed data is cloned cheaply by copying the reference
            VarZeroVecInner::Borrowed(b) => b.into(),
        }
    }
}

impl<T: VarULE + ?Sized, F: VarZeroVecFormat> fmt::Debug for VarZeroVec<'_, T, F>
where
    T: fmt::Debug,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Delegates to the slice Debug impl via Deref
        VarZeroSlice::fmt(self, f)
    }
}

#[cfg(feature = "alloc")]
impl<'a, T: ?Sized, F> From<VarZeroVecOwned<T, F>> for VarZeroVec<'a, T, F> {
    #[inline]
    fn from(other: VarZeroVecOwned<T, F>) -> Self {
        Self(VarZeroVecInner::Owned(other))
    }
}

impl<'a, T: ?Sized, F> From<&'a VarZeroSlice<T, F>> for VarZeroVec<'a, T, F> {
    fn from(other: &'a VarZeroSlice<T, F>) -> Self {
        Self(VarZeroVecInner::Borrowed(other))
    }
}

#[cfg(feature = "alloc")]
impl<'a, T: ?Sized + VarULE, F: VarZeroVecFormat> From<VarZeroVec<'a, T, F>>
    for VarZeroVecOwned<T, F>
{
    #[inline]
    fn from(other: VarZeroVec<'a, T, F>) -> Self {
        match other.0 {
            VarZeroVecInner::Owned(o) => o,
            VarZeroVecInner::Borrowed(b) => b.into(),
        }
    }
}

impl<T: VarULE + ?Sized> Default for VarZeroVec<'_, T> {
    #[inline]
    fn default() -> Self {
        Self::new()
    }
}

impl<T: VarULE + ?Sized, F: VarZeroVecFormat> Deref for VarZeroVec<'_, T, F> {
    type Target = VarZeroSlice<T, F>;
    fn deref(&self) -> &VarZeroSlice<T, F> {
        self.as_slice()
    }
}

impl<'a, T: VarULE + ?Sized, F: VarZeroVecFormat> VarZeroVec<'a, T, F> {
    /// Creates a new, empty `VarZeroVec<T>`.
    ///
    /// # Examples
    ///
    /// ```
    /// use zerovec::VarZeroVec;
    ///
    /// let vzv: VarZeroVec<str> = VarZeroVec::new();
    /// assert!(vzv.is_empty());
    /// ```
    #[inline]
    pub const fn new() -> Self {
        Self(VarZeroVecInner::Borrowed(VarZeroSlice::new_empty()))
    }

    /// Parse a VarZeroVec from a slice of the appropriate format
    ///
    /// Slices of the right format can be obtained via [`VarZeroSlice::as_bytes()`].
    ///
    /// # Example
    ///
    /// ```rust
    /// # use zerovec::VarZeroVec;
    ///
    /// let strings = vec!["foo", "bar", "baz", "quux"];
    /// let vec = VarZeroVec::<str>::from(&strings);
    ///
    /// assert_eq!(&vec[0], "foo");
    /// assert_eq!(&vec[1], "bar");
    /// assert_eq!(&vec[2], "baz");
    /// assert_eq!(&vec[3], "quux");
    /// ```
    pub fn parse_bytes(slice: &'a [u8]) -> Result<Self, UleError> {
        let borrowed = VarZeroSlice::<T, F>::parse_bytes(slice)?;
        Ok(Self(VarZeroVecInner::Borrowed(borrowed)))
    }

    /// Uses a `&[u8]` buffer as a `VarZeroVec<T>` without any verification.
    ///
    /// # Safety
    ///
    /// `bytes` need to be an output from [`VarZeroSlice::as_bytes()`].
    pub const unsafe fn from_bytes_unchecked(bytes: &'a [u8]) -> Self {
        Self(VarZeroVecInner::Borrowed(core::mem::transmute::<
            &[u8],
            &VarZeroSlice<T, F>,
        >(bytes)))
    }

    /// Convert this into a mutable vector of the owned `T` type, cloning if necessary.
    ///
    ///
    /// # Example
    ///
    /// ```rust,ignore
    /// # use zerovec::VarZeroVec;
    /// let strings = vec!["foo", "bar", "baz", "quux"];
    /// let mut vec = VarZeroVec::<str>::from(&strings);
    ///
    /// assert_eq!(vec.len(), 4);
    /// let mutvec = vec.make_mut();
    /// mutvec.push("lorem ipsum".into());
    /// mutvec[2] = "dolor sit".into();
    /// assert_eq!(&vec[0], "foo");
    /// assert_eq!(&vec[1], "bar");
    /// assert_eq!(&vec[2], "dolor sit");
    /// assert_eq!(&vec[3], "quux");
    /// assert_eq!(&vec[4], "lorem ipsum");
    /// ```
    //
    // This function is crate-public for now since we don't yet want to stabilize
    // the internal implementation details
    #[cfg(feature = "alloc")]
    pub fn make_mut(&mut self) -> &mut VarZeroVecOwned<T, F> {
        match self.0 {
            VarZeroVecInner::Owned(ref mut vec) => vec,
            VarZeroVecInner::Borrowed(slice) => {
                let new_self = VarZeroVecOwned::from_slice(slice);
                *self = new_self.into();
                // recursion is limited since we are guaranteed to hit the Owned branch
                self.make_mut()
            }
        }
    }

    /// Converts a borrowed ZeroVec to an owned ZeroVec. No-op if already owned.
    ///
    /// # Example
    ///
    /// ```
    /// # use zerovec::VarZeroVec;
    ///
    /// let strings = vec!["foo", "bar", "baz", "quux"];
    /// let vec = VarZeroVec::<str>::from(&strings);
    ///
    /// assert_eq!(vec.len(), 4);
    /// // has 'static lifetime
    /// let owned = vec.into_owned();
    /// ```
    #[cfg(feature = "alloc")]
    pub fn into_owned(mut self) -> VarZeroVec<'static, T, F> {
        self.make_mut();
        match self.0 {
            VarZeroVecInner::Owned(vec) => vec.into(),
            // make_mut() above guarantees the Owned variant
            _ => unreachable!(),
        }
    }

    /// Obtain this `VarZeroVec` as a [`VarZeroSlice`]
    pub fn as_slice(&self) -> &VarZeroSlice<T, F> {
        match self.0 {
            #[cfg(feature = "alloc")]
            VarZeroVecInner::Owned(ref owned) => owned,
            VarZeroVecInner::Borrowed(b) => b,
        }
    }

    /// Takes the byte vector representing the encoded data of this VarZeroVec. If borrowed,
    /// this function allocates a byte vector and copies the borrowed bytes into it.
    ///
    /// The bytes can be passed back to [`Self::parse_bytes()`].
    ///
    /// To get a reference to the bytes without moving, see [`VarZeroSlice::as_bytes()`].
    ///
    /// # Example
    ///
    /// ```rust
    /// # use zerovec::VarZeroVec;
    ///
    /// let strings = vec!["foo", "bar", "baz"];
    /// let bytes = VarZeroVec::<str>::from(&strings).into_bytes();
    ///
    /// let mut borrowed: VarZeroVec<str> =
    ///     VarZeroVec::parse_bytes(&bytes).unwrap();
    /// assert_eq!(borrowed, &*strings);
    /// ```
    #[cfg(feature = "alloc")]
    pub fn into_bytes(self) -> alloc::vec::Vec<u8> {
        match self.0 {
            #[cfg(feature = "alloc")]
            VarZeroVecInner::Owned(vec) => vec.into_bytes(),
            VarZeroVecInner::Borrowed(vec) => vec.as_bytes().to_vec(),
        }
    }

    /// Return whether the [`VarZeroVec`] is operating on owned or borrowed
    /// data. [`VarZeroVec::into_owned()`] and [`VarZeroVec::make_mut()`] can
    /// be used to force it into an owned type
    pub fn is_owned(&self) -> bool {
        match self.0 {
            #[cfg(feature = "alloc")]
            VarZeroVecInner::Owned(..) => true,
            VarZeroVecInner::Borrowed(..) => false,
        }
    }

    #[doc(hidden)]
    pub fn as_components<'b>(&'b self) -> VarZeroVecComponents<'b, T, F> {
        self.as_slice().as_components()
    }
}

#[cfg(feature = "alloc")]
impl<A, T, F> From<&alloc::vec::Vec<A>> for VarZeroVec<'static, T, F>
where
    T: VarULE + ?Sized,
    A: EncodeAsVarULE<T>,
    F: VarZeroVecFormat,
{
    #[inline]
    fn from(elements: &alloc::vec::Vec<A>) -> Self {
        Self::from(elements.as_slice())
    }
}

#[cfg(feature = "alloc")]
impl<A, T, F> From<&[A]> for VarZeroVec<'static, T, F>
where
    T: VarULE + ?Sized,
    A: EncodeAsVarULE<T>,
    F: VarZeroVecFormat,
{
    #[inline]
    fn from(elements: &[A]) -> Self {
        if elements.is_empty() {
            // Keep the canonical (empty-slice) representation for the empty vector
            VarZeroSlice::new_empty().into()
        } else {
            #[allow(clippy::unwrap_used)] // TODO(#1410) Better story for fallibility
            VarZeroVecOwned::try_from_elements(elements).unwrap().into()
        }
    }
}

#[cfg(feature = "alloc")]
impl<A, T, F, const N: usize> From<&[A; N]> for VarZeroVec<'static, T, F>
where
    T: VarULE + ?Sized,
    A: EncodeAsVarULE<T>,
    F: VarZeroVecFormat,
{
    #[inline]
    fn from(elements: &[A; N]) -> Self {
        Self::from(elements.as_slice())
    }
}

impl<'a, 'b, T, F> PartialEq<VarZeroVec<'b, T, F>> for VarZeroVec<'a, T, F>
where
    T: VarULE,
    T: ?Sized,
    T: PartialEq,
    F: VarZeroVecFormat,
{
    #[inline]
    fn eq(&self, other: &VarZeroVec<'b, T, F>) -> bool {
        // VZV::from_elements used to produce a non-canonical representation of the
        // empty VZV, so we cannot use byte equality for empty vecs.
        if self.is_empty() || other.is_empty() {
            return self.is_empty() && other.is_empty();
        }
        // VarULE has an API guarantee that byte equality is semantic equality.
        // For non-empty VZVs, there's only a single metadata representation,
        // so this guarantee extends to the whole VZV representation.
        self.as_bytes().eq(other.as_bytes())
    }
}

impl<'a, T, F> Eq for VarZeroVec<'a, T, F>
where
    T: VarULE,
    T: ?Sized,
    T: Eq,
    F: VarZeroVecFormat,
{
}

impl<T, A, F> PartialEq<&'_ [A]> for VarZeroVec<'_, T, F>
where
    T: VarULE + ?Sized,
    T: PartialEq,
    A: AsRef<T>,
    F: VarZeroVecFormat,
{
    #[inline]
    fn eq(&self, other: &&[A]) -> bool {
        self.iter().eq(other.iter().map(|t| t.as_ref()))
    }
}

impl<T, A, F, const N: usize> PartialEq<[A; N]> for VarZeroVec<'_, T, F>
where
    T: VarULE + ?Sized,
    T: PartialEq,
    A: AsRef<T>,
    F: VarZeroVecFormat,
{
    #[inline]
    fn eq(&self, other: &[A; N]) -> bool {
        self.iter().eq(other.iter().map(|t| t.as_ref()))
    }
}

impl<'a, T: VarULE + ?Sized + PartialOrd, F: VarZeroVecFormat> PartialOrd for VarZeroVec<'a, T, F> {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        self.iter().partial_cmp(other.iter())
    }
}

impl<'a, T: VarULE + ?Sized + Ord, F: VarZeroVecFormat> Ord for VarZeroVec<'a, T, F> {
    fn cmp(&self, other: &Self) -> Ordering {
        self.iter().cmp(other.iter())
    }
}

#[test]
fn assert_single_empty_representation() {
    assert_eq!(
        VarZeroVec::<str>::new().as_bytes(),
        VarZeroVec::<str>::from(&[] as &[&str]).as_bytes()
    );

    use crate::map::MutableZeroVecLike;
    let mut vzv = VarZeroVec::<str>::from(&["hello", "world"][..]);
    assert_eq!(vzv.len(), 2);
    assert!(!vzv.as_bytes().is_empty());
    vzv.zvl_remove(0);
    assert_eq!(vzv.len(), 1);
    assert!(!vzv.as_bytes().is_empty());
    vzv.zvl_remove(0);
    // Removing the last element must collapse back to the canonical empty bytes
    assert_eq!(vzv.len(), 0);
    assert!(vzv.as_bytes().is_empty());
    vzv.zvl_insert(0, "something");
    assert_eq!(vzv.len(), 1);
    assert!(!vzv.as_bytes().is_empty());
}

#[test]
fn weird_empty_representation_equality() {
    // The legacy non-canonical empty encoding must still compare equal to the canonical one
    assert_eq!(
        VarZeroVec::<str>::parse_bytes(&[0, 0, 0, 0]).unwrap(),
        VarZeroVec::<str>::parse_bytes(&[]).unwrap()
    );
}
// This file is part of ICU4X. For terms of use, please see the file
// called LICENSE at the top level of the ICU4X source tree
// (online at: https://github.com/unicode-org/icu4x/blob/main/LICENSE ).

// This way we can copy-paste Yokeable impls
#![allow(unknown_lints)] // forgetting_copy_types
#![allow(renamed_and_removed_lints)] // forgetting_copy_types
#![allow(forgetting_copy_types)]
#![allow(clippy::forget_copy)]
#![allow(clippy::forget_non_drop)]

#[cfg(feature = "alloc")]
use crate::map::ZeroMapBorrowed;
#[cfg(feature = "alloc")]
use crate::map::ZeroMapKV;
#[cfg(feature = "alloc")]
use crate::map2d::ZeroMap2dBorrowed;
use crate::ule::*;
use crate::{VarZeroCow, VarZeroVec, ZeroVec};
#[cfg(feature = "alloc")]
use crate::{ZeroMap, ZeroMap2d};
use core::{mem, ptr};
use yoke::*;

// This impl is similar to the impl on Cow and is safe for the same reasons
/// This impl requires enabling the optional `yoke` Cargo feature of the `zerovec` crate
unsafe impl<'a, T: 'static + AsULE> Yokeable<'a> for ZeroVec<'static, T> {
    type Output = ZeroVec<'a, T>;
    #[inline]
    fn transform(&'a self) -> &'a Self::Output {
        // Covariant lifetime shortening: `'static` data is valid for any `'a`.
        self
    }
    #[inline]
    fn transform_owned(self) -> Self::Output {
        self
    }
    #[inline]
    unsafe fn make(from: Self::Output) -> Self {
        debug_assert!(mem::size_of::<Self::Output>() == mem::size_of::<Self>());
        // `Self` and `Self::Output` differ only in the (erased) lifetime, so we
        // move the bits out with `ptr::read`; `ManuallyDrop` prevents a double drop.
        let from = mem::ManuallyDrop::new(from);
        let ptr: *const Self = (&*from as *const Self::Output).cast();
        ptr::read(ptr)
    }
    #[inline]
    fn transform_mut<F>(&'a mut self, f: F)
    where
        F: 'static + for<'b> FnOnce(&'b mut Self::Output),
    {
        // Lifetime-only transmute; `F: 'static` keeps the closure from smuggling
        // the short-lived reference out (per the `Yokeable` contract).
        unsafe { f(mem::transmute::<&mut Self, &mut Self::Output>(self)) }
    }
}

// This impl is similar to the impl on Cow and is safe for the same reasons
/// This impl requires enabling the optional `yoke` Cargo feature of the `zerovec` crate
unsafe impl<'a, T: 'static + VarULE + ?Sized> Yokeable<'a> for VarZeroVec<'static, T> {
    type Output = VarZeroVec<'a, T>;
    #[inline]
    fn transform(&'a self) -> &'a Self::Output {
        self
    }
    #[inline]
    fn transform_owned(self) -> Self::Output {
        self
    }
    #[inline]
    unsafe fn make(from: Self::Output) -> Self {
        debug_assert!(mem::size_of::<Self::Output>() == mem::size_of::<Self>());
        // Bitwise move between lifetime-variant versions of the same type.
        let from = mem::ManuallyDrop::new(from);
        let ptr: *const Self = (&*from as *const Self::Output).cast();
        ptr::read(ptr)
    }
    #[inline]
    fn transform_mut<F>(&'a mut self, f: F)
    where
        F: 'static + for<'b> FnOnce(&'b mut Self::Output),
    {
        unsafe { f(mem::transmute::<&mut Self, &mut Self::Output>(self)) }
    }
}

// This impl is similar to the impl on Cow and is safe for the same reasons
/// This impl requires enabling the optional `yoke` Cargo feature of the `zerovec` crate
unsafe impl<'a, T: 'static + ?Sized> Yokeable<'a> for VarZeroCow<'static, T> {
    type Output = VarZeroCow<'a, T>;
    #[inline]
    fn transform(&'a self) -> &'a Self::Output {
        self
    }
    #[inline]
    fn transform_owned(self) -> Self::Output {
        self
    }
    #[inline]
    unsafe fn make(from: Self::Output) -> Self {
        debug_assert!(mem::size_of::<Self::Output>() == mem::size_of::<Self>());
        let from = mem::ManuallyDrop::new(from);
        let ptr: *const Self = (&*from as *const Self::Output).cast();
        ptr::read(ptr)
    }
    #[inline]
    fn transform_mut<F>(&'a mut self, f: F)
    where
        F: 'static + for<'b> FnOnce(&'b mut Self::Output),
    {
        unsafe { f(mem::transmute::<&mut Self, &mut Self::Output>(self)) }
    }
}

/// This impl requires enabling the optional `yoke` Cargo feature of the `zerovec` crate
#[allow(clippy::transmute_ptr_to_ptr)]
#[cfg(feature = "alloc")]
unsafe impl<'a, K, V> Yokeable<'a> for ZeroMap<'static, K, V>
where
    K: 'static + for<'b> ZeroMapKV<'b> + ?Sized,
    V: 'static + for<'b> ZeroMapKV<'b> + ?Sized,
    <K as ZeroMapKV<'static>>::Container: for<'b> Yokeable<'b>,
    <V as ZeroMapKV<'static>>::Container: for<'b> Yokeable<'b>,
{
    type Output = ZeroMap<'a, K, V>;
    #[inline]
    fn transform(&'a self) -> &'a Self::Output {
        unsafe {
            // Unfortunately, because K and V are generic, rustc is
            // unaware that these are covariant types, and cannot perform this cast automatically.
// We transmute it instead, and enforce the lack of a lifetime with the `K, V: 'static` bound mem::transmute::<&Self, &Self::Output>(self) } } #[inline] fn transform_owned(self) -> Self::Output { debug_assert!(mem::size_of::<Self::Output>() == mem::size_of::<Self>()); unsafe { // Similar problem as transform(), but we need to use ptr::read since // the compiler isn't sure of the sizes let this = mem::ManuallyDrop::new(self); let ptr: *const Self::Output = (&*this as *const Self).cast(); ptr::read(ptr) } } #[inline] unsafe fn make(from: Self::Output) -> Self { debug_assert!(mem::size_of::<Self::Output>() == mem::size_of::<Self>()); let from = mem::ManuallyDrop::new(from); let ptr: *const Self = (&*from as *const Self::Output).cast(); ptr::read(ptr) } #[inline] fn transform_mut<F>(&'a mut self, f: F) where F: 'static + for<'b> FnOnce(&'b mut Self::Output), { unsafe { f(mem::transmute::<&mut Self, &mut Self::Output>(self)) } } } /// This impl requires enabling the optional `yoke` Cargo feature of the `zerovec` crate #[allow(clippy::transmute_ptr_to_ptr)] #[cfg(feature = "alloc")] unsafe impl<'a, K, V> Yokeable<'a> for ZeroMapBorrowed<'static, K, V> where K: 'static + for<'b> ZeroMapKV<'b> + ?Sized, V: 'static + for<'b> ZeroMapKV<'b> + ?Sized, &'static <K as ZeroMapKV<'static>>::Slice: for<'b> Yokeable<'b>, &'static <V as ZeroMapKV<'static>>::Slice: for<'b> Yokeable<'b>, { type Output = ZeroMapBorrowed<'a, K, V>; #[inline] fn transform(&'a self) -> &'a Self::Output { unsafe { // Unfortunately, because K and V are generic, rustc is // unaware that these are covariant types, and cannot perform this cast automatically. 
// We transmute it instead, and enforce the lack of a lifetime with the `K, V: 'static` bound mem::transmute::<&Self, &Self::Output>(self) } } #[inline] fn transform_owned(self) -> Self::Output { debug_assert!(mem::size_of::<Self::Output>() == mem::size_of::<Self>()); unsafe { // Similar problem as transform(), but we need to use ptr::read since // the compiler isn't sure of the sizes let this = mem::ManuallyDrop::new(self); let ptr: *const Self::Output = (&*this as *const Self).cast(); ptr::read(ptr) } } #[inline] unsafe fn make(from: Self::Output) -> Self { debug_assert!(mem::size_of::<Self::Output>() == mem::size_of::<Self>()); let from = mem::ManuallyDrop::new(from); let ptr: *const Self = (&*from as *const Self::Output).cast(); ptr::read(ptr) } #[inline] fn transform_mut<F>(&'a mut self, f: F) where F: 'static + for<'b> FnOnce(&'b mut Self::Output), { unsafe { f(mem::transmute::<&mut Self, &mut Self::Output>(self)) } } } /// This impl requires enabling the optional `yoke` Cargo feature of the `zerovec` crate #[allow(clippy::transmute_ptr_to_ptr)] #[cfg(feature = "alloc")] unsafe impl<'a, K0, K1, V> Yokeable<'a> for ZeroMap2d<'static, K0, K1, V> where K0: 'static + for<'b> ZeroMapKV<'b> + ?Sized, K1: 'static + for<'b> ZeroMapKV<'b> + ?Sized, V: 'static + for<'b> ZeroMapKV<'b> + ?Sized, <K0 as ZeroMapKV<'static>>::Container: for<'b> Yokeable<'b>, <K1 as ZeroMapKV<'static>>::Container: for<'b> Yokeable<'b>, <V as ZeroMapKV<'static>>::Container: for<'b> Yokeable<'b>, { type Output = ZeroMap2d<'a, K0, K1, V>; #[inline] fn transform(&'a self) -> &'a Self::Output { unsafe { // Unfortunately, because K and V are generic, rustc is // unaware that these are covariant types, and cannot perform this cast automatically. 
// We transmute it instead, and enforce the lack of a lifetime with the `K0, K1, V: 'static` bound mem::transmute::<&Self, &Self::Output>(self) } } #[inline] fn transform_owned(self) -> Self::Output { debug_assert!(mem::size_of::<Self::Output>() == mem::size_of::<Self>()); unsafe { // Similar problem as transform(), but we need to use ptr::read since // the compiler isn't sure of the sizes let this = mem::ManuallyDrop::new(self); let ptr: *const Self::Output = (&*this as *const Self).cast(); ptr::read(ptr) } } #[inline] unsafe fn make(from: Self::Output) -> Self { debug_assert!(mem::size_of::<Self::Output>() == mem::size_of::<Self>()); let from = mem::ManuallyDrop::new(from); let ptr: *const Self = (&*from as *const Self::Output).cast(); ptr::read(ptr) } #[inline] fn transform_mut<F>(&'a mut self, f: F) where F: 'static + for<'b> FnOnce(&'b mut Self::Output), { unsafe { f(mem::transmute::<&mut Self, &mut Self::Output>(self)) } } } /// This impl requires enabling the optional `yoke` Cargo feature of the `zerovec` crate #[allow(clippy::transmute_ptr_to_ptr)] #[cfg(feature = "alloc")] unsafe impl<'a, K0, K1, V> Yokeable<'a> for ZeroMap2dBorrowed<'static, K0, K1, V> where K0: 'static + for<'b> ZeroMapKV<'b> + ?Sized, K1: 'static + for<'b> ZeroMapKV<'b> + ?Sized, V: 'static + for<'b> ZeroMapKV<'b> + ?Sized, &'static <K0 as ZeroMapKV<'static>>::Slice: for<'b> Yokeable<'b>, &'static <K1 as ZeroMapKV<'static>>::Slice: for<'b> Yokeable<'b>, &'static <V as ZeroMapKV<'static>>::Slice: for<'b> Yokeable<'b>, { type Output = ZeroMap2dBorrowed<'a, K0, K1, V>; #[inline] fn transform(&'a self) -> &'a Self::Output { unsafe { // Unfortunately, because K and V are generic, rustc is // unaware that these are covariant types, and cannot perform this cast automatically. 
// We transmute it instead, and enforce the lack of a lifetime with the `K0, K1, V: 'static` bound mem::transmute::<&Self, &Self::Output>(self) } } #[inline] fn transform_owned(self) -> Self::Output { debug_assert!(mem::size_of::<Self::Output>() == mem::size_of::<Self>()); unsafe { // Similar problem as transform(), but we need to use ptr::read since // the compiler isn't sure of the sizes let this = mem::ManuallyDrop::new(self); let ptr: *const Self::Output = (&*this as *const Self).cast(); ptr::read(ptr) } } #[inline] unsafe fn make(from: Self::Output) -> Self { debug_assert!(mem::size_of::<Self::Output>() == mem::size_of::<Self>()); let from = mem::ManuallyDrop::new(from); let ptr: *const Self = (&*from as *const Self::Output).cast(); ptr::read(ptr) } #[inline] fn transform_mut<F>(&'a mut self, f: F) where F: 'static + for<'b> FnOnce(&'b mut Self::Output), { unsafe { f(mem::transmute::<&mut Self, &mut Self::Output>(self)) } } } #[cfg(test)] #[allow(non_camel_case_types, non_snake_case)] mod test { use super::*; use crate::{VarZeroSlice, ZeroSlice}; use databake::*; // Note: The following derives cover Yoke as well as Serde and databake. These may partially // duplicate tests elsewhere in this crate, but they are here for completeness. 
    // Smoke test: a struct holding a ZeroVec can derive Yokeable/ZeroFrom/serde/Bake.
    #[derive(yoke::Yokeable, zerofrom::ZeroFrom)]
    #[cfg_attr(feature = "serde", derive(serde::Deserialize, serde::Serialize))]
    #[cfg_attr(feature = "databake", derive(databake::Bake))]
    #[cfg_attr(feature = "databake", databake(path = zerovec::yoke_impls::test))]
    struct DeriveTest_ZeroVec<'data> {
        #[cfg_attr(feature = "serde", serde(borrow))]
        _data: ZeroVec<'data, u16>,
    }

    #[test]
    fn bake_ZeroVec() {
        test_bake!(
            DeriveTest_ZeroVec<'static>,
            crate::yoke_impls::test::DeriveTest_ZeroVec {
                _data: crate::ZeroVec::new(),
            },
            zerovec,
        );
    }

    #[derive(yoke::Yokeable)]
    #[cfg_attr(feature = "serde", derive(serde::Deserialize, serde::Serialize))]
    #[cfg_attr(feature = "databake", derive(databake::Bake))]
    #[cfg_attr(feature = "databake", databake(path = zerovec::yoke_impls::test))]
    struct DeriveTest_ZeroSlice<'data> {
        #[cfg_attr(feature = "serde", serde(borrow))]
        _data: &'data ZeroSlice<u16>,
    }

    #[test]
    fn bake_ZeroSlice() {
        test_bake!(
            DeriveTest_ZeroSlice<'static>,
            crate::yoke_impls::test::DeriveTest_ZeroSlice {
                _data: crate::ZeroSlice::new_empty(),
            },
            zerovec,
        );
    }

    #[derive(yoke::Yokeable, zerofrom::ZeroFrom)]
    #[cfg_attr(feature = "serde", derive(serde::Deserialize, serde::Serialize))]
    #[cfg_attr(feature = "databake", derive(databake::Bake))]
    #[cfg_attr(feature = "databake", databake(path = zerovec::yoke_impls::test))]
    struct DeriveTest_VarZeroVec<'data> {
        #[cfg_attr(feature = "serde", serde(borrow))]
        _data: VarZeroVec<'data, str>,
    }

    #[test]
    fn bake_VarZeroVec() {
        test_bake!(
            DeriveTest_VarZeroVec<'static>,
            crate::yoke_impls::test::DeriveTest_VarZeroVec {
                _data: crate::vecs::VarZeroVec16::new(),
            },
            zerovec,
        );
    }

    #[derive(yoke::Yokeable)]
    #[cfg_attr(feature = "serde", derive(serde::Deserialize, serde::Serialize))]
    #[cfg_attr(feature = "databake", derive(databake::Bake))]
    #[cfg_attr(feature = "databake", databake(path = zerovec::yoke_impls::test))]
    struct DeriveTest_VarZeroSlice<'data> {
        #[cfg_attr(feature = "serde", serde(borrow))]
        _data: &'data VarZeroSlice<str>,
    }

    #[test]
    fn bake_VarZeroSlice() {
        test_bake!(
            DeriveTest_VarZeroSlice<'static>,
            crate::yoke_impls::test::DeriveTest_VarZeroSlice {
                _data: crate::vecs::VarZeroSlice16::new_empty()
            },
            zerovec,
        );
    }

    // Map-typed fields need `prove_covariance_manually` since the derive cannot
    // see through the generic containers.
    #[derive(yoke::Yokeable, zerofrom::ZeroFrom)]
    #[cfg_attr(feature = "serde", derive(serde::Deserialize, serde::Serialize))]
    #[cfg_attr(feature = "databake", derive(databake::Bake))]
    #[cfg_attr(feature = "databake", databake(path = zerovec::yoke_impls::test))]
    #[yoke(prove_covariance_manually)]
    struct DeriveTest_ZeroMap<'data> {
        #[cfg_attr(feature = "serde", serde(borrow))]
        _data: ZeroMap<'data, [u8], str>,
    }

    #[test]
    fn bake_ZeroMap() {
        test_bake!(
            DeriveTest_ZeroMap<'static>,
            crate::yoke_impls::test::DeriveTest_ZeroMap {
                _data: unsafe {
                    #[allow(unused_unsafe)]
                    crate::ZeroMap::from_parts_unchecked(
                        crate::vecs::VarZeroVec16::new(),
                        crate::vecs::VarZeroVec16::new(),
                    )
                },
            },
            zerovec,
        );
    }

    #[derive(yoke::Yokeable)]
    #[cfg_attr(feature = "serde", derive(serde::Deserialize, serde::Serialize))]
    #[cfg_attr(feature = "databake", derive(databake::Bake))]
    #[cfg_attr(feature = "databake", databake(path = zerovec::yoke_impls::test))]
    #[yoke(prove_covariance_manually)]
    struct DeriveTest_ZeroMapBorrowed<'data> {
        #[cfg_attr(feature = "serde", serde(borrow))]
        _data: ZeroMapBorrowed<'data, [u8], str>,
    }

    #[test]
    fn bake_ZeroMapBorrowed() {
        test_bake!(
            DeriveTest_ZeroMapBorrowed<'static>,
            crate::yoke_impls::test::DeriveTest_ZeroMapBorrowed {
                _data: unsafe {
                    #[allow(unused_unsafe)]
                    crate::maps::ZeroMapBorrowed::from_parts_unchecked(
                        crate::vecs::VarZeroSlice16::new_empty(),
                        crate::vecs::VarZeroSlice16::new_empty(),
                    )
                },
            },
            zerovec,
        );
    }

    #[derive(yoke::Yokeable, zerofrom::ZeroFrom)]
    #[cfg_attr(feature = "serde", derive(serde::Deserialize, serde::Serialize))]
    #[cfg_attr(feature = "databake", derive(databake::Bake))]
    #[cfg_attr(feature = "databake", databake(path = zerovec::yoke_impls::test))]
    #[yoke(prove_covariance_manually)]
    struct DeriveTest_ZeroMapWithULE<'data> {
        #[cfg_attr(feature = "serde", serde(borrow))]
        _data: ZeroMap<'data, ZeroSlice<u32>, str>,
    }

    #[test]
    fn bake_ZeroMapWithULE() {
        test_bake!(
            DeriveTest_ZeroMapWithULE<'static>,
            crate::yoke_impls::test::DeriveTest_ZeroMapWithULE {
                _data: unsafe {
                    #[allow(unused_unsafe)]
                    crate::ZeroMap::from_parts_unchecked(
                        crate::vecs::VarZeroVec16::new(),
                        crate::vecs::VarZeroVec16::new(),
                    )
                },
            },
            zerovec,
        );
    }

    #[derive(yoke::Yokeable, zerofrom::ZeroFrom)]
    #[cfg_attr(feature = "serde", derive(serde::Deserialize, serde::Serialize))]
    #[cfg_attr(feature = "databake", derive(databake::Bake))]
    #[cfg_attr(feature = "databake", databake(path = zerovec::yoke_impls::test))]
    #[yoke(prove_covariance_manually)]
    struct DeriveTest_ZeroMap2d<'data> {
        #[cfg_attr(feature = "serde", serde(borrow))]
        _data: ZeroMap2d<'data, u16, u16, str>,
    }

    #[test]
    fn bake_ZeroMap2d() {
        test_bake!(
            DeriveTest_ZeroMap2d<'static>,
            crate::yoke_impls::test::DeriveTest_ZeroMap2d {
                _data: unsafe {
                    #[allow(unused_unsafe)]
                    crate::ZeroMap2d::from_parts_unchecked(
                        crate::ZeroVec::new(),
                        crate::ZeroVec::new(),
                        crate::ZeroVec::new(),
                        crate::vecs::VarZeroVec16::new(),
                    )
                },
            },
            zerovec,
        );
    }

    #[derive(yoke::Yokeable)]
    #[cfg_attr(feature = "serde", derive(serde::Deserialize, serde::Serialize))]
    #[cfg_attr(feature = "databake", derive(databake::Bake))]
    #[cfg_attr(feature = "databake", databake(path = zerovec::yoke_impls::test))]
    #[yoke(prove_covariance_manually)]
    struct DeriveTest_ZeroMap2dBorrowed<'data> {
        #[cfg_attr(feature = "serde", serde(borrow))]
        _data: ZeroMap2dBorrowed<'data, u16, u16, str>,
    }

    #[test]
    fn bake_ZeroMap2dBorrowed() {
        test_bake!(
            DeriveTest_ZeroMap2dBorrowed<'static>,
            crate::yoke_impls::test::DeriveTest_ZeroMap2dBorrowed {
                _data: unsafe {
                    #[allow(unused_unsafe)]
                    crate::maps::ZeroMap2dBorrowed::from_parts_unchecked(
                        crate::ZeroSlice::new_empty(),
                        crate::ZeroSlice::new_empty(),
                        crate::ZeroSlice::new_empty(),
                        crate::vecs::VarZeroSlice16::new_empty(),
                    )
                },
            },
            zerovec,
        );
    }
}
// This file is part of ICU4X. For terms of use, please see the file
// called LICENSE at the top level of the ICU4X source tree
// (online at: https://github.com/unicode-org/icu4x/blob/main/LICENSE ).

#[cfg(feature = "alloc")]
use crate::map::ZeroMapKV;
use crate::ule::*;
use crate::vecs::VarZeroVecFormat;
use crate::{VarZeroSlice, VarZeroVec, ZeroSlice, ZeroVec};
#[cfg(feature = "alloc")]
use crate::{ZeroMap, ZeroMap2d};
use zerofrom::ZeroFrom;

// All of these ZeroFrom impls produce *borrowed* (non-allocating) copies: they
// re-wrap the source's underlying ULE slice with the new, shorter lifetime.

impl<'zf, T> ZeroFrom<'zf, ZeroVec<'_, T>> for ZeroVec<'zf, T>
where
    T: 'static + AsULE,
{
    #[inline]
    fn zero_from(other: &'zf ZeroVec<'_, T>) -> Self {
        ZeroVec::new_borrowed(other.as_ule_slice())
    }
}

impl<'zf, T> ZeroFrom<'zf, ZeroSlice<T>> for ZeroVec<'zf, T>
where
    T: 'static + AsULE,
{
    #[inline]
    fn zero_from(other: &'zf ZeroSlice<T>) -> Self {
        ZeroVec::new_borrowed(other.as_ule_slice())
    }
}

impl<'zf, T> ZeroFrom<'zf, ZeroSlice<T>> for &'zf ZeroSlice<T>
where
    T: 'static + AsULE,
{
    #[inline]
    fn zero_from(other: &'zf ZeroSlice<T>) -> Self {
        other
    }
}

impl<'zf, T, F: VarZeroVecFormat> ZeroFrom<'zf, VarZeroSlice<T, F>> for VarZeroVec<'zf, T, F>
where
    T: 'static + VarULE + ?Sized,
{
    #[inline]
    fn zero_from(other: &'zf VarZeroSlice<T, F>) -> Self {
        other.into()
    }
}

impl<'zf, T, F: VarZeroVecFormat> ZeroFrom<'zf, VarZeroVec<'_, T, F>> for VarZeroVec<'zf, T, F>
where
    T: 'static + VarULE + ?Sized,
{
    #[inline]
    fn zero_from(other: &'zf VarZeroVec<'_, T, F>) -> Self {
        other.as_slice().into()
    }
}

impl<'zf, T> ZeroFrom<'zf, VarZeroSlice<T>> for &'zf VarZeroSlice<T>
where
    T: 'static + VarULE + ?Sized,
{
    #[inline]
    fn zero_from(other: &'zf VarZeroSlice<T>) -> Self {
        other
    }
}

// Maps delegate field-by-field to the containers' own ZeroFrom impls.
#[cfg(feature = "alloc")]
impl<'zf, 's, K, V> ZeroFrom<'zf, ZeroMap<'s, K, V>> for ZeroMap<'zf, K, V>
where
    K: 'static + for<'b> ZeroMapKV<'b> + ?Sized,
    V: 'static + for<'b> ZeroMapKV<'b> + ?Sized,
    <K as ZeroMapKV<'zf>>::Container: ZeroFrom<'zf, <K as ZeroMapKV<'s>>::Container>,
    <V as ZeroMapKV<'zf>>::Container: ZeroFrom<'zf, <V as ZeroMapKV<'s>>::Container>,
{
    fn zero_from(other: &'zf ZeroMap<'s, K, V>) -> Self {
        ZeroMap {
            keys: K::Container::zero_from(&other.keys),
            values: V::Container::zero_from(&other.values),
        }
    }
}

#[cfg(feature = "alloc")]
impl<'zf, 's, K0, K1, V> ZeroFrom<'zf, ZeroMap2d<'s, K0, K1, V>> for ZeroMap2d<'zf, K0, K1, V>
where
    K0: 'static + for<'b> ZeroMapKV<'b> + ?Sized,
    K1: 'static + for<'b> ZeroMapKV<'b> + ?Sized,
    V: 'static + for<'b> ZeroMapKV<'b> + ?Sized,
    <K0 as ZeroMapKV<'zf>>::Container: ZeroFrom<'zf, <K0 as ZeroMapKV<'s>>::Container>,
    <K1 as ZeroMapKV<'zf>>::Container: ZeroFrom<'zf, <K1 as ZeroMapKV<'s>>::Container>,
    <V as ZeroMapKV<'zf>>::Container: ZeroFrom<'zf, <V as ZeroMapKV<'s>>::Container>,
{
    fn zero_from(other: &'zf ZeroMap2d<'s, K0, K1, V>) -> Self {
        ZeroMap2d {
            keys0: K0::Container::zero_from(&other.keys0),
            joiner: ZeroVec::zero_from(&other.joiner),
            keys1: K1::Container::zero_from(&other.keys1),
            values: V::Container::zero_from(&other.values),
        }
    }
}
// This file is part of ICU4X. For terms of use, please see the file // called LICENSE at the top level of the ICU4X source tree // (online at: https://github.com/unicode-org/icu4x/blob/main/LICENSE ). use super::ZeroVec; use crate::{ule::AsULE, ZeroSlice}; use databake::*; impl<T: AsULE> Bake for ZeroVec<'_, T> { fn bake(&self, env: &CrateEnv) -> TokenStream { env.insert("zerovec"); if self.is_empty() { quote! { zerovec::ZeroVec::new() } } else { let bytes = databake::Bake::bake(&self.as_bytes(), env); quote! { unsafe { zerovec::ZeroVec::from_bytes_unchecked(#bytes) } } } } } impl<T: AsULE> BakeSize for ZeroVec<'_, T> { fn borrows_size(&self) -> usize { self.as_bytes().len() } } impl<T: AsULE> Bake for &ZeroSlice<T> { fn bake(&self, env: &CrateEnv) -> TokenStream { env.insert("zerovec"); if self.is_empty() { quote! { zerovec::ZeroSlice::new_empty() } } else { let bytes = databake::Bake::bake(&self.as_bytes(), env); quote! { unsafe { zerovec::ZeroSlice::from_bytes_unchecked(#bytes) } } } } } impl<T: AsULE> BakeSize for &ZeroSlice<T> { fn borrows_size(&self) -> usize { self.as_bytes().len() } } #[test] fn test_baked_vec() { test_bake!(ZeroVec<u32>, const, crate::ZeroVec::new(), zerovec); test_bake!( ZeroVec<u32>, const, unsafe { crate::ZeroVec::from_bytes_unchecked(b"\x02\x01\0\x16\0M\x01\\") }, zerovec ); } #[test] fn test_baked_slice() { test_bake!( &ZeroSlice<u32>, const, crate::ZeroSlice::new_empty(), zerovec ); test_bake!( &ZeroSlice<u32>, const, unsafe { crate::ZeroSlice::from_bytes_unchecked(b"\x02\x01\0\x16\0M\x01\\") }, zerovec ); }
// This file is part of ICU4X. For terms of use, please see the file
// called LICENSE at the top level of the ICU4X source tree
// (online at: https://github.com/unicode-org/icu4x/blob/main/LICENSE ).

#[cfg(feature = "databake")]
mod databake;
#[cfg(feature = "serde")]
mod serde;
mod slice;

pub use slice::ZeroSlice;
pub use slice::ZeroSliceIter;

use crate::ule::*;
#[cfg(feature = "alloc")]
use alloc::borrow::Cow;
#[cfg(feature = "alloc")]
use alloc::vec::Vec;
use core::cmp::{Ord, Ordering, PartialOrd};
use core::fmt;
#[cfg(feature = "alloc")]
use core::iter::FromIterator;
use core::marker::PhantomData;
use core::num::NonZeroUsize;
use core::ops::Deref;
use core::ptr::NonNull;

/// A zero-copy, byte-aligned vector for fixed-width types.
///
/// `ZeroVec<T>` is designed as a drop-in replacement for `Vec<T>` in situations where it is
/// desirable to borrow data from an unaligned byte slice, such as zero-copy deserialization.
///
/// `T` must implement [`AsULE`], which is auto-implemented for a number of built-in types,
/// including all fixed-width multibyte integers. For variable-width types like [`str`],
/// see [`VarZeroVec`](crate::VarZeroVec). [`zerovec::make_ule`](crate::make_ule) may
/// be used to automatically implement [`AsULE`] for a type and generate the underlying [`ULE`] type.
///
/// Typically, the zero-copy equivalent of a `Vec<T>` will simply be `ZeroVec<'a, T>`.
///
/// Most of the methods on `ZeroVec<'a, T>` come from its [`Deref`] implementation to [`ZeroSlice<T>`](ZeroSlice).
///
/// For creating zero-copy vectors of variable-size types, see [`VarZeroVec`](crate::VarZeroVec).
///
/// `ZeroVec<T>` behaves much like [`Cow`](alloc::borrow::Cow), where it can be constructed from
/// owned data (and then mutated!) but can also borrow from some buffer.
///
/// # Example
///
/// ```
/// use zerovec::ZeroVec;
///
/// // The little-endian bytes correspond to the numbers on the following line.
/// let nums: &[u16] = &[211, 281, 421, 461];
///
/// #[derive(serde::Serialize, serde::Deserialize)]
/// struct Data<'a> {
///     #[serde(borrow)]
///     nums: ZeroVec<'a, u16>,
/// }
///
/// // The owned version will allocate
/// let data = Data {
///     nums: ZeroVec::alloc_from_slice(nums),
/// };
/// let bincode_bytes =
///     bincode::serialize(&data).expect("Serialization should be successful");
///
/// // Will deserialize without allocations
/// let deserialized: Data = bincode::deserialize(&bincode_bytes)
///     .expect("Deserialization should be successful");
///
/// // This deserializes without allocation!
/// assert!(!deserialized.nums.is_owned());
/// assert_eq!(deserialized.nums.get(2), Some(421));
/// assert_eq!(deserialized.nums, nums);
/// ```
///
/// [`ule`]: crate::ule
///
/// # How it Works
///
/// `ZeroVec<T>` represents a slice of `T` as a slice of `T::ULE`. The difference between `T` and
/// `T::ULE` is that `T::ULE` must be encoded in little-endian with 1-byte alignment. When accessing
/// items from `ZeroVec<T>`, we fetch the `T::ULE`, convert it on the fly to `T`, and return `T` by
/// value.
///
/// Benchmarks can be found in the project repository, with some results found in the [crate-level documentation](crate).
///
/// See [the design doc](https://github.com/unicode-org/icu4x/blob/main/utils/zerovec/design_doc.md) for more details.
pub struct ZeroVec<'a, T>
where
    T: AsULE,
{
    // The (possibly owned) storage; see `EyepatchHackVector` below for why
    // the lifetime is erased here.
    vector: EyepatchHackVector<T::ULE>,

    /// Marker type, signalling variance and dropck behavior
    /// by containing all potential types this type represents
    marker1: PhantomData<T::ULE>,
    marker2: PhantomData<&'a T::ULE>,
}

// Send inherits as long as all fields are Send, but also references are Send only
// when their contents are Sync (this is the core purpose of Sync), so
// we need a Send+Sync bound since this struct can logically be a vector or a slice.
unsafe impl<'a, T: AsULE> Send for ZeroVec<'a, T> where T::ULE: Send + Sync {}
// Sync typically inherits as long as all fields are Sync
unsafe impl<'a, T: AsULE> Sync for ZeroVec<'a, T> where T::ULE: Sync {}

impl<'a, T: AsULE> Deref for ZeroVec<'a, T> {
    type Target = ZeroSlice<T>;
    #[inline]
    fn deref(&self) -> &Self::Target {
        self.as_slice()
    }
}

// Represents an unsafe potentially-owned vector/slice type, without a lifetime
// working around dropck limitations.
//
// Must either be constructed by deconstructing a Vec<U>, or from &[U] with capacity set to
// zero. Should not outlive its source &[U] in the borrowed case; this type does not in
// and of itself uphold this guarantee, but the .as_slice() method assumes it.
//
// After https://github.com/rust-lang/rust/issues/34761 stabilizes,
// we should remove this type and use #[may_dangle]
struct EyepatchHackVector<U> {
    /// Pointer to data
    /// This pointer is *always* valid, the reason it is represented as a raw pointer
    /// is that it may logically represent an `&[T::ULE]` or the ptr,len of a `Vec<T::ULE>`
    buf: NonNull<[U]>,
    #[cfg(feature = "alloc")]
    /// Borrowed if zero. Capacity of buffer above if not
    capacity: usize,
}

impl<U> EyepatchHackVector<U> {
    // Return a slice to the inner data for an arbitrary caller-specified lifetime
    // (unsafe: the caller must ensure the backing data outlives that lifetime)
    #[inline]
    unsafe fn as_arbitrary_slice<'a>(&self) -> &'a [U] {
        self.buf.as_ref()
    }
    // Return a slice to the inner data
    #[inline]
    const fn as_slice<'a>(&'a self) -> &'a [U] {
        // Note: self.buf.as_ref() is not const until 1.73
        unsafe { &*(self.buf.as_ptr() as *const [U]) }
    }

    /// Return this type as a vector
    ///
    /// Data MUST be known to be owned beforehand
    ///
    /// Because this borrows self, this is effectively creating two owners to the same
    /// data, make sure that `self` is cleaned up after this
    ///
    /// (this does not simply take `self` since then it wouldn't be usable from the Drop impl)
    #[cfg(feature = "alloc")]
    unsafe fn get_vec(&self) -> Vec<U> {
        debug_assert!(self.capacity != 0);
        let slice: &[U] = self.as_slice();
        let len = slice.len();
        // Safety: we are assuming owned, and in owned cases
        // this always represents a valid vector
        Vec::from_raw_parts(self.buf.as_ptr() as *mut U, len, self.capacity)
    }
}

#[cfg(feature = "alloc")]
impl<U> Drop for EyepatchHackVector<U> {
    #[inline]
    fn drop(&mut self) {
        // capacity == 0 means borrowed: nothing to free.
        if self.capacity != 0 {
            unsafe {
                // we don't need to clean up self here since we're already in a Drop impl
                let _ = self.get_vec();
            }
        }
    }
}

impl<'a, T: AsULE> Clone for ZeroVec<'a, T> {
    fn clone(&self) -> Self {
        // Owned data must be deep-copied into a fresh allocation.
        #[cfg(feature = "alloc")]
        if self.is_owned() {
            return ZeroVec::new_owned(self.as_ule_slice().into());
        }
        // Borrowed data is cloned by re-borrowing the same buffer (capacity: 0).
        Self {
            vector: EyepatchHackVector {
                buf: self.vector.buf,
                #[cfg(feature = "alloc")]
                capacity: 0,
            },
            marker1: PhantomData,
            marker2: PhantomData,
        }
    }
}

impl<'a, T: AsULE> AsRef<ZeroSlice<T>> for ZeroVec<'a, T> {
    fn as_ref(&self) -> &ZeroSlice<T> {
        self.as_slice()
    }
}

impl<T> fmt::Debug for ZeroVec<'_, T>
where
    T: AsULE + fmt::Debug,
{
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Renders as `ZeroVec([a, b, c])`, converting each ULE to `T` on the fly.
        write!(f, "ZeroVec([")?;
        let mut first = true;
        for el in self.iter() {
            if !first {
                write!(f, ", ")?;
            }
            write!(f,
"{el:?}")?;
            first = false;
        }
        write!(f, "])")
    }
}

impl<T> Eq for ZeroVec<'_, T> where T: AsULE + Eq {}

impl<'a, 'b, T> PartialEq<ZeroVec<'b, T>> for ZeroVec<'a, T>
where
    T: AsULE + PartialEq,
{
    #[inline]
    fn eq(&self, other: &ZeroVec<'b, T>) -> bool {
        // Note: T implements PartialEq but not T::ULE
        self.iter().eq(other.iter())
    }
}

impl<T> PartialEq<&[T]> for ZeroVec<'_, T>
where
    T: AsULE + PartialEq,
{
    #[inline]
    fn eq(&self, other: &&[T]) -> bool {
        self.iter().eq(other.iter().copied())
    }
}

impl<T, const N: usize> PartialEq<[T; N]> for ZeroVec<'_, T>
where
    T: AsULE + PartialEq,
{
    #[inline]
    fn eq(&self, other: &[T; N]) -> bool {
        self.iter().eq(other.iter().copied())
    }
}

impl<'a, T: AsULE> Default for ZeroVec<'a, T> {
    #[inline]
    fn default() -> Self {
        Self::new()
    }
}

// Ordering is lexicographic over the logical `T` elements.
impl<'a, T: AsULE + PartialOrd> PartialOrd for ZeroVec<'a, T> {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        self.iter().partial_cmp(other.iter())
    }
}

impl<'a, T: AsULE + Ord> Ord for ZeroVec<'a, T> {
    fn cmp(&self, other: &Self) -> Ordering {
        self.iter().cmp(other.iter())
    }
}

impl<'a, T: AsULE> AsRef<[T::ULE]> for ZeroVec<'a, T> {
    fn as_ref(&self) -> &[T::ULE] {
        self.as_ule_slice()
    }
}

impl<'a, T: AsULE> From<&'a [T::ULE]> for ZeroVec<'a, T> {
    fn from(other: &'a [T::ULE]) -> Self {
        ZeroVec::new_borrowed(other)
    }
}

#[cfg(feature = "alloc")]
impl<'a, T: AsULE> From<Vec<T::ULE>> for ZeroVec<'a, T> {
    fn from(other: Vec<T::ULE>) -> Self {
        ZeroVec::new_owned(other)
    }
}

impl<'a, T: AsULE> ZeroVec<'a, T> {
    /// Creates a new, borrowed, empty `ZeroVec<T>`.
    ///
    /// # Examples
    ///
    /// ```
    /// use zerovec::ZeroVec;
    ///
    /// let zv: ZeroVec<u16> = ZeroVec::new();
    /// assert!(zv.is_empty());
    /// ```
    #[inline]
    pub const fn new() -> Self {
        Self::new_borrowed(&[])
    }

    /// Same as `ZeroSlice::len`, which is available through `Deref` and not `const`.
pub const fn const_len(&self) -> usize { self.vector.as_slice().len() } /// Creates a new owned `ZeroVec` using an existing /// allocated backing buffer /// /// If you have a slice of `&[T]`s, prefer using /// [`Self::alloc_from_slice()`]. #[inline] #[cfg(feature = "alloc")] pub fn new_owned(vec: Vec<T::ULE>) -> Self { // Deconstruct the vector into parts // This is the only part of the code that goes from Vec // to ZeroVec, all other such operations should use this function let capacity = vec.capacity(); let len = vec.len(); let ptr = core::mem::ManuallyDrop::new(vec).as_mut_ptr(); // Safety: `ptr` comes from Vec::as_mut_ptr, which says: // "Returns an unsafe mutable pointer to the vector’s buffer, // or a dangling raw pointer valid for zero sized reads" let ptr = unsafe { NonNull::new_unchecked(ptr) }; let buf = NonNull::slice_from_raw_parts(ptr, len); Self { vector: EyepatchHackVector { buf, capacity }, marker1: PhantomData, marker2: PhantomData, } } /// Creates a new borrowed `ZeroVec` using an existing /// backing buffer #[inline] pub const fn new_borrowed(slice: &'a [T::ULE]) -> Self { // Safety: references in Rust cannot be null. // The safe function `impl From<&T> for NonNull<T>` is not const. let slice = unsafe { NonNull::new_unchecked(slice as *const [_] as *mut [_]) }; Self { vector: EyepatchHackVector { buf: slice, #[cfg(feature = "alloc")] capacity: 0, }, marker1: PhantomData, marker2: PhantomData, } } /// Creates a new, owned, empty `ZeroVec<T>`, with a certain capacity pre-allocated. #[cfg(feature = "alloc")] pub fn with_capacity(capacity: usize) -> Self { Self::new_owned(Vec::with_capacity(capacity)) } /// Parses a `&[u8]` buffer into a `ZeroVec<T>`. /// /// This function is infallible for built-in integer types, but fallible for other types, /// such as `char`. For more information, see [`ULE::parse_bytes_to_slice`]. /// /// The bytes within the byte buffer must remain constant for the life of the ZeroVec. 
/// /// # Endianness /// /// The byte buffer must be encoded in little-endian, even if running in a big-endian /// environment. This ensures a consistent representation of data across platforms. /// /// # Example /// /// ``` /// use zerovec::ZeroVec; /// /// let bytes: &[u8] = &[0xD3, 0x00, 0x19, 0x01, 0xA5, 0x01, 0xCD, 0x01]; /// let zerovec: ZeroVec<u16> = /// ZeroVec::parse_bytes(bytes).expect("infallible"); /// /// assert!(!zerovec.is_owned()); /// assert_eq!(zerovec.get(2), Some(421)); /// ``` pub fn parse_bytes(bytes: &'a [u8]) -> Result<Self, UleError> { let slice: &'a [T::ULE] = T::ULE::parse_bytes_to_slice(bytes)?; Ok(Self::new_borrowed(slice)) } /// Uses a `&[u8]` buffer as a `ZeroVec<T>` without any verification. /// /// # Safety /// /// `bytes` need to be an output from [`ZeroSlice::as_bytes()`]. pub const unsafe fn from_bytes_unchecked(bytes: &'a [u8]) -> Self { // &[u8] and &[T::ULE] are the same slice with different length metadata. Self::new_borrowed(core::slice::from_raw_parts( bytes.as_ptr() as *const T::ULE, bytes.len() / core::mem::size_of::<T::ULE>(), )) } /// Converts a `ZeroVec<T>` into a `ZeroVec<u8>`, retaining the current ownership model. /// /// Note that the length of the ZeroVec may change. 
///
/// # Examples
///
/// Convert a borrowed `ZeroVec`:
///
/// ```
/// use zerovec::ZeroVec;
///
/// let bytes: &[u8] = &[0xD3, 0x00, 0x19, 0x01, 0xA5, 0x01, 0xCD, 0x01];
/// let zerovec: ZeroVec<u16> =
///     ZeroVec::parse_bytes(bytes).expect("infallible");
/// let zv_bytes = zerovec.into_bytes();
///
/// assert!(!zv_bytes.is_owned());
/// assert_eq!(zv_bytes.get(0), Some(0xD3));
/// ```
///
/// Convert an owned `ZeroVec`:
///
/// ```
/// use zerovec::ZeroVec;
///
/// let nums: &[u16] = &[211, 281, 421, 461];
/// let zerovec = ZeroVec::alloc_from_slice(nums);
/// let zv_bytes = zerovec.into_bytes();
///
/// assert!(zv_bytes.is_owned());
/// assert_eq!(zv_bytes.get(0), Some(0xD3));
/// ```
#[cfg(feature = "alloc")]
pub fn into_bytes(self) -> ZeroVec<'a, u8> {
    use alloc::borrow::Cow;
    // Preserve the ownership model: a borrowed buffer is reinterpreted
    // zero-copy, while an owned buffer is copied into a fresh byte Vec.
    match self.into_cow() {
        Cow::Borrowed(slice) => {
            let bytes: &'a [u8] = T::ULE::slice_as_bytes(slice);
            ZeroVec::new_borrowed(bytes)
        }
        Cow::Owned(vec) => {
            let bytes = Vec::from(T::ULE::slice_as_bytes(&vec));
            ZeroVec::new_owned(bytes)
        }
    }
}

/// Returns this [`ZeroVec`] as a [`ZeroSlice`].
///
/// To get a reference with a longer lifetime from a borrowed [`ZeroVec`],
/// use [`ZeroVec::as_maybe_borrowed`].
#[inline]
pub const fn as_slice(&self) -> &ZeroSlice<T> {
    let slice: &[T::ULE] = self.vector.as_slice();
    ZeroSlice::from_ule_slice(slice)
}

/// Casts a `ZeroVec<T>` to a compatible `ZeroVec<P>`.
///
/// `T` and `P` are compatible if they have the same `ULE` representation.
///
/// If the `ULE`s of `T` and `P` are different types but have the same size,
/// use [`Self::try_into_converted()`].
///
/// # Examples
///
/// ```
/// use zerovec::ZeroVec;
///
/// let bytes: &[u8] = &[0xD3, 0x00, 0x19, 0x01, 0xA5, 0x01, 0xCD, 0x80];
///
/// let zerovec_u16: ZeroVec<u16> =
///     ZeroVec::parse_bytes(bytes).expect("infallible");
/// assert_eq!(zerovec_u16.get(3), Some(32973));
///
/// let zerovec_i16: ZeroVec<i16> = zerovec_u16.cast();
/// assert_eq!(zerovec_i16.get(3), Some(-32563));
/// ```
#[cfg(feature = "alloc")]
pub fn cast<P>(self) -> ZeroVec<'a, P>
where
    P: AsULE<ULE = T::ULE>,
{
    // Same ULE type, so the backing buffer can be reused as-is in
    // either ownership state.
    match self.into_cow() {
        Cow::Owned(v) => ZeroVec::new_owned(v),
        Cow::Borrowed(v) => ZeroVec::new_borrowed(v),
    }
}

/// Converts a `ZeroVec<T>` into a `ZeroVec<P>`, retaining the current ownership model.
///
/// If `T` and `P` have the exact same `ULE`, use [`Self::cast()`].
///
/// # Panics
///
/// Panics if `T::ULE` and `P::ULE` are not the same size.
///
/// # Examples
///
/// Convert a borrowed `ZeroVec`:
///
/// ```
/// use zerovec::ZeroVec;
///
/// let bytes: &[u8] = &[0x7F, 0xF3, 0x01, 0x49, 0xF6, 0x01];
/// let zv_char: ZeroVec<char> =
///     ZeroVec::parse_bytes(bytes).expect("valid code points");
/// let zv_u8_3: ZeroVec<[u8; 3]> =
///     zv_char.try_into_converted().expect("infallible conversion");
///
/// assert!(!zv_u8_3.is_owned());
/// assert_eq!(zv_u8_3.get(0), Some([0x7F, 0xF3, 0x01]));
/// ```
///
/// Convert an owned `ZeroVec`:
///
/// ```
/// use zerovec::ZeroVec;
///
/// let chars: &[char] = &['🍿', '🙉'];
/// let zv_char = ZeroVec::alloc_from_slice(chars);
/// let zv_u8_3: ZeroVec<[u8; 3]> =
///     zv_char.try_into_converted().expect("length is divisible");
///
/// assert!(zv_u8_3.is_owned());
/// assert_eq!(zv_u8_3.get(0), Some([0x7F, 0xF3, 0x01]));
/// ```
///
/// If the types are not the same size, we refuse to convert:
///
/// ```should_panic
/// use zerovec::ZeroVec;
///
/// let bytes: &[u8] = &[0x7F, 0xF3, 0x01, 0x49, 0xF6, 0x01];
/// let zv_char: ZeroVec<char> =
///     ZeroVec::parse_bytes(bytes).expect("valid code points");
///
/// // Panics! core::mem::size_of::<char::ULE> != core::mem::size_of::<u16::ULE>
/// zv_char.try_into_converted::<u16>();
/// ```
///
/// Instead, convert to bytes and then parse:
///
/// ```
/// use zerovec::ZeroVec;
///
/// let bytes: &[u8] = &[0x7F, 0xF3, 0x01, 0x49, 0xF6, 0x01];
/// let zv_char: ZeroVec<char> =
///     ZeroVec::parse_bytes(bytes).expect("valid code points");
/// let zv_u16: ZeroVec<u16> =
///     zv_char.into_bytes().try_into_parsed().expect("infallible");
///
/// assert!(!zv_u16.is_owned());
/// assert_eq!(zv_u16.get(0), Some(0xF37F));
/// ```
#[cfg(feature = "alloc")]
pub fn try_into_converted<P: AsULE>(self) -> Result<ZeroVec<'a, P>, UleError> {
    assert_eq!(
        core::mem::size_of::<<T as AsULE>::ULE>(),
        core::mem::size_of::<<P as AsULE>::ULE>()
    );
    match self.into_cow() {
        Cow::Borrowed(old_slice) => {
            let bytes: &'a [u8] = T::ULE::slice_as_bytes(old_slice);
            let new_slice = P::ULE::parse_bytes_to_slice(bytes)?;
            Ok(ZeroVec::new_borrowed(new_slice))
        }
        Cow::Owned(old_vec) => {
            // Validate first so the buffer reinterpretation below cannot
            // produce invalid `P::ULE` values.
            let bytes: &[u8] = T::ULE::slice_as_bytes(&old_vec);
            P::ULE::validate_bytes(bytes)?;
            // Feature "vec_into_raw_parts" is not yet stable (#65816). Polyfill:
            let (ptr, len, cap) = {
                // Take ownership of the pointer
                let mut v = core::mem::ManuallyDrop::new(old_vec);
                // Fetch the pointer, length, and capacity
                (v.as_mut_ptr(), v.len(), v.capacity())
            };
            // Safety checklist for Vec::from_raw_parts:
            // 1. ptr came from a Vec<T>
            // 2. P and T are asserted above to be the same size
            // 3. length is what it was before
            // 4. capacity is what it was before
            let new_vec = unsafe {
                let ptr = ptr as *mut P::ULE;
                Vec::from_raw_parts(ptr, len, cap)
            };
            Ok(ZeroVec::new_owned(new_vec))
        }
    }
}

/// Check if this type is fully owned
#[inline]
pub fn is_owned(&self) -> bool {
    // A nonzero capacity is only set by `new_owned()`; borrowed ZeroVecs
    // always carry capacity 0.
    #[cfg(feature = "alloc")]
    return self.vector.capacity != 0;
    #[cfg(not(feature = "alloc"))]
    return false;
}

/// If this is a borrowed [`ZeroVec`], return it as a slice that covers
/// its lifetime parameter.
///
/// To infallibly get a [`ZeroSlice`] with a shorter lifetime, use
/// [`ZeroVec::as_slice`].
#[inline]
pub fn as_maybe_borrowed(&self) -> Option<&'a ZeroSlice<T>> {
    if self.is_owned() {
        None
    } else {
        // We can extend the lifetime of the slice to 'a
        // since we know it is borrowed
        let ule_slice = unsafe { self.vector.as_arbitrary_slice() };
        Some(ZeroSlice::from_ule_slice(ule_slice))
    }
}

/// If the ZeroVec is owned, returns the capacity of the vector.
///
/// Otherwise, if the ZeroVec is borrowed, returns `None`.
///
/// # Examples
///
/// ```
/// use zerovec::ZeroVec;
///
/// let mut zv = ZeroVec::<u8>::new_borrowed(&[0, 1, 2, 3]);
/// assert!(!zv.is_owned());
/// assert_eq!(zv.owned_capacity(), None);
///
/// // Convert to owned without appending anything
/// zv.with_mut(|v| ());
/// assert!(zv.is_owned());
/// assert_eq!(zv.owned_capacity(), Some(4.try_into().unwrap()));
///
/// // Double the size by appending
/// zv.with_mut(|v| v.push(0));
/// assert!(zv.is_owned());
/// assert_eq!(zv.owned_capacity(), Some(8.try_into().unwrap()));
/// ```
#[inline]
pub fn owned_capacity(&self) -> Option<NonZeroUsize> {
    // Borrowed ZeroVecs store capacity 0, so the NonZeroUsize conversion
    // yields None for them.
    #[cfg(feature = "alloc")]
    return NonZeroUsize::try_from(self.vector.capacity).ok();
    #[cfg(not(feature = "alloc"))]
    return None;
}
}

impl<'a> ZeroVec<'a, u8> {
/// Converts a `ZeroVec<u8>` into a `ZeroVec<T>`, retaining the current ownership model.
///
/// Note that the length of the ZeroVec may change.
///
/// # Examples
///
/// Convert a borrowed `ZeroVec`:
///
/// ```
/// use zerovec::ZeroVec;
///
/// let bytes: &[u8] = &[0xD3, 0x00, 0x19, 0x01, 0xA5, 0x01, 0xCD, 0x01];
/// let zv_bytes = ZeroVec::new_borrowed(bytes);
/// let zerovec: ZeroVec<u16> = zv_bytes.try_into_parsed().expect("infallible");
///
/// assert!(!zerovec.is_owned());
/// assert_eq!(zerovec.get(0), Some(211));
/// ```
///
/// Convert an owned `ZeroVec`:
///
/// ```
/// use zerovec::ZeroVec;
///
/// let bytes: Vec<u8> = vec![0xD3, 0x00, 0x19, 0x01, 0xA5, 0x01, 0xCD, 0x01];
/// let zv_bytes = ZeroVec::new_owned(bytes);
/// let zerovec: ZeroVec<u16> = zv_bytes.try_into_parsed().expect("infallible");
///
/// assert!(zerovec.is_owned());
/// assert_eq!(zerovec.get(0), Some(211));
/// ```
#[cfg(feature = "alloc")]
pub fn try_into_parsed<T: AsULE>(self) -> Result<ZeroVec<'a, T>, UleError> {
    match self.into_cow() {
        Cow::Borrowed(bytes) => {
            let slice: &'a [T::ULE] = T::ULE::parse_bytes_to_slice(bytes)?;
            Ok(ZeroVec::new_borrowed(slice))
        }
        Cow::Owned(vec) => {
            let slice = Vec::from(T::ULE::parse_bytes_to_slice(&vec)?);
            Ok(ZeroVec::new_owned(slice))
        }
    }
}
}

impl<'a, T> ZeroVec<'a, T>
where
    T: AsULE,
{
/// Creates a `ZeroVec<T>` from a `&[T]` by allocating memory.
///
/// This function results in an `Owned` instance of `ZeroVec<T>`.
///
/// # Example
///
/// ```
/// use zerovec::ZeroVec;
///
/// // The little-endian bytes correspond to the numbers on the following line.
/// let bytes: &[u8] = &[0xD3, 0x00, 0x19, 0x01, 0xA5, 0x01, 0xCD, 0x01];
/// let nums: &[u16] = &[211, 281, 421, 461];
///
/// let zerovec = ZeroVec::alloc_from_slice(nums);
///
/// assert!(zerovec.is_owned());
/// assert_eq!(bytes, zerovec.as_bytes());
/// ```
#[inline]
#[cfg(feature = "alloc")]
pub fn alloc_from_slice(other: &[T]) -> Self {
    Self::new_owned(other.iter().copied().map(T::to_unaligned).collect())
}

/// Creates a `Vec<T>` from a `ZeroVec<T>`.
///
/// # Example
///
/// ```
/// use zerovec::ZeroVec;
///
/// let nums: &[u16] = &[211, 281, 421, 461];
/// let vec: Vec<u16> = ZeroVec::alloc_from_slice(nums).to_vec();
///
/// assert_eq!(nums, vec.as_slice());
/// ```
#[inline]
#[cfg(feature = "alloc")]
pub fn to_vec(&self) -> Vec<T> {
    self.iter().collect()
}
}

impl<'a, T> ZeroVec<'a, T>
where
    T: EqULE,
{
/// Attempts to create a `ZeroVec<'a, T>` from a `&'a [T]` by borrowing the argument.
///
/// If this is not possible, such as on a big-endian platform, `None` is returned.
///
/// # Example
///
/// ```
/// use zerovec::ZeroVec;
///
/// // The little-endian bytes correspond to the numbers on the following line.
/// let bytes: &[u8] = &[0xD3, 0x00, 0x19, 0x01, 0xA5, 0x01, 0xCD, 0x01];
/// let nums: &[u16] = &[211, 281, 421, 461];
///
/// if let Some(zerovec) = ZeroVec::try_from_slice(nums) {
///     assert!(!zerovec.is_owned());
///     assert_eq!(bytes, zerovec.as_bytes());
/// }
/// ```
#[inline]
pub fn try_from_slice(slice: &'a [T]) -> Option<Self> {
    T::slice_to_unaligned(slice).map(|ule_slice| Self::new_borrowed(ule_slice))
}

/// Creates a `ZeroVec<'a, T>` from a `&'a [T]`, either by borrowing the argument or by
/// allocating a new vector.
///
/// This is a cheap operation on little-endian platforms, falling back to a more expensive
/// operation on big-endian platforms.
///
/// # Example
///
/// ```
/// use zerovec::ZeroVec;
///
/// // The little-endian bytes correspond to the numbers on the following line.
/// let bytes: &[u8] = &[0xD3, 0x00, 0x19, 0x01, 0xA5, 0x01, 0xCD, 0x01];
/// let nums: &[u16] = &[211, 281, 421, 461];
///
/// let zerovec = ZeroVec::from_slice_or_alloc(nums);
///
/// // Note: zerovec could be either borrowed or owned.
/// assert_eq!(bytes, zerovec.as_bytes());
/// ```
#[inline]
#[cfg(feature = "alloc")]
pub fn from_slice_or_alloc(slice: &'a [T]) -> Self {
    // Zero-copy borrow when the byte layout matches; otherwise allocate.
    Self::try_from_slice(slice).unwrap_or_else(|| Self::alloc_from_slice(slice))
}
}

impl<'a, T> ZeroVec<'a, T>
where
    T: AsULE,
{
/// Mutates each element according to a given function, meant to be
/// a more convenient version of calling `.iter_mut()` with
/// [`ZeroVec::with_mut()`] which serves fewer use cases.
///
/// This will convert the ZeroVec into an owned ZeroVec if not already the case.
///
/// # Example
///
/// ```
/// use zerovec::ZeroVec;
///
/// let bytes: &[u8] = &[0xD3, 0x00, 0x19, 0x01, 0xA5, 0x01, 0xCD, 0x01];
/// let mut zerovec: ZeroVec<u16> =
///     ZeroVec::parse_bytes(bytes).expect("infallible");
///
/// zerovec.for_each_mut(|item| *item += 1);
///
/// assert_eq!(zerovec.to_vec(), &[212, 282, 422, 462]);
/// assert!(zerovec.is_owned());
/// ```
#[inline]
#[cfg(feature = "alloc")]
pub fn for_each_mut(&mut self, mut f: impl FnMut(&mut T)) {
    self.to_mut_slice().iter_mut().for_each(|item| {
        // Round-trip through the aligned type so `f` can operate on plain `T`
        let mut aligned = T::from_unaligned(*item);
        f(&mut aligned);
        *item = aligned.to_unaligned()
    })
}

/// Same as [`ZeroVec::for_each_mut()`], but bubbles up errors.
///
/// # Example
///
/// ```
/// use zerovec::ZeroVec;
///
/// let bytes: &[u8] = &[0xD3, 0x00, 0x19, 0x01, 0xA5, 0x01, 0xCD, 0x01];
/// let mut zerovec: ZeroVec<u16> =
///     ZeroVec::parse_bytes(bytes).expect("infallible");
///
/// zerovec.try_for_each_mut(|item| {
///     *item = item.checked_add(1).ok_or(())?;
///     Ok(())
/// })?;
///
/// assert_eq!(zerovec.to_vec(), &[212, 282, 422, 462]);
/// assert!(zerovec.is_owned());
/// # Ok::<(), ()>(())
/// ```
#[inline]
#[cfg(feature = "alloc")]
pub fn try_for_each_mut<E>(
    &mut self,
    mut f: impl FnMut(&mut T) -> Result<(), E>,
) -> Result<(), E> {
    // Short-circuits on the first error; earlier writes are kept.
    self.to_mut_slice().iter_mut().try_for_each(|item| {
        let mut aligned = T::from_unaligned(*item);
        f(&mut aligned)?;
        *item = aligned.to_unaligned();
        Ok(())
    })
}

/// Converts a borrowed ZeroVec to an owned ZeroVec. No-op if already owned.
///
/// # Example
///
/// ```
/// use zerovec::ZeroVec;
///
/// let bytes: &[u8] = &[0xD3, 0x00, 0x19, 0x01, 0xA5, 0x01, 0xCD, 0x01];
/// let zerovec: ZeroVec<u16> =
///     ZeroVec::parse_bytes(bytes).expect("infallible");
/// assert!(!zerovec.is_owned());
///
/// let owned = zerovec.into_owned();
/// assert!(owned.is_owned());
/// ```
#[cfg(feature = "alloc")]
pub fn into_owned(self) -> ZeroVec<'static, T> {
    use alloc::borrow::Cow;
    match self.into_cow() {
        Cow::Owned(vec) => ZeroVec::new_owned(vec),
        Cow::Borrowed(b) => ZeroVec::new_owned(b.into()),
    }
}

/// Allows the ZeroVec to be mutated by converting it to an owned variant, and producing
/// a mutable vector of ULEs. If you only need a mutable slice, consider using [`Self::to_mut_slice()`]
/// instead.
///
/// # Example
///
/// ```rust
/// # use crate::zerovec::ule::AsULE;
/// use zerovec::ZeroVec;
///
/// let bytes: &[u8] = &[0xD3, 0x00, 0x19, 0x01, 0xA5, 0x01, 0xCD, 0x01];
/// let mut zerovec: ZeroVec<u16> =
///     ZeroVec::parse_bytes(bytes).expect("infallible");
/// assert!(!zerovec.is_owned());
///
/// zerovec.with_mut(|v| v.push(12_u16.to_unaligned()));
/// assert!(zerovec.is_owned());
/// ```
#[cfg(feature = "alloc")]
pub fn with_mut<R>(&mut self, f: impl FnOnce(&mut alloc::vec::Vec<T::ULE>) -> R) -> R {
    use alloc::borrow::Cow;
    // We're in danger if f() panics whilst we've moved a vector out of self;
    // replace it with an empty dummy vector for now
    let this = core::mem::take(self);
    let mut vec = match this.into_cow() {
        Cow::Owned(v) => v,
        Cow::Borrowed(s) => s.into(),
    };
    let ret = f(&mut vec);
    // Reinstall the (possibly modified) buffer; self is now always owned.
    *self = Self::new_owned(vec);
    ret
}

/// Allows the ZeroVec to be mutated by converting it to an owned variant (if necessary)
/// and returning a slice to its backing buffer. [`Self::with_mut()`] allows for mutation
/// of the vector itself.
///
/// # Example
///
/// ```rust
/// # use crate::zerovec::ule::AsULE;
/// use zerovec::ZeroVec;
///
/// let bytes: &[u8] = &[0xD3, 0x00, 0x19, 0x01, 0xA5, 0x01, 0xCD, 0x01];
/// let mut zerovec: ZeroVec<u16> =
///     ZeroVec::parse_bytes(bytes).expect("infallible");
/// assert!(!zerovec.is_owned());
///
/// zerovec.to_mut_slice()[1] = 5u16.to_unaligned();
/// assert!(zerovec.is_owned());
/// ```
#[cfg(feature = "alloc")]
pub fn to_mut_slice(&mut self) -> &mut [T::ULE] {
    if !self.is_owned() {
        // `buf` is either a valid vector or slice of `T::ULE`s, either
        // way it's always valid
        let slice = self.vector.as_slice();
        *self = ZeroVec::new_owned(slice.into());
    }
    // Safety: self is owned at this point (converted above if needed), so
    // `buf` points at a live buffer to which `&mut self` gives unique access.
    unsafe { self.vector.buf.as_mut() }
}

/// Remove all elements from this ZeroVec and reset it to an empty borrowed state.
pub fn clear(&mut self) {
    *self = Self::new_borrowed(&[])
}

/// Removes the first element of the ZeroVec. The ZeroVec remains in the same
/// borrowed or owned state.
///
/// # Examples
///
/// ```
/// # use crate::zerovec::ule::AsULE;
/// use zerovec::ZeroVec;
///
/// let bytes: &[u8] = &[0xD3, 0x00, 0x19, 0x01, 0xA5, 0x01, 0xCD, 0x01];
/// let mut zerovec: ZeroVec<u16> =
///     ZeroVec::parse_bytes(bytes).expect("infallible");
/// assert!(!zerovec.is_owned());
///
/// let first = zerovec.take_first().unwrap();
/// assert_eq!(first, 0x00D3);
/// assert!(!zerovec.is_owned());
///
/// let mut zerovec = zerovec.into_owned();
/// assert!(zerovec.is_owned());
/// let first = zerovec.take_first().unwrap();
/// assert_eq!(first, 0x0119);
/// assert!(zerovec.is_owned());
/// ```
#[cfg(feature = "alloc")]
pub fn take_first(&mut self) -> Option<T> {
    match core::mem::take(self).into_cow() {
        Cow::Owned(mut vec) => {
            if vec.is_empty() {
                return None;
            }
            // Vec::remove(0) shifts the remaining elements left
            let ule = vec.remove(0);
            let rv = T::from_unaligned(ule);
            *self = ZeroVec::new_owned(vec);
            Some(rv)
        }
        Cow::Borrowed(b) => {
            // Borrowed: just shrink the borrowed window by one element
            let (ule, remainder) = b.split_first()?;
            let rv = T::from_unaligned(*ule);
            *self = ZeroVec::new_borrowed(remainder);
            Some(rv)
        }
    }
}

/// Removes the last element of the ZeroVec. The ZeroVec remains in the same
/// borrowed or owned state.
///
/// # Examples
///
/// ```
/// # use crate::zerovec::ule::AsULE;
/// use zerovec::ZeroVec;
///
/// let bytes: &[u8] = &[0xD3, 0x00, 0x19, 0x01, 0xA5, 0x01, 0xCD, 0x01];
/// let mut zerovec: ZeroVec<u16> =
///     ZeroVec::parse_bytes(bytes).expect("infallible");
/// assert!(!zerovec.is_owned());
///
/// let last = zerovec.take_last().unwrap();
/// assert_eq!(last, 0x01CD);
/// assert!(!zerovec.is_owned());
///
/// let mut zerovec = zerovec.into_owned();
/// assert!(zerovec.is_owned());
/// let last = zerovec.take_last().unwrap();
/// assert_eq!(last, 0x01A5);
/// assert!(zerovec.is_owned());
/// ```
#[cfg(feature = "alloc")]
pub fn take_last(&mut self) -> Option<T> {
    match core::mem::take(self).into_cow() {
        Cow::Owned(mut vec) => {
            let ule = vec.pop()?;
            let rv = T::from_unaligned(ule);
            *self = ZeroVec::new_owned(vec);
            Some(rv)
        }
        Cow::Borrowed(b) => {
            // Borrowed: shrink the borrowed window by one element
            let (ule, remainder) = b.split_last()?;
            let rv = T::from_unaligned(*ule);
            *self = ZeroVec::new_borrowed(remainder);
            Some(rv)
        }
    }
}

/// Converts the type into a `Cow<'a, [T::ULE]>`, which is
/// the logical equivalent of this type's internal representation
#[inline]
#[cfg(feature = "alloc")]
pub fn into_cow(self) -> Cow<'a, [T::ULE]> {
    // ManuallyDrop suppresses ZeroVec's destructor; ownership of the
    // buffer is handed to the returned Cow instead, avoiding a double free.
    let this = core::mem::ManuallyDrop::new(self);
    if this.is_owned() {
        let vec = unsafe {
            // safe to call: we know it's owned,
            // and `self`/`this` are thenceforth no longer used or dropped
            { this }.vector.get_vec()
        };
        Cow::Owned(vec)
    } else {
        // We can extend the lifetime of the slice to 'a
        // since we know it is borrowed
        let slice = unsafe { { this }.vector.as_arbitrary_slice() };
        Cow::Borrowed(slice)
    }
}
}

#[cfg(feature = "alloc")]
impl<T: AsULE> FromIterator<T> for ZeroVec<'_, T> {
    /// Creates an owned [`ZeroVec`] from an iterator of values.
    fn from_iter<I>(iter: I) -> Self
    where
        I: IntoIterator<Item = T>,
    {
        ZeroVec::new_owned(iter.into_iter().map(|t| t.to_unaligned()).collect())
    }
}

/// Convenience wrapper for [`ZeroSlice::from_ule_slice`]. The value will be created at compile-time,
/// meaning that all arguments must also be constant.
///
/// # Arguments
///
/// * `$aligned` - The type of an element in its canonical, aligned form, e.g., `char`.
/// * `$convert` - A const function that converts an `$aligned` into its unaligned equivalent, e.g.,
///   `const fn from_aligned(a: CanonicalType) -> CanonicalType::ULE`.
/// * `$x` - The elements that the `ZeroSlice` will hold.
///
/// # Examples
///
/// Using array-conversion functions provided by this crate:
///
/// ```
/// use zerovec::{ZeroSlice, zeroslice, ule::AsULE};
///
/// const SIGNATURE: &ZeroSlice<char> = zeroslice!(char; <char as AsULE>::ULE::from_aligned; ['b', 'y', 'e', '✌']);
/// const EMPTY: &ZeroSlice<u32> = zeroslice![];
///
/// let empty: &ZeroSlice<u32> = zeroslice![];
/// let nums = zeroslice!(u32; <u32 as AsULE>::ULE::from_unsigned; [1, 2, 3, 4, 5]);
/// assert_eq!(nums.last().unwrap(), 5);
/// ```
///
/// Using a custom array-conversion function:
///
/// ```
/// use zerovec::{ule::AsULE, ule::RawBytesULE, zeroslice, ZeroSlice};
///
/// const fn be_convert(num: i16) -> <i16 as AsULE>::ULE {
///     RawBytesULE(num.to_be_bytes())
/// }
///
/// const NUMBERS_BE: &ZeroSlice<i16> =
///     zeroslice!(i16; be_convert; [1, -2, 3, -4, 5]);
/// ```
#[macro_export]
macro_rules! zeroslice {
    () => (
        $crate::ZeroSlice::new_empty()
    );
    ($aligned:ty; $convert:expr; [$($x:expr),+ $(,)?]) => (
        // The const block forces the conversions to happen at compile time
        $crate::ZeroSlice::<$aligned>::from_ule_slice(
            {const X: &[<$aligned as $crate::ule::AsULE>::ULE] = &[
                    $($convert($x)),*
            ]; X}
        )
    );
}

/// Creates a borrowed `ZeroVec`. Convenience wrapper for `zeroslice!(...).as_zerovec()`. The value
/// will be created at compile-time, meaning that all arguments must also be constant.
///
/// See [`zeroslice!`](crate::zeroslice) for more information.
///
/// # Examples
///
/// ```
/// use zerovec::{ZeroVec, zerovec, ule::AsULE};
///
/// const SIGNATURE: ZeroVec<char> = zerovec!(char; <char as AsULE>::ULE::from_aligned; ['a', 'y', 'e', '✌']);
/// assert!(!SIGNATURE.is_owned());
///
/// const EMPTY: ZeroVec<u32> = zerovec![];
/// assert!(!EMPTY.is_owned());
/// ```
#[macro_export]
macro_rules! zerovec {
    () => (
        $crate::ZeroVec::new()
    );
    ($aligned:ty; $convert:expr; [$($x:expr),+ $(,)?]) => (
        $crate::zeroslice![$aligned; $convert; [$($x),+]].as_zerovec()
    );
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::samples::*;

    #[test]
    fn test_get() {
        // Owned and borrowed ZeroVecs should behave identically under get()
        {
            let zerovec = ZeroVec::from_slice_or_alloc(TEST_SLICE);
            assert_eq!(zerovec.get(0), Some(TEST_SLICE[0]));
            assert_eq!(zerovec.get(1), Some(TEST_SLICE[1]));
            assert_eq!(zerovec.get(2), Some(TEST_SLICE[2]));
        }
        {
            let zerovec = ZeroVec::<u32>::parse_bytes(TEST_BUFFER_LE).unwrap();
            assert_eq!(zerovec.get(0), Some(TEST_SLICE[0]));
            assert_eq!(zerovec.get(1), Some(TEST_SLICE[1]));
            assert_eq!(zerovec.get(2), Some(TEST_SLICE[2]));
        }
    }

    #[test]
    fn test_binary_search() {
        {
            let zerovec = ZeroVec::from_slice_or_alloc(TEST_SLICE);
            assert_eq!(Ok(3), zerovec.binary_search(&0x0e0d0c));
            assert_eq!(Err(3), zerovec.binary_search(&0x0c0d0c));
        }
        {
            let zerovec = ZeroVec::<u32>::parse_bytes(TEST_BUFFER_LE).unwrap();
            assert_eq!(Ok(3), zerovec.binary_search(&0x0e0d0c));
            assert_eq!(Err(3), zerovec.binary_search(&0x0c0d0c));
        }
    }

    #[test]
    fn test_odd_alignment() {
        // Parse u32s out of buffers starting at several different byte
        // offsets to cover unaligned reads.
        assert_eq!(
            Some(0x020100),
            ZeroVec::<u32>::parse_bytes(TEST_BUFFER_LE).unwrap().get(0)
        );
        assert_eq!(
            Some(0x04000201),
            ZeroVec::<u32>::parse_bytes(&TEST_BUFFER_LE[1..77])
                .unwrap()
                .get(0)
        );
        assert_eq!(
            Some(0x05040002),
            ZeroVec::<u32>::parse_bytes(&TEST_BUFFER_LE[2..78])
                .unwrap()
                .get(0)
        );
        assert_eq!(
            Some(0x06050400),
            ZeroVec::<u32>::parse_bytes(&TEST_BUFFER_LE[3..79])
                .unwrap()
                .get(0)
        );
        assert_eq!(
            Some(0x060504),
            ZeroVec::<u32>::parse_bytes(&TEST_BUFFER_LE[4..])
                .unwrap()
                .get(0)
        );
        assert_eq!(
            Some(0x4e4d4c00),
            ZeroVec::<u32>::parse_bytes(&TEST_BUFFER_LE[75..79])
                .unwrap()
                .get(0)
        );
        assert_eq!(
            Some(0x4e4d4c00),
            ZeroVec::<u32>::parse_bytes(&TEST_BUFFER_LE[3..79])
                .unwrap()
                .get(18)
        );
        assert_eq!(
            Some(0x4e4d4c),
            ZeroVec::<u32>::parse_bytes(&TEST_BUFFER_LE[76..])
                .unwrap()
                .get(0)
        );
        assert_eq!(
            Some(0x4e4d4c),
            ZeroVec::<u32>::parse_bytes(TEST_BUFFER_LE).unwrap().get(19)
        );
        // TODO(#1144): Check for correct slice length in RawBytesULE
        // assert_eq!(
        //     None,
        //     ZeroVec::<u32>::parse_bytes(&TEST_BUFFER_LE[77..])
        //         .unwrap()
        //         .get(0)
        // );
        assert_eq!(
            None,
            ZeroVec::<u32>::parse_bytes(TEST_BUFFER_LE).unwrap().get(20)
        );
        assert_eq!(
            None,
            ZeroVec::<u32>::parse_bytes(&TEST_BUFFER_LE[3..79])
                .unwrap()
                .get(19)
        );
    }
}
// This file is part of ICU4X. For terms of use, please see the file
// called LICENSE at the top level of the ICU4X source tree
// (online at: https://github.com/unicode-org/icu4x/blob/main/LICENSE ).

use super::{ZeroSlice, ZeroVec};
use crate::ule::*;
use alloc::boxed::Box;
use alloc::vec::Vec;
use core::fmt;
use core::marker::PhantomData;
use core::mem;
use serde::de::{self, Deserialize, Deserializer, SeqAccess, Visitor};
// NOTE(review): the `serde::de` import above is not cfg-gated while this one
// is — verify whether the gate is intentional.
#[cfg(feature = "serde")]
use serde::ser::{Serialize, SerializeSeq, Serializer};

// Deserializes a ZeroVec either zero-copy (from a borrowed byte buffer) or
// by collecting a sequence of elements into an owned vector.
struct ZeroVecVisitor<T> {
    // fn() -> T keeps the visitor covariant in T without owning a T
    marker: PhantomData<fn() -> T>,
}

impl<T> Default for ZeroVecVisitor<T> {
    fn default() -> Self {
        Self {
            marker: PhantomData,
        }
    }
}

impl<'de, T> Visitor<'de> for ZeroVecVisitor<T>
where
    T: 'de + Deserialize<'de> + AsULE,
{
    type Value = ZeroVec<'de, T>;

    fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
        formatter.write_str("a sequence or borrowed buffer of fixed-width elements")
    }

    // Zero-copy path: validate the borrowed bytes and borrow from them
    fn visit_borrowed_bytes<E>(self, bytes: &'de [u8]) -> Result<Self::Value, E>
    where
        E: de::Error,
    {
        ZeroVec::parse_bytes(bytes).map_err(de::Error::custom)
    }

    // Owned path: collect a sequence of `T` into an owned ULE vector
    fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
    where
        A: SeqAccess<'de>,
    {
        let mut vec: Vec<T::ULE> = if let Some(capacity) = seq.size_hint() {
            Vec::with_capacity(capacity)
        } else {
            Vec::new()
        };
        while let Some(value) = seq.next_element::<T>()? {
            vec.push(T::to_unaligned(value));
        }
        Ok(ZeroVec::new_owned(vec))
    }
}

/// This impl requires enabling the optional `serde` Cargo feature of the `zerovec` crate
impl<'de, 'a, T> Deserialize<'de> for ZeroVec<'a, T>
where
    T: 'de + Deserialize<'de> + AsULE,
    'de: 'a,
{
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        let visitor = ZeroVecVisitor::default();
        // Human-readable formats (e.g. JSON) carry a sequence of values;
        // binary formats carry the raw byte buffer.
        if deserializer.is_human_readable() {
            deserializer.deserialize_seq(visitor)
        } else {
            deserializer.deserialize_bytes(visitor)
        }
    }
}

/// This impl requires enabling the optional `serde` Cargo feature of the `zerovec` crate
impl<T> Serialize for ZeroVec<'_, T>
where
    T: Serialize + AsULE,
{
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        // Mirror of the Deserialize impl: sequence for human-readable
        // formats, raw bytes for binary formats.
        if serializer.is_human_readable() {
            let mut seq = serializer.serialize_seq(Some(self.len()))?;
            for value in self.iter() {
                seq.serialize_element(&value)?;
            }
            seq.end()
        } else {
            serializer.serialize_bytes(self.as_bytes())
        }
    }
}

/// This impl requires enabling the optional `serde` Cargo feature of the `zerovec` crate
impl<'de, T> Deserialize<'de> for Box<ZeroSlice<T>>
where
    T: Deserialize<'de> + AsULE + 'static,
{
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        let mut zv = ZeroVec::<T>::deserialize(deserializer)?;
        // Move the backing buffer out of the (possibly borrowed) ZeroVec;
        // with_mut converts to owned first if necessary.
        let vec = zv.with_mut(mem::take);
        Ok(ZeroSlice::from_boxed_slice(vec.into_boxed_slice()))
    }
}

/// This impl requires enabling the optional `serde` Cargo feature of the `zerovec` crate
impl<'de, 'a, T> Deserialize<'de> for &'a ZeroSlice<T>
where
    T: Deserialize<'de> + AsULE + 'static,
    'de: 'a,
{
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        if deserializer.is_human_readable() {
            Err(de::Error::custom(
                "&ZeroSlice cannot be deserialized from human-readable formats",
            ))
        } else {
            let deserialized: ZeroVec<'a, T> = ZeroVec::deserialize(deserializer)?;
            // A plain reference can only be produced if the ZeroVec ended up
            // borrowing from the deserializer's input
            let borrowed = if let Some(b) = deserialized.as_maybe_borrowed() {
                b
            }
            else {
                return Err(de::Error::custom(
                    "&ZeroSlice can only deserialize in zero-copy ways",
                ));
            };
            Ok(borrowed)
        }
    }
}

/// This impl requires enabling the optional `serde` Cargo feature of the `zerovec` crate
impl<T> Serialize for ZeroSlice<T>
where
    T: Serialize + AsULE,
{
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        // Delegate to the ZeroVec impl via a cheap borrowed view
        self.as_zerovec().serialize(serializer)
    }
}

#[cfg(test)]
#[allow(non_camel_case_types)]
mod test {
    use crate::samples::*;
    use crate::ZeroVec;

    #[derive(serde::Serialize, serde::Deserialize)]
    struct DeriveTest_ZeroVec<'data> {
        #[serde(borrow)]
        _data: ZeroVec<'data, u16>,
    }

    #[test]
    fn test_serde_json() {
        let zerovec_orig = ZeroVec::from_slice_or_alloc(TEST_SLICE);
        let json_str = serde_json::to_string(&zerovec_orig).expect("serialize");
        assert_eq!(JSON_STR, json_str);
        // ZeroVec should deserialize from JSON to either Vec or ZeroVec
        let vec_new: Vec<u32> =
            serde_json::from_str(&json_str).expect("deserialize from buffer to Vec");
        assert_eq!(
            zerovec_orig,
            ZeroVec::<u32>::from_slice_or_alloc(vec_new.as_slice())
        );
        let zerovec_new: ZeroVec<u32> =
            serde_json::from_str(&json_str).expect("deserialize from buffer to ZeroVec");
        assert_eq!(zerovec_orig, zerovec_new);
        // JSON goes through the sequence path, so the result is owned
        assert!(zerovec_new.is_owned());
    }

    #[test]
    fn test_serde_bincode() {
        let zerovec_orig = ZeroVec::from_slice_or_alloc(TEST_SLICE);
        let bincode_buf = bincode::serialize(&zerovec_orig).expect("serialize");
        assert_eq!(BINCODE_BUF, bincode_buf);
        // ZeroVec should deserialize from Bincode to ZeroVec but not Vec
        bincode::deserialize::<Vec<u32>>(&bincode_buf).expect_err("deserialize from buffer to Vec");
        let zerovec_new: ZeroVec<u32> =
            bincode::deserialize(&bincode_buf).expect("deserialize from buffer to ZeroVec");
        assert_eq!(zerovec_orig, zerovec_new);
        // Binary formats take the zero-copy path, so the result is borrowed
        assert!(!zerovec_new.is_owned());
    }

    #[test]
    fn test_serde_rmp() {
        let zerovec_orig = ZeroVec::from_slice_or_alloc(TEST_SLICE);
        let rmp_buf = rmp_serde::to_vec(&zerovec_orig).expect("serialize");
        // ZeroVec should deserialize from the binary buffer to ZeroVec but not
        // Vec. NOTE(review): this negative test feeds the RMP buffer to
        // bincode — presumably intentional, but verify.
        bincode::deserialize::<Vec<u32>>(&rmp_buf).expect_err("deserialize from buffer to Vec");
        let zerovec_new: ZeroVec<u32> =
            rmp_serde::from_slice(&rmp_buf).expect("deserialize from buffer to ZeroVec");
        assert_eq!(zerovec_orig, zerovec_new);
        assert!(!zerovec_new.is_owned());
    }

    #[test]
    fn test_chars_valid() {
        // 1-byte, 2-byte, 3-byte, and 4-byte character in UTF-8 (not as relevant in UTF-32)
        let zerovec_orig = ZeroVec::alloc_from_slice(&['w', 'ω', '文', '𑄃']);
        let bincode_buf = bincode::serialize(&zerovec_orig).expect("serialize");
        let zerovec_new: ZeroVec<char> =
            bincode::deserialize(&bincode_buf).expect("deserialize from buffer to ZeroVec");
        assert_eq!(zerovec_orig, zerovec_new);
        assert!(!zerovec_new.is_owned());
    }

    #[test]
    fn test_chars_invalid() {
        // 119 and 120 are valid, but not 0xD800 (high surrogate)
        let zerovec_orig: ZeroVec<u32> = ZeroVec::from_slice_or_alloc(&[119, 0xD800, 120]);
        let bincode_buf = bincode::serialize(&zerovec_orig).expect("serialize");
        let zerovec_result = bincode::deserialize::<ZeroVec<char>>(&bincode_buf);
        assert!(zerovec_result.is_err());
    }
}
// This file is part of ICU4X. For terms of use, please see the file
// called LICENSE at the top level of the ICU4X source tree
// (online at: https://github.com/unicode-org/icu4x/blob/main/LICENSE ).

use super::*;
use core::cmp::Ordering;
use core::ops::Range;

/// A zero-copy "slice", i.e. the zero-copy version of `[T]`.
///
/// This behaves
/// similarly to [`ZeroVec<T>`], however [`ZeroVec<T>`] is allowed to contain
/// owned data and as such is ideal for deserialization since most human readable
/// serialization formats cannot unconditionally deserialize zero-copy.
///
/// This type can be used inside [`VarZeroVec<T>`](crate::VarZeroVec) and [`ZeroMap`](crate::ZeroMap):
/// This essentially allows for the construction of zero-copy types isomorphic to `Vec<Vec<T>>` by instead
/// using `VarZeroVec<ZeroSlice<T>>`. See the [`VarZeroVec`](crate::VarZeroVec) docs for an example.
///
/// # Examples
///
/// Const-construct a ZeroSlice of u16:
///
/// ```
/// use zerovec::ule::AsULE;
/// use zerovec::ZeroSlice;
///
/// const DATA: &ZeroSlice<u16> =
///     ZeroSlice::<u16>::from_ule_slice(&<u16 as AsULE>::ULE::from_array([
///         211, 281, 421, 32973,
///     ]));
///
/// assert_eq!(DATA.get(1), Some(281));
/// ```
// repr(transparent) over [T::ULE] is what makes the reference casts in
// from_ule_slice / from_boxed_slice sound.
#[repr(transparent)]
pub struct ZeroSlice<T: AsULE>([T::ULE]);

impl<T> ZeroSlice<T>
where
    T: AsULE,
{
/// Returns an empty slice.
pub const fn new_empty() -> &'static Self {
    Self::from_ule_slice(&[])
}

/// Get this [`ZeroSlice`] as a borrowed [`ZeroVec`]
///
/// [`ZeroSlice`] does not have most of the methods that [`ZeroVec`] does,
/// so it is recommended to convert it to a [`ZeroVec`] before doing anything.
#[inline]
pub const fn as_zerovec(&self) -> ZeroVec<'_, T> {
    ZeroVec::new_borrowed(&self.0)
}

/// Attempt to construct a `&ZeroSlice<T>` from a byte slice, returning an error
/// if it's not a valid byte sequence
pub fn parse_bytes(bytes: &[u8]) -> Result<&Self, UleError> {
    T::ULE::parse_bytes_to_slice(bytes).map(Self::from_ule_slice)
}

/// Uses a `&[u8]` buffer as a `ZeroVec<T>` without any verification.
///
/// # Safety
///
/// `bytes` need to be an output from [`ZeroSlice::as_bytes()`].
pub const unsafe fn from_bytes_unchecked(bytes: &[u8]) -> &Self {
    // &[u8] and &[T::ULE] are the same slice with different length metadata.
    Self::from_ule_slice(core::slice::from_raw_parts(
        bytes.as_ptr() as *const T::ULE,
        bytes.len() / core::mem::size_of::<T::ULE>(),
    ))
}

/// Construct a `&ZeroSlice<T>` from a slice of ULEs.
///
/// This function can be used for constructing ZeroVecs in a const context, avoiding
/// parsing checks.
///
/// See [`ZeroSlice`] for an example.
#[inline]
pub const fn from_ule_slice(slice: &[T::ULE]) -> &Self {
    // This is safe because ZeroSlice is transparent over [T::ULE]
    // so &ZeroSlice<T> can be safely cast from &[T::ULE]
    unsafe { &*(slice as *const _ as *const Self) }
}

/// Construct a `Box<ZeroSlice<T>>` from a boxed slice of ULEs
#[inline]
#[cfg(feature = "alloc")]
pub fn from_boxed_slice(slice: alloc::boxed::Box<[T::ULE]>) -> alloc::boxed::Box<Self> {
    // This is safe because ZeroSlice is transparent over [T::ULE]
    // so Box<ZeroSlice<T>> can be safely cast from Box<[T::ULE]>
    unsafe { alloc::boxed::Box::from_raw(alloc::boxed::Box::into_raw(slice) as *mut Self) }
}

/// Returns this slice as its underlying `&[u8]` byte buffer representation.
///
/// Useful for serialization.
///
/// # Example
///
/// ```
/// use zerovec::ZeroVec;
///
/// // The little-endian bytes correspond to the numbers on the following line.
/// let bytes: &[u8] = &[0xD3, 0x00, 0x19, 0x01, 0xA5, 0x01, 0xCD, 0x80]; /// let nums: &[u16] = &[211, 281, 421, 32973]; /// /// let zerovec = ZeroVec::alloc_from_slice(nums); /// /// assert_eq!(bytes, zerovec.as_bytes()); /// ``` #[inline] pub fn as_bytes(&self) -> &[u8] { T::ULE::slice_as_bytes(self.as_ule_slice()) } /// Dereferences this slice as `&[T::ULE]`. #[inline] pub const fn as_ule_slice(&self) -> &[T::ULE] { &self.0 } /// Returns the number of elements in this slice. /// /// # Example /// /// ``` /// use zerovec::ule::AsULE; /// use zerovec::ZeroVec; /// /// let bytes: &[u8] = &[0xD3, 0x00, 0x19, 0x01, 0xA5, 0x01, 0xCD, 0x80]; /// let zerovec: ZeroVec<u16> = /// ZeroVec::parse_bytes(bytes).expect("infallible"); /// /// assert_eq!(4, zerovec.len()); /// assert_eq!( /// bytes.len(), /// zerovec.len() * std::mem::size_of::<<u16 as AsULE>::ULE>() /// ); /// ``` #[inline] pub const fn len(&self) -> usize { self.as_ule_slice().len() } /// Returns whether this slice is empty. /// /// # Example /// /// ``` /// use zerovec::ZeroVec; /// /// let bytes: &[u8] = &[0xD3, 0x00, 0x19, 0x01, 0xA5, 0x01, 0xCD, 0x80]; /// let zerovec: ZeroVec<u16> = /// ZeroVec::parse_bytes(bytes).expect("infallible"); /// assert!(!zerovec.is_empty()); /// /// let emptyvec: ZeroVec<u16> = ZeroVec::parse_bytes(&[]).expect("infallible"); /// assert!(emptyvec.is_empty()); /// ``` #[inline] pub const fn is_empty(&self) -> bool { self.as_ule_slice().is_empty() } } impl<T> ZeroSlice<T> where T: AsULE, { /// Gets the element at the specified index. Returns `None` if out of range. 
/// /// # Example /// /// ``` /// use zerovec::ZeroVec; /// /// let bytes: &[u8] = &[0xD3, 0x00, 0x19, 0x01, 0xA5, 0x01, 0xCD, 0x80]; /// let zerovec: ZeroVec<u16> = /// ZeroVec::parse_bytes(bytes).expect("infallible"); /// /// assert_eq!(zerovec.get(2), Some(421)); /// assert_eq!(zerovec.get(4), None); /// ``` #[inline] pub fn get(&self, index: usize) -> Option<T> { self.as_ule_slice() .get(index) .copied() .map(T::from_unaligned) } /// Gets the entire slice as an array of length `N`. Returns `None` if the slice /// does not have exactly `N` elements. /// /// # Example /// /// ``` /// use zerovec::ZeroVec; /// /// let bytes: &[u8] = &[0xD3, 0x00, 0x19, 0x01, 0xA5, 0x01, 0xCD, 0x80]; /// let zerovec: ZeroVec<u16> = /// ZeroVec::parse_bytes(bytes).expect("infallible"); /// let array: [u16; 4] = /// zerovec.get_as_array().expect("should be 4 items in array"); /// /// assert_eq!(array[2], 421); /// ``` pub fn get_as_array<const N: usize>(&self) -> Option<[T; N]> { let ule_array = <&[T::ULE; N]>::try_from(self.as_ule_slice()).ok()?; Some(ule_array.map(|u| T::from_unaligned(u))) } /// Gets a subslice of elements within a certain range. Returns `None` if the range /// is out of bounds of this `ZeroSlice`. /// /// # Example /// /// ``` /// use zerovec::ZeroVec; /// /// let bytes: &[u8] = &[0xD3, 0x00, 0x19, 0x01, 0xA5, 0x01, 0xCD, 0x80]; /// let zerovec: ZeroVec<u16> = /// ZeroVec::parse_bytes(bytes).expect("infallible"); /// /// assert_eq!( /// zerovec.get_subslice(1..3), /// Some(&*ZeroVec::from_slice_or_alloc(&[0x0119, 0x01A5])) /// ); /// assert_eq!(zerovec.get_subslice(3..5), None); /// ``` #[inline] pub fn get_subslice(&self, range: Range<usize>) -> Option<&ZeroSlice<T>> { self.0.get(range).map(ZeroSlice::from_ule_slice) } /// Get a borrowed reference to the underlying ULE type at a specified index. 
/// /// Prefer [`Self::get()`] over this method where possible since working /// directly with `ULE` types is less ergonomic pub fn get_ule_ref(&self, index: usize) -> Option<&T::ULE> { self.as_ule_slice().get(index) } /// Casts a `ZeroSlice<T>` to a compatible `ZeroSlice<P>`. /// /// `T` and `P` are compatible if they have the same `ULE` representation. /// /// If the `ULE`s of `T` and `P` are different, use [`Self::try_as_converted()`]. /// /// # Examples /// /// ``` /// use zerovec::ZeroSlice; /// /// const BYTES: &[u8] = &[0xD3, 0x00, 0x19, 0x01, 0xA5, 0x01, 0xCD, 0x80]; /// const ZS_U16: &ZeroSlice<u16> = { /// match ZeroSlice::<u16>::try_from_bytes(BYTES) { /// Ok(s) => s, /// Err(_) => unreachable!(), /// } /// }; /// /// let zs_i16: &ZeroSlice<i16> = ZS_U16.cast(); /// /// assert_eq!(ZS_U16.get(3), Some(32973)); /// assert_eq!(zs_i16.get(3), Some(-32563)); /// ``` #[inline] pub const fn cast<P>(&self) -> &ZeroSlice<P> where P: AsULE<ULE = T::ULE>, { ZeroSlice::<P>::from_ule_slice(self.as_ule_slice()) } /// Converts a `&ZeroSlice<T>` into a `&ZeroSlice<P>`. /// /// The resulting slice will have the same length as the original slice /// if and only if `T::ULE` and `P::ULE` are the same size. /// /// If `T` and `P` have the exact same `ULE`, use [`Self::cast()`]. 
/// /// # Examples /// /// ``` /// use zerovec::ZeroSlice; /// /// const BYTES: &[u8] = &[0x7F, 0xF3, 0x01, 0x00, 0x49, 0xF6, 0x01, 0x00]; /// const ZS_U32: &ZeroSlice<u32> = { /// match ZeroSlice::<u32>::try_from_bytes(BYTES) { /// Ok(s) => s, /// Err(_) => unreachable!(), /// } /// }; /// /// let zs_u8_4: &ZeroSlice<[u8; 4]> = /// ZS_U32.try_as_converted().expect("valid code points"); /// /// assert_eq!(ZS_U32.get(0), Some(127871)); /// assert_eq!(zs_u8_4.get(0), Some([0x7F, 0xF3, 0x01, 0x00])); /// ``` #[inline] pub fn try_as_converted<P: AsULE>(&self) -> Result<&ZeroSlice<P>, UleError> { let new_slice = P::ULE::parse_bytes_to_slice(self.as_bytes())?; Ok(ZeroSlice::from_ule_slice(new_slice)) } /// Gets the first element. Returns `None` if empty. /// /// # Example /// /// ``` /// use zerovec::ZeroVec; /// /// let bytes: &[u8] = &[0xD3, 0x00, 0x19, 0x01, 0xA5, 0x01, 0xCD, 0x80]; /// let zerovec: ZeroVec<u16> = /// ZeroVec::parse_bytes(bytes).expect("infallible"); /// /// assert_eq!(zerovec.first(), Some(211)); /// ``` #[inline] pub fn first(&self) -> Option<T> { self.as_ule_slice().first().copied().map(T::from_unaligned) } /// Gets the last element. Returns `None` if empty. /// /// # Example /// /// ``` /// use zerovec::ZeroVec; /// /// let bytes: &[u8] = &[0xD3, 0x00, 0x19, 0x01, 0xA5, 0x01, 0xCD, 0x80]; /// let zerovec: ZeroVec<u16> = /// ZeroVec::parse_bytes(bytes).expect("infallible"); /// /// assert_eq!(zerovec.last(), Some(32973)); /// ``` #[inline] pub fn last(&self) -> Option<T> { self.as_ule_slice().last().copied().map(T::from_unaligned) } /// Gets an iterator over the elements. 
/// /// # Example /// /// ``` /// use zerovec::ZeroVec; /// /// let bytes: &[u8] = &[0xD3, 0x00, 0x19, 0x01, 0xA5, 0x01, 0xCD, 0x80]; /// let zerovec: ZeroVec<u16> = /// ZeroVec::parse_bytes(bytes).expect("infallible"); /// let mut it = zerovec.iter(); /// /// assert_eq!(it.next(), Some(211)); /// assert_eq!(it.next(), Some(281)); /// assert_eq!(it.next(), Some(421)); /// assert_eq!(it.next(), Some(32973)); /// assert_eq!(it.next(), None); /// ``` #[inline] pub fn iter<'a>(&'a self) -> ZeroSliceIter<'a, T> { ZeroSliceIter(self.as_ule_slice().iter()) } /// Returns a tuple with the first element and a subslice of the remaining elements. /// /// # Example /// /// ``` /// use zerovec::ule::AsULE; /// use zerovec::ZeroSlice; /// /// const DATA: &ZeroSlice<u16> = /// ZeroSlice::<u16>::from_ule_slice(&<u16 as AsULE>::ULE::from_array([ /// 211, 281, 421, 32973, /// ])); /// const EXPECTED_VALUE: (u16, &ZeroSlice<u16>) = ( /// 211, /// ZeroSlice::<u16>::from_ule_slice(&<u16 as AsULE>::ULE::from_array([ /// 281, 421, 32973, /// ])), /// ); /// assert_eq!(EXPECTED_VALUE, DATA.split_first().unwrap()); /// ``` #[inline] pub fn split_first(&self) -> Option<(T, &ZeroSlice<T>)> { if let Some(first) = self.first() { return Some(( first, // `unwrap()` must succeed, because `first()` returned `Some`. 
#[allow(clippy::unwrap_used)] self.get_subslice(1..self.len()).unwrap(), )); } None } } /// An iterator over elements in a VarZeroVec #[derive(Debug)] pub struct ZeroSliceIter<'a, T: AsULE>(core::slice::Iter<'a, T::ULE>); impl<'a, T: AsULE> Iterator for ZeroSliceIter<'a, T> { type Item = T; fn next(&mut self) -> Option<T> { self.0.next().copied().map(T::from_unaligned) } } impl<'a, T: AsULE> ExactSizeIterator for ZeroSliceIter<'a, T> { fn len(&self) -> usize { self.0.len() } } impl<'a, T: AsULE> DoubleEndedIterator for ZeroSliceIter<'a, T> { fn next_back(&mut self) -> Option<T> { self.0.next_back().copied().map(T::from_unaligned) } } impl<T> ZeroSlice<T> where T: AsULE + Ord, { /// Binary searches a sorted `ZeroVec<T>` for the given element. For more information, see /// the primitive function [`binary_search`]. /// /// # Example /// /// ``` /// use zerovec::ZeroVec; /// /// let bytes: &[u8] = &[0xD3, 0x00, 0x19, 0x01, 0xA5, 0x01, 0xCD, 0x80]; /// let zerovec: ZeroVec<u16> = /// ZeroVec::parse_bytes(bytes).expect("infallible"); /// /// assert_eq!(zerovec.binary_search(&281), Ok(1)); /// assert_eq!(zerovec.binary_search(&282), Err(2)); /// ``` /// /// [`binary_search`]: https://doc.rust-lang.org/std/primitive.slice.html#method.binary_search #[inline] pub fn binary_search(&self, x: &T) -> Result<usize, usize> { self.as_ule_slice() .binary_search_by(|probe| T::from_unaligned(*probe).cmp(x)) } } impl<T> ZeroSlice<T> where T: AsULE, { /// Binary searches a sorted `ZeroVec<T>` based on a given predicate. For more information, see /// the primitive function [`binary_search_by`]. 
/// /// # Example /// /// ``` /// use zerovec::ZeroVec; /// /// let bytes: &[u8] = &[0xD3, 0x00, 0x19, 0x01, 0xA5, 0x01, 0xCD, 0x80]; /// let zerovec: ZeroVec<u16> = /// ZeroVec::parse_bytes(bytes).expect("infallible"); /// /// assert_eq!(zerovec.binary_search_by(|x| x.cmp(&281)), Ok(1)); /// assert_eq!(zerovec.binary_search_by(|x| x.cmp(&282)), Err(2)); /// ``` /// /// [`binary_search_by`]: https://doc.rust-lang.org/std/primitive.slice.html#method.binary_search_by #[inline] pub fn binary_search_by( &self, mut predicate: impl FnMut(T) -> Ordering, ) -> Result<usize, usize> { self.as_ule_slice() .binary_search_by(|probe| predicate(T::from_unaligned(*probe))) } } // Safety (based on the safety checklist on the VarULE trait): // (`ZeroSlice<T>` is a transparent wrapper around [T::ULE]) // 1. [T::ULE] does not include any uninitialized or padding bytes (achieved by being a slice of a ULE type) // 2. [T::ULE] is aligned to 1 byte (achieved by being a slice of a ULE type) // 3. The impl of `validate_bytes()` returns an error if any byte is not valid. // 4. The impl of `validate_bytes()` returns an error if the slice cannot be used in its entirety // 5. The impl of `from_bytes_unchecked()` returns a reference to the same data. // 6. `as_bytes()` and `parse_bytes()` are defaulted // 7. 
`[T::ULE]` byte equality is semantic equality (relying on the guideline of the underlying `ULE` type) unsafe impl<T: AsULE + 'static> VarULE for ZeroSlice<T> { #[inline] fn validate_bytes(bytes: &[u8]) -> Result<(), UleError> { T::ULE::validate_bytes(bytes) } #[inline] unsafe fn from_bytes_unchecked(bytes: &[u8]) -> &Self { Self::from_ule_slice(T::ULE::slice_from_bytes_unchecked(bytes)) } } impl<T> Eq for ZeroSlice<T> where T: AsULE + Eq {} impl<T> PartialEq<ZeroSlice<T>> for ZeroSlice<T> where T: AsULE + PartialEq, { #[inline] fn eq(&self, other: &ZeroSlice<T>) -> bool { self.as_zerovec().eq(&other.as_zerovec()) } } impl<T> PartialEq<[T]> for ZeroSlice<T> where T: AsULE + PartialEq, { #[inline] fn eq(&self, other: &[T]) -> bool { self.iter().eq(other.iter().copied()) } } impl<'a, T> PartialEq<ZeroVec<'a, T>> for ZeroSlice<T> where T: AsULE + PartialEq, { #[inline] fn eq(&self, other: &ZeroVec<'a, T>) -> bool { self.as_zerovec().eq(other) } } impl<'a, T> PartialEq<ZeroSlice<T>> for ZeroVec<'a, T> where T: AsULE + PartialEq, { #[inline] fn eq(&self, other: &ZeroSlice<T>) -> bool { self.eq(&other.as_zerovec()) } } impl<T> fmt::Debug for ZeroSlice<T> where T: AsULE + fmt::Debug, { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { self.as_zerovec().fmt(f) } } impl<T: AsULE + PartialOrd> PartialOrd for ZeroSlice<T> { fn partial_cmp(&self, other: &Self) -> Option<Ordering> { self.iter().partial_cmp(other.iter()) } } impl<T: AsULE + Ord> Ord for ZeroSlice<T> { fn cmp(&self, other: &Self) -> Ordering { self.iter().cmp(other.iter()) } } #[cfg(feature = "alloc")] impl<T: AsULE> AsRef<ZeroSlice<T>> for alloc::vec::Vec<T::ULE> { fn as_ref(&self) -> &ZeroSlice<T> { ZeroSlice::<T>::from_ule_slice(self) } } impl<T: AsULE> AsRef<ZeroSlice<T>> for &[T::ULE] { fn as_ref(&self) -> &ZeroSlice<T> { ZeroSlice::<T>::from_ule_slice(self) } } impl<T> Default for &ZeroSlice<T> where T: AsULE, { fn default() -> Self { ZeroSlice::from_ule_slice(&[]) } } #[cfg(test)] mod test { use 
super::*; use crate::zeroslice; #[test] fn test_split_first() { { // empty slice. assert_eq!(None, ZeroSlice::<u16>::new_empty().split_first()); } { // single element slice const DATA: &ZeroSlice<u16> = zeroslice!(u16; <u16 as AsULE>::ULE::from_unsigned; [211]); assert_eq!((211, zeroslice![]), DATA.split_first().unwrap()); } { // slice with many elements. const DATA: &ZeroSlice<u16> = zeroslice!(u16; <u16 as AsULE>::ULE::from_unsigned; [211, 281, 421, 32973]); const EXPECTED_VALUE: (u16, &ZeroSlice<u16>) = ( 211, zeroslice!(u16; <u16 as AsULE>::ULE::from_unsigned; [281, 421, 32973]), ); assert_eq!(EXPECTED_VALUE, DATA.split_first().unwrap()); } } }
use bencher::{benchmark_group, benchmark_main};

use std::io::{Cursor, Read, Seek, Write};

use bencher::Bencher;
use getrandom::getrandom;
use zip::{result::ZipResult, write::SimpleFileOptions, ZipArchive, ZipWriter};

/// Builds an in-memory archive with `num_entries` files of `entry_size` random
/// bytes each, written according to `options`.
///
/// Returns the total byte length of the finished archive together with a
/// readable [`ZipArchive`] over it.
fn generate_random_archive(
    num_entries: usize,
    entry_size: usize,
    options: SimpleFileOptions,
) -> ZipResult<(usize, ZipArchive<Cursor<Vec<u8>>>)> {
    let buf = Cursor::new(Vec::new());
    let mut zip = ZipWriter::new(buf);

    let mut bytes = vec![0u8; entry_size];
    for i in 0..num_entries {
        let name = format!("random{}.dat", i);
        zip.start_file(name, options)?;
        getrandom(&mut bytes).unwrap();
        zip.write_all(&bytes)?;
    }

    let buf = zip.finish()?.into_inner();
    let len = buf.len();
    Ok((len, ZipArchive::new(Cursor::new(buf))?))
}

/// Copies all of `src` into `target` in a single call via `merge_archive`.
fn perform_merge<R: Read + Seek, W: Write + Seek>(
    src: ZipArchive<R>,
    mut target: ZipWriter<W>,
) -> ZipResult<ZipWriter<W>> {
    target.merge_archive(src)?;
    Ok(target)
}

/// Copies all of `src` into `target` one entry at a time via `raw_copy_file`.
fn perform_raw_copy_file<R: Read + Seek, W: Write + Seek>(
    mut src: ZipArchive<R>,
    mut target: ZipWriter<W>,
) -> ZipResult<ZipWriter<W>> {
    for i in 0..src.len() {
        let entry = src.by_index(i)?;
        target.raw_copy_file(entry)?;
    }
    Ok(target)
}

const NUM_ENTRIES: usize = 100;
const ENTRY_SIZE: usize = 1024;

/// Concrete signature both copy strategies monomorphize to when driven from an
/// in-memory archive; lets the benchmarks below share one driver.
type CopyFn = fn(
    ZipArchive<Cursor<Vec<u8>>>,
    ZipWriter<Cursor<Vec<u8>>>,
) -> ZipResult<ZipWriter<Cursor<Vec<u8>>>>;

/// Shared benchmark driver: generates one source archive with `options`, then
/// measures repeatedly copying it into a fresh in-memory archive using `copy`.
///
/// Both strategies copy entries verbatim, so the output must be exactly as
/// large as the input; the assert guards against benchmarking a no-op.
fn run_copy_bench(bench: &mut Bencher, options: SimpleFileOptions, copy: CopyFn) {
    let (len, src) = generate_random_archive(NUM_ENTRIES, ENTRY_SIZE, options).unwrap();
    bench.bytes = len as u64;
    bench.iter(|| {
        let buf = Cursor::new(Vec::new());
        let zip = ZipWriter::new(buf);
        let zip = copy(src.clone(), zip).unwrap();
        let buf = zip.finish().unwrap().into_inner();
        assert_eq!(buf.len(), len);
    });
}

/// Benchmarks `merge_archive` on an uncompressed (stored) source archive.
fn merge_archive_stored(bench: &mut Bencher) {
    let options = SimpleFileOptions::default().compression_method(zip::CompressionMethod::Stored);
    run_copy_bench(bench, options, perform_merge);
}

/// Benchmarks `merge_archive` on a deflate-compressed source archive.
#[cfg(feature = "_deflate-any")]
fn merge_archive_compressed(bench: &mut Bencher) {
    let options =
        SimpleFileOptions::default().compression_method(zip::CompressionMethod::Deflated);
    run_copy_bench(bench, options, perform_merge);
}

/// Benchmarks per-entry `raw_copy_file` on a stored source archive.
fn merge_archive_raw_copy_file_stored(bench: &mut Bencher) {
    let options = SimpleFileOptions::default().compression_method(zip::CompressionMethod::Stored);
    run_copy_bench(bench, options, perform_raw_copy_file);
}

/// Benchmarks per-entry `raw_copy_file` on a deflate-compressed source archive.
#[cfg(feature = "_deflate-any")]
fn merge_archive_raw_copy_file_compressed(bench: &mut Bencher) {
    let options =
        SimpleFileOptions::default().compression_method(zip::CompressionMethod::Deflated);
    run_copy_bench(bench, options, perform_raw_copy_file);
}

#[cfg(feature = "_deflate-any")]
benchmark_group!(
    benches,
    merge_archive_stored,
    merge_archive_compressed,
    merge_archive_raw_copy_file_stored,
    merge_archive_raw_copy_file_compressed,
);
#[cfg(not(feature = "_deflate-any"))]
benchmark_group!(
    benches,
    merge_archive_stored,
    merge_archive_raw_copy_file_stored,
);
benchmark_main!(benches);
use bencher::{benchmark_group, benchmark_main};

use std::io::{Cursor, Read, Write};

use bencher::Bencher;
use getrandom::getrandom;
use zip::{write::SimpleFileOptions, ZipArchive, ZipWriter};

/// Produces an in-memory zip archive holding one stored (uncompressed) entry
/// named `random.dat` filled with `size` random bytes.
fn generate_random_archive(size: usize) -> Vec<u8> {
    let mut zip = ZipWriter::new(Cursor::new(Vec::new()));
    let opts = SimpleFileOptions::default().compression_method(zip::CompressionMethod::Stored);

    zip.start_file("random.dat", opts).unwrap();
    let mut payload = vec![0u8; size];
    getrandom(&mut payload).unwrap();
    zip.write_all(&payload).unwrap();

    zip.finish().unwrap().into_inner()
}

/// Benchmarks looking up a single entry by name and streaming its entire
/// contents through a small stack buffer.
fn read_entry(bench: &mut Bencher) {
    let size = 1024 * 1024;
    let archive_bytes = generate_random_archive(size);
    let mut archive = ZipArchive::new(Cursor::new(archive_bytes.as_slice())).unwrap();

    bench.iter(|| {
        let mut entry = archive.by_name("random.dat").unwrap();
        let mut chunk = [0u8; 1024];
        // Drain the entry; a zero-length read signals EOF.
        while entry.read(&mut chunk).unwrap() != 0 {}
    });

    bench.bytes = size as u64;
}

benchmark_group!(benches, read_entry);
benchmark_main!(benches);
use bencher::{benchmark_group, benchmark_main};

use std::fs;
use std::io::{self, prelude::*, Cursor};

use bencher::Bencher;
use getrandom::getrandom;
use tempfile::TempDir;
use zip::write::SimpleFileOptions;
use zip::{result::ZipResult, CompressionMethod, ZipArchive, ZipWriter};

const FILE_COUNT: usize = 15_000;
const FILE_SIZE: usize = 1024;

// Builds an in-memory archive with `count_files` stored entries of `file_size`
// random bytes each. Long entry names inflate the central directory, which is
// what the metadata-parsing benchmarks below exercise.
fn generate_random_archive(count_files: usize, file_size: usize) -> ZipResult<Vec<u8>> {
    let data = Vec::new();
    let mut writer = ZipWriter::new(Cursor::new(data));
    let options = SimpleFileOptions::default().compression_method(CompressionMethod::Stored);

    let mut bytes = vec![0u8; file_size];
    for i in 0..count_files {
        let name = format!("file_deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_{i}.dat");
        writer.start_file(name, options)?;
        getrandom(&mut bytes).map_err(io::Error::from)?;
        writer.write_all(&bytes)?;
    }

    Ok(writer.finish()?.into_inner())
}

// Benchmarks opening an archive, i.e. locating and parsing the full central
// directory (the archive bytes themselves are generated once, outside the loop).
fn read_metadata(bench: &mut Bencher) {
    let bytes = generate_random_archive(FILE_COUNT, FILE_SIZE).unwrap();

    bench.iter(|| {
        let archive = ZipArchive::new(Cursor::new(bytes.as_slice())).unwrap();
        archive.len()
    });
    bench.bytes = bytes.len() as u64;
}

const COMMENT_SIZE: usize = 50_000;

// Single-entry zip32 archive carrying a large random archive comment, to stress
// the end-of-central-directory search.
fn generate_zip32_archive_with_random_comment(comment_length: usize) -> ZipResult<Vec<u8>> {
    let data = Vec::new();
    let mut writer = ZipWriter::new(Cursor::new(data));
    let options = SimpleFileOptions::default().compression_method(CompressionMethod::Stored);

    let mut bytes = vec![0u8; comment_length];
    getrandom(&mut bytes).unwrap();
    writer.set_raw_comment(bytes.into_boxed_slice());

    writer.start_file("asdf.txt", options)?;
    writer.write_all(b"asdf")?;

    Ok(writer.finish()?.into_inner())
}

fn parse_archive_with_comment(bench: &mut Bencher) {
    let bytes = generate_zip32_archive_with_random_comment(COMMENT_SIZE).unwrap();

    // NOTE(review): `bench_n(1, ...)` runs the closure once and the closure
    // ignores its `Bencher` argument (no inner `iter`), so this measures a
    // single parse rather than an averaged loop — presumably intentional to
    // bound worst-case comment scanning; confirm against bencher's semantics.
    bench.bench_n(1, |_| {
        let archive = ZipArchive::new(Cursor::new(bytes.as_slice())).unwrap();
        let _ = archive.comment().len();
    });
    bench.bytes = bytes.len() as u64;
}

const COMMENT_SIZE_64: usize = 500_000;

// Same as the zip32 variant, but forces the ZIP64 format via `large_file(true)`.
fn generate_zip64_archive_with_random_comment(comment_length: usize) -> ZipResult<Vec<u8>> {
    let data = Vec::new();
    let mut writer = ZipWriter::new(Cursor::new(data));
    let options = SimpleFileOptions::default()
        .compression_method(CompressionMethod::Stored)
        .large_file(true);

    let mut bytes = vec![0u8; comment_length];
    getrandom(&mut bytes).unwrap();
    writer.set_raw_comment(bytes.into_boxed_slice());

    writer.start_file("asdf.txt", options)?;
    writer.write_all(b"asdf")?;

    Ok(writer.finish()?.into_inner())
}

fn parse_zip64_archive_with_comment(bench: &mut Bencher) {
    let bytes = generate_zip64_archive_with_random_comment(COMMENT_SIZE_64).unwrap();

    bench.iter(|| {
        let archive = ZipArchive::new(Cursor::new(bytes.as_slice())).unwrap();
        archive.comment().len()
    });
    bench.bytes = bytes.len() as u64;
}

// Benchmarks the streaming (non-seeking) reader over a small on-disk archive.
fn parse_stream_archive(bench: &mut Bencher) {
    const STREAM_ZIP_ENTRIES: usize = 5;
    const STREAM_FILE_SIZE: usize = 5;

    let bytes = generate_random_archive(STREAM_ZIP_ENTRIES, STREAM_FILE_SIZE).unwrap();

    /* Write to a temporary file path to incur some filesystem overhead from repeated reads */
    let dir = TempDir::with_prefix("stream-bench").unwrap();
    let out = dir.path().join("bench-out.zip");
    fs::write(&out, &bytes).unwrap();

    bench.iter(|| {
        let mut f = fs::File::open(&out).unwrap();
        // Iterate entries until the stream reader reports no more files.
        while zip::read::read_zipfile_from_stream(&mut f)
            .unwrap()
            .is_some()
        {}
    });
    bench.bytes = bytes.len() as u64;
}

// Benchmarks how quickly opening a large file that is not a zip archive fails.
fn parse_large_non_zip(bench: &mut Bencher) {
    const FILE_SIZE: usize = 17_000_000;

    // Create a large file that doesn't have a zip header (generating random data _might_ make a zip magic
    // number somewhere which is _not_ what we're trying to test).
    let dir = TempDir::with_prefix("large-non-zip-bench").unwrap();
    let file = dir.path().join("zeros");
    let buf = vec![0u8; FILE_SIZE];
    fs::write(&file, &buf).unwrap();

    bench.iter(|| {
        assert!(zip::ZipArchive::new(std::fs::File::open(&file).unwrap()).is_err());
    })
}

benchmark_group!(
    benches,
    read_metadata,
    parse_archive_with_comment,
    parse_zip64_archive_with_comment,
    parse_stream_archive,
    parse_large_non_zip,
);
benchmark_main!(benches);
//! Implementation of the AES decryption for zip files.
//!
//! This was implemented according to the [WinZip specification](https://www.winzip.com/win/en/aes_info.html).
//! Note that using CRC with AES depends on the used encryption specification, AE-1 or AE-2.
//! If the file is marked as encrypted with AE-2 the CRC field is ignored, even if it isn't set to 0.

use crate::aes_ctr::AesCipher;
use crate::types::AesMode;
use crate::{aes_ctr, result::ZipError};
use constant_time_eq::constant_time_eq;
use hmac::{Hmac, Mac};
use rand::RngCore;
use sha1::Sha1;
use std::io::{self, Error, ErrorKind, Read, Write};
use zeroize::{Zeroize, Zeroizing};

/// The length of the password verification value in bytes
pub const PWD_VERIFY_LENGTH: usize = 2;
/// The length of the authentication code in bytes
const AUTH_CODE_LENGTH: usize = 10;
/// The number of iterations used with PBKDF2
const ITERATION_COUNT: u32 = 1000;

// Keystream dispatcher over the three WinZip AES key sizes. Boxed because the
// contained cipher state is comparatively large.
enum Cipher {
    Aes128(Box<aes_ctr::AesCtrZipKeyStream<aes_ctr::Aes128>>),
    Aes192(Box<aes_ctr::AesCtrZipKeyStream<aes_ctr::Aes192>>),
    Aes256(Box<aes_ctr::AesCtrZipKeyStream<aes_ctr::Aes256>>),
}

impl Cipher {
    /// Create a `Cipher` depending on the used `AesMode` and the given `key`.
    ///
    /// # Panics
    ///
    /// This panics if `key` doesn't have the correct size for the chosen aes mode.
    fn from_mode(aes_mode: AesMode, key: &[u8]) -> Self {
        match aes_mode {
            AesMode::Aes128 => Cipher::Aes128(Box::new(aes_ctr::AesCtrZipKeyStream::<
                aes_ctr::Aes128,
            >::new(key))),
            AesMode::Aes192 => Cipher::Aes192(Box::new(aes_ctr::AesCtrZipKeyStream::<
                aes_ctr::Aes192,
            >::new(key))),
            AesMode::Aes256 => Cipher::Aes256(Box::new(aes_ctr::AesCtrZipKeyStream::<
                aes_ctr::Aes256,
            >::new(key))),
        }
    }

    // XORs `target` in place with the keystream; in CTR mode encryption and
    // decryption are the same operation.
    fn crypt_in_place(&mut self, target: &mut [u8]) {
        match self {
            Self::Aes128(cipher) => cipher.crypt_in_place(target),
            Self::Aes192(cipher) => cipher.crypt_in_place(target),
            Self::Aes256(cipher) => cipher.crypt_in_place(target),
        }
    }
}

// An aes encrypted file starts with a salt, whose length depends on the used aes mode
// followed by a 2 byte password verification value
// then the variable length encrypted data
// and lastly a 10 byte authentication code
pub struct AesReader<R> {
    reader: R,
    aes_mode: AesMode,
    data_length: u64,
}

impl<R: Read> AesReader<R> {
    pub const fn new(reader: R, aes_mode: AesMode, compressed_size: u64) -> AesReader<R> {
        // NOTE(review): this subtraction underflows (panicking in debug builds,
        // wrapping in release) if `compressed_size` is smaller than the fixed
        // salt + verifier + auth-code overhead — presumably callers only pass
        // sizes from well-formed headers; confirm truncated archives can't
        // reach this point.
        let data_length = compressed_size
            - (PWD_VERIFY_LENGTH + AUTH_CODE_LENGTH + aes_mode.salt_length()) as u64;

        Self {
            reader,
            aes_mode,
            data_length,
        }
    }

    /// Read the AES header bytes and validate the password.
    ///
    /// Even if the validation succeeds, there is still a 1 in 65536 chance that an incorrect
    /// password was provided.
    /// It isn't possible to check the authentication code in this step. This will be done after
    /// reading and decrypting the file.
    pub fn validate(mut self, password: &[u8]) -> Result<AesReaderValid<R>, ZipError> {
        let salt_length = self.aes_mode.salt_length();
        let key_length = self.aes_mode.key_length();

        let mut salt = vec![0; salt_length];
        self.reader.read_exact(&mut salt)?;

        // next are 2 bytes used for password verification
        let mut pwd_verification_value = vec![0; PWD_VERIFY_LENGTH];
        self.reader.read_exact(&mut pwd_verification_value)?;

        // derive a key from the password and salt
        // the length depends on the aes key length
        let derived_key_len = 2 * key_length + PWD_VERIFY_LENGTH;
        let mut derived_key: Box<[u8]> = vec![0; derived_key_len].into_boxed_slice();

        // use PBKDF2 with HMAC-Sha1 to derive the key
        pbkdf2::pbkdf2::<Hmac<Sha1>>(password, &salt, ITERATION_COUNT, &mut derived_key)
            .map_err(|e| Error::new(ErrorKind::InvalidInput, e))?;
        // Derived key layout: [encryption key | hmac key | 2-byte verifier].
        let decrypt_key = &derived_key[0..key_length];
        let hmac_key = &derived_key[key_length..key_length * 2];
        let pwd_verify = &derived_key[derived_key_len - 2..];

        // the last 2 bytes should equal the password verification value
        if pwd_verification_value != pwd_verify {
            // wrong password
            return Err(ZipError::InvalidPassword);
        }

        let cipher = Cipher::from_mode(self.aes_mode, decrypt_key);
        let hmac = Hmac::<Sha1>::new_from_slice(hmac_key).unwrap();

        Ok(AesReaderValid {
            reader: self.reader,
            data_remaining: self.data_length,
            cipher,
            hmac,
            finalized: false,
        })
    }

    /// Reads the AES header bytes and returns the verification value and salt.
    ///
    /// # Returns
    ///
    /// the verification value and the salt
    pub fn get_verification_value_and_salt(
        mut self,
    ) -> io::Result<([u8; PWD_VERIFY_LENGTH], Vec<u8>)> {
        let salt_length = self.aes_mode.salt_length();

        let mut salt = vec![0; salt_length];
        self.reader.read_exact(&mut salt)?;

        // next are 2 bytes used for password verification
        let mut pwd_verification_value = [0; PWD_VERIFY_LENGTH];
        self.reader.read_exact(&mut pwd_verification_value)?;
        Ok((pwd_verification_value, salt))
    }
}

/// A reader for aes encrypted files, which has already passed the first password check.
///
/// There is a 1 in 65536 chance that an invalid password passes that check.
/// After the data has been read and decrypted an HMAC will be checked and provide a final means
/// to check if either the password is invalid or if the data has been changed.
pub struct AesReaderValid<R: Read> {
    reader: R,
    // Encrypted payload bytes not yet consumed (excludes the trailing auth code).
    data_remaining: u64,
    cipher: Cipher,
    hmac: Hmac<Sha1>,
    // Guards against finalizing the HMAC twice.
    finalized: bool,
}

impl<R: Read> Read for AesReaderValid<R> {
    /// This implementation does not fulfill all requirements set in the trait documentation.
    ///
    /// ```txt
    /// "If an error is returned then it must be guaranteed that no bytes were read."
    /// ```
    ///
    /// Whether this applies to errors that occur while reading the encrypted data depends on the
    /// underlying reader. If the error occurs while verifying the HMAC, the reader might become
    /// practically unusable, since its position after the error is not known.
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        if self.data_remaining == 0 {
            return Ok(0);
        }

        // get the number of bytes to read, compare as u64 to make sure we can read more than
        // 2^32 bytes even on 32 bit systems.
        let bytes_to_read = self.data_remaining.min(buf.len() as u64) as usize;
        let read = self.reader.read(&mut buf[0..bytes_to_read])?;
        self.data_remaining -= read as u64;

        // Update the hmac with the encrypted data
        // (the MAC is computed over the ciphertext, per the WinZip AE spec).
        self.hmac.update(&buf[0..read]);

        // decrypt the data
        self.cipher.crypt_in_place(&mut buf[0..read]);

        // if there is no data left to read, check the integrity of the data
        if self.data_remaining == 0 {
            assert!(
                !self.finalized,
                "Tried to use an already finalized HMAC. This is a bug!"
            );
            self.finalized = true;

            // Zip uses HMAC-Sha1-80, which only uses the first half of the hash
            // see https://www.winzip.com/win/en/aes_info.html#auth-faq
            let mut read_auth_code = [0; AUTH_CODE_LENGTH];
            self.reader.read_exact(&mut read_auth_code)?;
            let computed_auth_code = &self.hmac.finalize_reset().into_bytes()[0..AUTH_CODE_LENGTH];

            // use constant time comparison to mitigate timing attacks
            if !constant_time_eq(computed_auth_code, &read_auth_code) {
                return Err(
                    Error::new(
                        ErrorKind::InvalidData,
                        "Invalid authentication code, this could be due to an invalid password or errors in the data"
                    )
                );
            }
        }

        Ok(read)
    }
}

impl<R: Read> AesReaderValid<R> {
    /// Consumes this decoder, returning the underlying reader.
    pub fn into_inner(self) -> R {
        self.reader
    }
}

// Encrypting counterpart to `AesReaderValid`: buffers plaintext per `write`
// call, encrypts in place, MACs the ciphertext, and appends the truncated HMAC
// in `finish`.
pub struct AesWriter<W> {
    writer: W,
    cipher: Cipher,
    hmac: Hmac<Sha1>,
    // Zeroizing scratch buffer so plaintext is wiped from memory after use.
    buffer: Zeroizing<Vec<u8>>,
    // Salt + password verifier, held back until the first payload write
    // (see `write_encrypted_file_header`); `None` once written.
    encrypted_file_header: Option<Vec<u8>>,
}

impl<W: Write> AesWriter<W> {
    pub fn new(writer: W, aes_mode: AesMode, password: &[u8]) -> io::Result<Self> {
        let salt_length = aes_mode.salt_length();
        let key_length = aes_mode.key_length();

        let mut encrypted_file_header = Vec::with_capacity(salt_length + 2);

        let mut salt = vec![0; salt_length];
        rand::thread_rng().fill_bytes(&mut salt);
        encrypted_file_header.write_all(&salt)?;

        // Derive a key from the password and salt. The length depends on the aes key length
        let derived_key_len = 2 * key_length + PWD_VERIFY_LENGTH;
        let mut derived_key: Zeroizing<Vec<u8>> = Zeroizing::new(vec![0; derived_key_len]);

        // Use PBKDF2 with HMAC-Sha1 to derive the key.
        pbkdf2::pbkdf2::<Hmac<Sha1>>(password, &salt, ITERATION_COUNT, &mut derived_key)
            .map_err(|e| Error::new(ErrorKind::InvalidInput, e))?;
        // Same derived-key layout as on the read side: [enc key | hmac key | verifier].
        let encryption_key = &derived_key[0..key_length];
        let hmac_key = &derived_key[key_length..key_length * 2];
        let pwd_verify = derived_key[derived_key_len - 2..].to_vec();
        encrypted_file_header.write_all(&pwd_verify)?;

        let cipher = Cipher::from_mode(aes_mode, encryption_key);
        let hmac = Hmac::<Sha1>::new_from_slice(hmac_key).unwrap();

        Ok(Self {
            writer,
            cipher,
            hmac,
            buffer: Default::default(),
            encrypted_file_header: Some(encrypted_file_header),
        })
    }

    // Flushes the pending header (if any) and appends the 10-byte truncated
    // HMAC, returning the inner writer.
    pub fn finish(mut self) -> io::Result<W> {
        self.write_encrypted_file_header()?;

        // Zip uses HMAC-Sha1-80, which only uses the first half of the hash
        // see https://www.winzip.com/win/en/aes_info.html#auth-faq
        let computed_auth_code = &self.hmac.finalize_reset().into_bytes()[0..AUTH_CODE_LENGTH];
        self.writer.write_all(computed_auth_code)?;

        Ok(self.writer)
    }

    /// The AES encryption specification requires some metadata being written at the start of the
    /// file data section, but this can only be done once the extra data writing has been finished
    /// so we can't do it when the writer is constructed.
    fn write_encrypted_file_header(&mut self) -> io::Result<()> {
        if let Some(header) = self.encrypted_file_header.take() {
            self.writer.write_all(&header)?;
        }

        Ok(())
    }
}

impl<W: Write> Write for AesWriter<W> {
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        self.write_encrypted_file_header()?;

        // Fill the internal buffer and encrypt it in-place.
        self.buffer.extend_from_slice(buf);
        self.cipher.crypt_in_place(&mut self.buffer[..]);

        // Update the hmac with the encrypted data.
        self.hmac.update(&self.buffer[..]);

        // Write the encrypted buffer to the inner writer. We need to use `write_all` here as if
        // we only write parts of the data we can't easily reverse the keystream in the cipher
        // implementation.
        self.writer.write_all(&self.buffer[..])?;

        // Zeroize the backing memory before clearing the buffer to prevent cleartext data from
        // being left in memory.
        self.buffer.zeroize();
        self.buffer.clear();

        // All of `buf` was consumed (write_all either wrote everything or errored).
        Ok(buf.len())
    }

    fn flush(&mut self) -> io::Result<()> {
        self.writer.flush()
    }
}

#[cfg(all(test, feature = "aes-crypto"))]
mod tests {
    use std::io::{self, Read, Write};

    use crate::{
        aes::{AesReader, AesWriter},
        result::ZipError,
        types::AesMode,
    };

    /// Checks whether `AesReader` can successfully decrypt what `AesWriter` produces.
    fn roundtrip(aes_mode: AesMode, password: &[u8], plaintext: &[u8]) -> Result<bool, ZipError> {
        let mut buf = io::Cursor::new(vec![]);
        let mut read_buffer = vec![];

        {
            let mut writer = AesWriter::new(&mut buf, aes_mode, password)?;
            writer.write_all(plaintext)?;
            writer.finish()?;
        }

        // Reset cursor position to the beginning.
        buf.set_position(0);

        {
            let compressed_length = buf.get_ref().len() as u64;
            let mut reader =
                AesReader::new(&mut buf, aes_mode, compressed_length).validate(password)?;
            reader.read_to_end(&mut read_buffer)?;
        }

        Ok(plaintext == read_buffer)
    }

    #[test]
    fn crypt_aes_256_0_byte() {
        let plaintext = &[];
        let password = b"some super secret password";

        assert!(roundtrip(AesMode::Aes256, password, plaintext).expect("could encrypt and decrypt"));
    }

    #[test]
    fn crypt_aes_128_5_byte() {
        let plaintext = b"asdf\n";
        let password = b"some super secret password";

        assert!(roundtrip(AesMode::Aes128, password, plaintext).expect("could encrypt and decrypt"));
    }

    #[test]
    fn crypt_aes_192_5_byte() {
        let plaintext = b"asdf\n";
        let password = b"some super secret password";

        assert!(roundtrip(AesMode::Aes192, password, plaintext).expect("could encrypt and decrypt"));
    }

    #[test]
    fn crypt_aes_256_5_byte() {
        let plaintext = b"asdf\n";
        let password = b"some super secret password";

        assert!(roundtrip(AesMode::Aes256, password, plaintext).expect("could encrypt and decrypt"));
    }

    #[test]
    fn crypt_aes_128_40_byte() {
        let plaintext = b"Lorem ipsum dolor sit amet, consectetur\n";
        let password = b"some super secret password";

        assert!(roundtrip(AesMode::Aes128, password, plaintext).expect("could encrypt and decrypt"));
    }

    #[test]
    fn crypt_aes_192_40_byte() {
        let plaintext = b"Lorem ipsum dolor sit amet, consectetur\n";
        let password = b"some super secret password";

        assert!(roundtrip(AesMode::Aes192, password, plaintext).expect("could encrypt and decrypt"));
    }

    #[test]
    fn crypt_aes_256_40_byte() {
        let plaintext = b"Lorem ipsum dolor sit amet, consectetur\n";
        let password = b"some super secret password";

        assert!(roundtrip(AesMode::Aes256, password, plaintext).expect("could encrypt and decrypt"));
    }
}
//! A counter mode (CTR) for AES to work with the encryption used in zip files.
//!
//! This was implemented since the zip specification requires the mode to not use a nonce and uses a
//! different byte order (little endian) than NIST (big endian).
//! See [AesCtrZipKeyStream] for more information.

use crate::unstable::LittleEndianWriteExt;
use aes::cipher::generic_array::GenericArray;
use aes::cipher::{BlockEncrypt, KeyInit};
use std::{any, fmt};

/// Internal block size of an AES cipher.
const AES_BLOCK_SIZE: usize = 16;

/// AES-128.
#[derive(Debug)]
pub struct Aes128;

/// AES-192
#[derive(Debug)]
pub struct Aes192;

/// AES-256.
#[derive(Debug)]
pub struct Aes256;

/// An AES cipher kind.
pub trait AesKind {
    /// Key type.
    type Key: AsRef<[u8]>;
    /// Cipher used to decrypt.
    type Cipher: KeyInit;
}

impl AesKind for Aes128 {
    type Key = [u8; 16];
    type Cipher = aes::Aes128;
}

impl AesKind for Aes192 {
    type Key = [u8; 24];
    type Cipher = aes::Aes192;
}

impl AesKind for Aes256 {
    type Key = [u8; 32];
    type Cipher = aes::Aes256;
}

/// An AES-CTR key stream generator.
///
/// Implements the slightly non-standard AES-CTR variant used by WinZip AES encryption.
///
/// Typical AES-CTR implementations combine a nonce with a 64 bit counter. WinZIP AES instead uses
/// no nonce and also uses a different byte order (little endian) than NIST (big endian).
///
/// The stream implements the `Read` trait; encryption or decryption is performed by XOR-ing the
/// bytes from the key stream with the ciphertext/plaintext.
pub struct AesCtrZipKeyStream<C: AesKind> {
    /// Current AES counter.
    counter: u128,
    /// AES cipher instance.
    cipher: C::Cipher,
    /// Stores the currently available keystream bytes.
    buffer: [u8; AES_BLOCK_SIZE],
    /// Number of bytes already used up from `buffer`.
    pos: usize,
}

impl<C> fmt::Debug for AesCtrZipKeyStream<C>
where
    C: AesKind,
{
    // Manual impl: only the cipher kind's type name and the counter are printed;
    // the cipher state and keystream buffer are deliberately omitted from output.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(
            f,
            "AesCtrZipKeyStream<{}>(counter: {})",
            any::type_name::<C>(),
            self.counter
        )
    }
}

impl<C> AesCtrZipKeyStream<C>
where
    C: AesKind,
    C::Cipher: KeyInit,
{
    /// Creates a new zip variant AES-CTR key stream.
    ///
    /// # Panics
    ///
    /// This panics if `key` doesn't have the correct size for cipher `C`.
    pub fn new(key: &[u8]) -> AesCtrZipKeyStream<C> {
        AesCtrZipKeyStream {
            // The first block is generated from counter value 1, not 0.
            counter: 1,
            // `from_slice` panics when `key.len()` doesn't match `C::Cipher`'s key size.
            cipher: C::Cipher::new(GenericArray::from_slice(key)),
            buffer: [0u8; AES_BLOCK_SIZE],
            // `pos` starts at the end of the (empty) buffer so the first call to
            // `crypt_in_place` immediately generates a fresh keystream block.
            pos: AES_BLOCK_SIZE,
        }
    }
}

impl<C> AesCipher for AesCtrZipKeyStream<C>
where
    C: AesKind,
    C::Cipher: BlockEncrypt,
{
    /// Decrypt or encrypt `target`.
    #[inline]
    fn crypt_in_place(&mut self, mut target: &mut [u8]) {
        while !target.is_empty() {
            if self.pos == AES_BLOCK_SIZE {
                // Keystream block exhausted: produce the next one by encrypting the
                // little-endian encoded counter, then advance the counter.
                // Note: AES block size is always 16 bytes, same as u128.
                self.buffer
                    .as_mut()
                    .write_u128_le(self.counter)
                    .expect("did not expect u128 le conversion to fail");
                self.cipher
                    .encrypt_block(GenericArray::from_mut_slice(&mut self.buffer));
                self.counter += 1;
                self.pos = 0;
            }

            // XOR as many bytes as the remaining keystream block provides.
            let target_len = target.len().min(AES_BLOCK_SIZE - self.pos);

            xor(
                &mut target[0..target_len],
                &self.buffer[self.pos..(self.pos + target_len)],
            );
            target = &mut target[target_len..];
            self.pos += target_len;
        }
    }
}

/// This trait allows using generic AES ciphers with different key sizes.
pub trait AesCipher {
    fn crypt_in_place(&mut self, target: &mut [u8]);
}

/// XORs a slice in place with another slice.
#[inline]
fn xor(dest: &mut [u8], src: &[u8]) {
    // Lengths must agree; XOR-ing over a shorter source would silently corrupt data.
    assert_eq!(dest.len(), src.len());
    dest.iter_mut().zip(src).for_each(|(d, s)| *d ^= *s);
}

#[cfg(test)]
mod tests {
    use super::{Aes128, Aes192, Aes256, AesCipher, AesCtrZipKeyStream, AesKind};
    use aes::cipher::{BlockEncrypt, KeyInit};

    /// Checks whether `crypt_in_place` produces the correct plaintext after one use and yields the
    /// cipertext again after applying it again.
    fn roundtrip<Aes>(key: &[u8], ciphertext: &[u8], expected_plaintext: &[u8])
    where
        Aes: AesKind,
        Aes::Cipher: KeyInit + BlockEncrypt,
    {
        let mut data = ciphertext.to_vec().into_boxed_slice();

        // First application decrypts the ciphertext in place.
        AesCtrZipKeyStream::<Aes>::new(key).crypt_in_place(&mut data);
        assert_eq!(*data, *expected_plaintext);

        // A fresh key stream applied a second time must restore the ciphertext.
        AesCtrZipKeyStream::<Aes>::new(key).crypt_in_place(&mut data);
        assert_eq!(*data, *ciphertext);
    }

    #[test]
    #[should_panic]
    fn new_with_wrong_key_size() {
        AesCtrZipKeyStream::<Aes128>::new(&[1, 2, 3, 4, 5]);
    }

    // The data used in these tests was generated with p7zip without any compression.
    // It's not possible to recreate the exact same data, since a random salt is used for encryption.
    // `7z a -phelloworld -mem=AES256 -mx=0 aes256_40byte.zip 40byte_data.txt`
    #[test]
    fn crypt_aes_256_0_byte() {
        let ciphertext = [];
        let expected_plaintext = &[];
        let key = [
            0x0b, 0xec, 0x2e, 0xf2, 0x46, 0xf0, 0x7e, 0x35, 0x16, 0x54, 0xe0, 0x98, 0x10, 0xb3,
            0x18, 0x55, 0x24, 0xa3, 0x9e, 0x0e, 0x40, 0xe7, 0x92, 0xad, 0xb2, 0x8a, 0x48, 0xf4,
            0x5c, 0xd0, 0xc0, 0x54,
        ];

        roundtrip::<Aes256>(&key, &ciphertext, expected_plaintext);
    }

    #[test]
    fn crypt_aes_128_5_byte() {
        let ciphertext = [0x98, 0xa9, 0x8c, 0x26, 0x0e];
        let expected_plaintext = b"asdf\n";
        let key = [
            0xe0, 0x25, 0x7b, 0x57, 0x97, 0x6a, 0xa4, 0x23, 0xab, 0x94, 0xaa, 0x44, 0xfd, 0x47,
            0x4f, 0xa5,
        ];

        roundtrip::<Aes128>(&key, &ciphertext, expected_plaintext);
    }

    #[test]
    fn crypt_aes_192_5_byte() {
        let ciphertext = [0x36, 0x55, 0x5c, 0x61, 0x3c];
        let expected_plaintext = b"asdf\n";
        let key = [
            0xe4, 0x4a, 0x88, 0x52, 0x8f, 0xf7, 0x0b, 0x81, 0x7b, 0x75, 0xf1, 0x74, 0x21, 0x37,
            0x8c, 0x90, 0xad, 0xbe, 0x4a, 0x65, 0xa8, 0x96, 0x0e, 0xcc,
        ];

        roundtrip::<Aes192>(&key, &ciphertext, expected_plaintext);
    }

    #[test]
    fn crypt_aes_256_5_byte() {
        let ciphertext = [0xc2, 0x47, 0xc0, 0xdc, 0x56];
        let expected_plaintext = b"asdf\n";
        let key = [
            0x79, 0x5e, 0x17, 0xf2, 0xc6, 0x3d, 0x28, 0x9b, 0x4b, 0x4b, 0xbb, 0xa9, 0xba, 0xc9,
            0xa5, 0xee, 0x3a, 0x4f, 0x0f, 0x4b, 0x29, 0xbd, 0xe9, 0xb8, 0x41, 0x9c, 0x41, 0xa5,
            0x15, 0xb2, 0x86, 0xab,
        ];

        roundtrip::<Aes256>(&key, &ciphertext, expected_plaintext);
    }

    #[test]
    fn crypt_aes_128_40_byte() {
        let ciphertext = [
            0xcf, 0x72, 0x6b, 0xa1, 0xb2, 0x0f, 0xdf, 0xaa, 0x10, 0xad, 0x9c, 0x7f, 0x6d, 0x1c,
            0x8d, 0xb5, 0x16, 0x7e, 0xbb, 0x11, 0x69, 0x52, 0x8c, 0x89, 0x80, 0x32, 0xaa, 0x76,
            0xa6, 0x18, 0x31, 0x98, 0xee, 0xdd, 0x22, 0x68, 0xb7, 0xe6, 0x77, 0xd2,
        ];
        let expected_plaintext = b"Lorem ipsum dolor sit amet, consectetur\n";
        let key = [
            0x43, 0x2b, 0x6d, 0xbe, 0x05, 0x76, 0x6c, 0x9e, 0xde, 0xca, 0x3b, 0xf8, 0xaf, 0x5d,
            0x81, 0xb6,
        ];

        roundtrip::<Aes128>(&key, &ciphertext, expected_plaintext);
    }

    #[test]
    fn crypt_aes_192_40_byte() {
        let ciphertext = [
            0xa6, 0xfc, 0x52, 0x79, 0x2c, 0x6c, 0xfe, 0x68, 0xb1, 0xa8, 0xb3, 0x07, 0x52, 0x8b,
            0x82, 0xa6, 0x87, 0x9c, 0x72, 0x42, 0x3a, 0xf8, 0xc6, 0xa9, 0xc9, 0xfb, 0x61, 0x19,
            0x37, 0xb9, 0x56, 0x62, 0xf4, 0xfc, 0x5e, 0x7a, 0xdd, 0x55, 0x0a, 0x48,
        ];
        let expected_plaintext = b"Lorem ipsum dolor sit amet, consectetur\n";
        let key = [
            0xac, 0x92, 0x41, 0xba, 0xde, 0xd9, 0x02, 0xfe, 0x40, 0x92, 0x20, 0xf6, 0x56, 0x03,
            0xfe, 0xae, 0x1b, 0xba, 0x01, 0x97, 0x97, 0x79, 0xbb, 0xa6,
        ];

        roundtrip::<Aes192>(&key, &ciphertext, expected_plaintext);
    }

    #[test]
    fn crypt_aes_256_40_byte() {
        let ciphertext = [
            0xa9, 0x99, 0xbd, 0xea, 0x82, 0x9b, 0x8f, 0x2f, 0xb7, 0x52, 0x2f, 0x6b, 0xd8, 0xf6,
            0xab, 0x0e, 0x24, 0x51, 0x9e, 0x18, 0x0f, 0xc0, 0x8f, 0x54, 0x15, 0x80, 0xae, 0xbc,
            0xa0, 0x5c, 0x8a, 0x11, 0x8d, 0x14, 0x7e, 0xc5, 0xb4, 0xae, 0xd3, 0x37,
        ];
        let expected_plaintext = b"Lorem ipsum dolor sit amet, consectetur\n";
        let key = [
            0x64, 0x7c, 0x7a, 0xde, 0xf0, 0xf2, 0x61, 0x49, 0x1c, 0xf1, 0xf1, 0xe3, 0x37, 0xfc,
            0xe1, 0x4d, 0x4a, 0x77, 0xd4, 0xeb, 0x9e, 0x3d, 0x75, 0xce, 0x9a, 0x3e, 0x10, 0x50,
            0xc2, 0x07, 0x36, 0xb6,
        ];

        roundtrip::<Aes256>(&key, &ciphertext, expected_plaintext);
    }
}
use std::env::var; fn main() { if var("CARGO_FEATURE_DEFLATE_MINIZ").is_ok() && var("CARGO_FEATURE__ALL_FEATURES").is_err() { println!("cargo:warning=Feature `deflate-miniz` is deprecated; replace it with `deflate`"); } }
//! Possible ZIP compression methods.

use std::{fmt, io};

#[allow(deprecated)]
/// Identifies the storage format used to compress a file within a ZIP archive.
///
/// Each file's compression method is stored alongside it, allowing the
/// contents to be read without context.
///
/// When creating ZIP files, you may choose the method to use with
/// [`crate::write::FileOptions::compression_method`]
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
#[cfg_attr(fuzzing, derive(arbitrary::Arbitrary))]
#[non_exhaustive]
pub enum CompressionMethod {
    /// Store the file as is
    Stored,
    /// Compress the file using Deflate
    #[cfg(feature = "_deflate-any")]
    Deflated,
    /// Compress the file using Deflate64.
    /// Decoding deflate64 is supported but encoding deflate64 is not supported.
    #[cfg(feature = "deflate64")]
    Deflate64,
    /// Compress the file using BZIP2
    #[cfg(feature = "bzip2")]
    Bzip2,
    /// Encrypted using AES.
    ///
    /// The actual compression method has to be taken from the AES extra data field
    /// or from `ZipFileData`.
    #[cfg(feature = "aes-crypto")]
    Aes,
    /// Compress the file using ZStandard
    #[cfg(feature = "zstd")]
    Zstd,
    /// Compress the file using LZMA
    #[cfg(feature = "lzma")]
    Lzma,
    /// Compress the file using XZ
    #[cfg(feature = "xz")]
    Xz,
    /// Unsupported compression method
    #[cfg_attr(
        not(fuzzing),
        deprecated(since = "0.5.7", note = "use the constants instead")
    )]
    Unsupported(u16),
}

#[allow(deprecated, missing_docs)]
/// All compression methods defined for the ZIP format
// Each constant carries the official APPNOTE method tag. Methods behind a
// feature gate resolve to their enum variant when the feature is enabled and
// to `Unsupported(<tag>)` otherwise, so the constants exist unconditionally.
impl CompressionMethod {
    pub const STORE: Self = CompressionMethod::Stored;
    pub const SHRINK: Self = CompressionMethod::Unsupported(1);
    pub const REDUCE_1: Self = CompressionMethod::Unsupported(2);
    pub const REDUCE_2: Self = CompressionMethod::Unsupported(3);
    pub const REDUCE_3: Self = CompressionMethod::Unsupported(4);
    pub const REDUCE_4: Self = CompressionMethod::Unsupported(5);
    pub const IMPLODE: Self = CompressionMethod::Unsupported(6);
    #[cfg(feature = "_deflate-any")]
    pub const DEFLATE: Self = CompressionMethod::Deflated;
    #[cfg(not(feature = "_deflate-any"))]
    pub const DEFLATE: Self = CompressionMethod::Unsupported(8);
    #[cfg(feature = "deflate64")]
    pub const DEFLATE64: Self = CompressionMethod::Deflate64;
    #[cfg(not(feature = "deflate64"))]
    pub const DEFLATE64: Self = CompressionMethod::Unsupported(9);
    pub const PKWARE_IMPLODE: Self = CompressionMethod::Unsupported(10);
    #[cfg(feature = "bzip2")]
    pub const BZIP2: Self = CompressionMethod::Bzip2;
    #[cfg(not(feature = "bzip2"))]
    pub const BZIP2: Self = CompressionMethod::Unsupported(12);
    #[cfg(not(feature = "lzma"))]
    pub const LZMA: Self = CompressionMethod::Unsupported(14);
    #[cfg(feature = "lzma")]
    pub const LZMA: Self = CompressionMethod::Lzma;
    pub const IBM_ZOS_CMPSC: Self = CompressionMethod::Unsupported(16);
    pub const IBM_TERSE: Self = CompressionMethod::Unsupported(18);
    // Tag 20 is the old (withdrawn) Zstandard assignment; 93 is the current one.
    pub const ZSTD_DEPRECATED: Self = CompressionMethod::Unsupported(20);
    #[cfg(feature = "zstd")]
    pub const ZSTD: Self = CompressionMethod::Zstd;
    #[cfg(not(feature = "zstd"))]
    pub const ZSTD: Self = CompressionMethod::Unsupported(93);
    pub const MP3: Self = CompressionMethod::Unsupported(94);
    #[cfg(feature = "xz")]
    pub const XZ: Self = CompressionMethod::Xz;
    #[cfg(not(feature = "xz"))]
    pub const XZ: Self = CompressionMethod::Unsupported(95);
    pub const JPEG: Self = CompressionMethod::Unsupported(96);
    pub const WAVPACK: Self = CompressionMethod::Unsupported(97);
    pub const PPMD: Self = CompressionMethod::Unsupported(98);
    #[cfg(feature = "aes-crypto")]
    pub const AES: Self = CompressionMethod::Aes;
    #[cfg(not(feature = "aes-crypto"))]
    pub const AES: Self = CompressionMethod::Unsupported(99);
}

impl CompressionMethod {
    /// Maps an on-disk method tag to the enum. Tags whose feature is disabled,
    /// or that are unknown, become `Unsupported(tag)`.
    pub(crate) const fn parse_from_u16(val: u16) -> Self {
        match val {
            0 => CompressionMethod::Stored,
            #[cfg(feature = "_deflate-any")]
            8 => CompressionMethod::Deflated,
            #[cfg(feature = "deflate64")]
            9 => CompressionMethod::Deflate64,
            #[cfg(feature = "bzip2")]
            12 => CompressionMethod::Bzip2,
            #[cfg(feature = "lzma")]
            14 => CompressionMethod::Lzma,
            #[cfg(feature = "xz")]
            95 => CompressionMethod::Xz,
            #[cfg(feature = "zstd")]
            93 => CompressionMethod::Zstd,
            #[cfg(feature = "aes-crypto")]
            99 => CompressionMethod::Aes,
            #[allow(deprecated)]
            v => CompressionMethod::Unsupported(v),
        }
    }

    /// Converts a u16 to its corresponding CompressionMethod
    #[deprecated(
        since = "0.5.7",
        note = "use a constant to construct a compression method"
    )]
    pub const fn from_u16(val: u16) -> CompressionMethod {
        Self::parse_from_u16(val)
    }

    /// Maps the enum back to its on-disk method tag (inverse of `parse_from_u16`).
    pub(crate) const fn serialize_to_u16(self) -> u16 {
        match self {
            CompressionMethod::Stored => 0,
            #[cfg(feature = "_deflate-any")]
            CompressionMethod::Deflated => 8,
            #[cfg(feature = "deflate64")]
            CompressionMethod::Deflate64 => 9,
            #[cfg(feature = "bzip2")]
            CompressionMethod::Bzip2 => 12,
            #[cfg(feature = "aes-crypto")]
            CompressionMethod::Aes => 99,
            #[cfg(feature = "zstd")]
            CompressionMethod::Zstd => 93,
            #[cfg(feature = "lzma")]
            CompressionMethod::Lzma => 14,
            #[cfg(feature = "xz")]
            CompressionMethod::Xz => 95,
            #[allow(deprecated)]
            CompressionMethod::Unsupported(v) => v,
        }
    }

    /// Converts a CompressionMethod to a u16
    #[deprecated(
        since = "0.5.7",
        note = "to match on other compression methods, use a constant"
    )]
    pub const fn to_u16(self) -> u16 {
        self.serialize_to_u16()
    }
}

impl Default for CompressionMethod {
    fn default() -> Self {
        // Deflate when available, plain storage otherwise.
        #[cfg(feature = "_deflate-any")]
        return CompressionMethod::Deflated;

        #[cfg(not(feature = "_deflate-any"))]
        return CompressionMethod::Stored;
    }
}

impl fmt::Display for CompressionMethod {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Just duplicate what the Debug format looks like, i.e, the enum key:
        write!(f, "{self:?}")
    }
}

/// The compression methods which have been implemented.
pub const SUPPORTED_COMPRESSION_METHODS: &[CompressionMethod] = &[
    CompressionMethod::Stored,
    #[cfg(feature = "_deflate-any")]
    CompressionMethod::Deflated,
    #[cfg(feature = "deflate64")]
    CompressionMethod::Deflate64,
    #[cfg(feature = "bzip2")]
    CompressionMethod::Bzip2,
    #[cfg(feature = "zstd")]
    CompressionMethod::Zstd,
];

// Streaming decompressor that dispatches to the backend selected by the
// entry's compression method.
pub(crate) enum Decompressor<R: io::BufRead> {
    Stored(R),
    #[cfg(feature = "_deflate-any")]
    Deflated(flate2::bufread::DeflateDecoder<R>),
    #[cfg(feature = "deflate64")]
    Deflate64(deflate64::Deflate64Decoder<R>),
    #[cfg(feature = "bzip2")]
    Bzip2(bzip2::bufread::BzDecoder<R>),
    #[cfg(feature = "zstd")]
    Zstd(zstd::Decoder<'static, R>),
    #[cfg(feature = "lzma")]
    Lzma(Box<crate::read::lzma::LzmaDecoder<R>>),
    #[cfg(feature = "xz")]
    Xz(crate::read::xz::XzDecoder<R>),
}

impl<R: io::BufRead> io::Read for Decompressor<R> {
    // Forwards the read to whichever backend variant is active.
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        match self {
            Decompressor::Stored(r) => r.read(buf),
            #[cfg(feature = "_deflate-any")]
            Decompressor::Deflated(r) => r.read(buf),
            #[cfg(feature = "deflate64")]
            Decompressor::Deflate64(r) => r.read(buf),
            #[cfg(feature = "bzip2")]
            Decompressor::Bzip2(r) => r.read(buf),
            #[cfg(feature = "zstd")]
            Decompressor::Zstd(r) => r.read(buf),
            #[cfg(feature = "lzma")]
            Decompressor::Lzma(r) => r.read(buf),
            #[cfg(feature = "xz")]
            Decompressor::Xz(r) => r.read(buf),
        }
    }
}

impl<R: io::BufRead> Decompressor<R> {
    // Wraps `reader` in the decoder matching `compression_method`.
    // Returns `UnsupportedArchive` for methods that are unknown or whose
    // feature was not compiled in.
    pub fn new(reader: R, compression_method: CompressionMethod) -> crate::result::ZipResult<Self> {
        Ok(match compression_method {
            CompressionMethod::Stored => Decompressor::Stored(reader),
            #[cfg(feature = "_deflate-any")]
            CompressionMethod::Deflated => {
                Decompressor::Deflated(flate2::bufread::DeflateDecoder::new(reader))
            }
            #[cfg(feature = "deflate64")]
            CompressionMethod::Deflate64 => {
                Decompressor::Deflate64(deflate64::Deflate64Decoder::with_buffer(reader))
            }
            #[cfg(feature = "bzip2")]
            CompressionMethod::Bzip2 => Decompressor::Bzip2(bzip2::bufread::BzDecoder::new(reader)),
            #[cfg(feature = "zstd")]
            CompressionMethod::Zstd => Decompressor::Zstd(zstd::Decoder::with_buffer(reader)?),
            #[cfg(feature = "lzma")]
            CompressionMethod::Lzma => {
                Decompressor::Lzma(Box::new(crate::read::lzma::LzmaDecoder::new(reader)))
            }
            #[cfg(feature = "xz")]
            CompressionMethod::Xz => Decompressor::Xz(crate::read::xz::XzDecoder::new(reader)),
            _ => {
                return Err(crate::result::ZipError::UnsupportedArchive(
                    "Compression method not supported",
                ))
            }
        })
    }

    /// Consumes this decoder, returning the underlying reader.
    pub fn into_inner(self) -> R {
        match self {
            Decompressor::Stored(r) => r,
            #[cfg(feature = "_deflate-any")]
            Decompressor::Deflated(r) => r.into_inner(),
            #[cfg(feature = "deflate64")]
            Decompressor::Deflate64(r) => r.into_inner(),
            #[cfg(feature = "bzip2")]
            Decompressor::Bzip2(r) => r.into_inner(),
            #[cfg(feature = "zstd")]
            Decompressor::Zstd(r) => r.finish(),
            #[cfg(feature = "lzma")]
            Decompressor::Lzma(r) => r.into_inner(),
            #[cfg(feature = "xz")]
            Decompressor::Xz(r) => r.into_inner(),
        }
    }
}

#[cfg(test)]
mod test {
    use super::{CompressionMethod, SUPPORTED_COMPRESSION_METHODS};

    #[test]
    fn from_eq_to() {
        // Every possible tag must survive a parse/serialize round trip.
        for v in 0..(u16::MAX as u32 + 1) {
            let from = CompressionMethod::parse_from_u16(v as u16);
            let to = from.serialize_to_u16() as u32;
            assert_eq!(v, to);
        }
    }

    #[test]
    fn to_eq_from() {
        fn check_match(method: CompressionMethod) {
            let to = method.serialize_to_u16();
            let from = CompressionMethod::parse_from_u16(to);
            let back = from.serialize_to_u16();
            assert_eq!(to, back);
        }

        for &method in SUPPORTED_COMPRESSION_METHODS {
            check_match(method);
        }
    }

    #[test]
    fn to_display_fmt() {
        // Display is documented to mirror Debug.
        fn check_match(method: CompressionMethod) {
            let debug_str = format!("{method:?}");
            let display_str = format!("{method}");
            assert_eq!(debug_str, display_str);
        }

        for &method in SUPPORTED_COMPRESSION_METHODS {
            check_match(method);
        }
    }
}
//! Convert a string in IBM codepage 437 to UTF-8

/// Trait to convert IBM codepage 437 to the target type
pub trait FromCp437 {
    /// Target type
    type Target;

    /// Function that does the conversion from cp437.
    /// Generally allocations will be avoided if all data falls into the ASCII range.
    #[allow(clippy::wrong_self_convention)]
    fn from_cp437(self) -> Self::Target;
}

impl<'a> FromCp437 for &'a [u8] {
    type Target = ::std::borrow::Cow<'a, str>;

    fn from_cp437(self) -> Self::Target {
        if self.iter().all(|c| *c < 0x80) {
            // Pure ASCII: cp437 and UTF-8 agree byte-for-byte, so borrow without allocating.
            ::std::str::from_utf8(self).unwrap().into()
        } else {
            // At least one high byte: transcode character by character into an owned String.
            self.iter().map(|c| to_char(*c)).collect::<String>().into()
        }
    }
}

impl FromCp437 for Box<[u8]> {
    type Target = Box<str>;

    fn from_cp437(self) -> Self::Target {
        if self.iter().all(|c| *c < 0x80) {
            // Pure ASCII: the bytes are already valid UTF-8, reuse the allocation.
            String::from_utf8(self.into()).unwrap()
        } else {
            self.iter().copied().map(to_char).collect()
        }
        .into_boxed_str()
    }
}

// Maps a single cp437 byte to its Unicode scalar value.
fn to_char(input: u8) -> char {
    let output = match input {
        // The ASCII range is identical in cp437 and Unicode.
        0x00..=0x7f => input as u32,
        0x80 => 0x00c7, 0x81 => 0x00fc, 0x82 => 0x00e9, 0x83 => 0x00e2,
        0x84 => 0x00e4, 0x85 => 0x00e0, 0x86 => 0x00e5, 0x87 => 0x00e7,
        0x88 => 0x00ea, 0x89 => 0x00eb, 0x8a => 0x00e8, 0x8b => 0x00ef,
        0x8c => 0x00ee, 0x8d => 0x00ec, 0x8e => 0x00c4, 0x8f => 0x00c5,
        0x90 => 0x00c9, 0x91 => 0x00e6, 0x92 => 0x00c6, 0x93 => 0x00f4,
        0x94 => 0x00f6, 0x95 => 0x00f2, 0x96 => 0x00fb, 0x97 => 0x00f9,
        0x98 => 0x00ff, 0x99 => 0x00d6, 0x9a => 0x00dc, 0x9b => 0x00a2,
        0x9c => 0x00a3, 0x9d => 0x00a5, 0x9e => 0x20a7, 0x9f => 0x0192,
        0xa0 => 0x00e1, 0xa1 => 0x00ed, 0xa2 => 0x00f3, 0xa3 => 0x00fa,
        0xa4 => 0x00f1, 0xa5 => 0x00d1, 0xa6 => 0x00aa, 0xa7 => 0x00ba,
        0xa8 => 0x00bf, 0xa9 => 0x2310, 0xaa => 0x00ac, 0xab => 0x00bd,
        0xac => 0x00bc, 0xad => 0x00a1, 0xae => 0x00ab, 0xaf => 0x00bb,
        0xb0 => 0x2591, 0xb1 => 0x2592, 0xb2 => 0x2593, 0xb3 => 0x2502,
        0xb4 => 0x2524, 0xb5 => 0x2561, 0xb6 => 0x2562, 0xb7 => 0x2556,
        0xb8 => 0x2555, 0xb9 => 0x2563, 0xba => 0x2551, 0xbb => 0x2557,
        0xbc => 0x255d, 0xbd => 0x255c, 0xbe => 0x255b, 0xbf => 0x2510,
        0xc0 => 0x2514, 0xc1 => 0x2534, 0xc2 => 0x252c, 0xc3 => 0x251c,
        0xc4 => 0x2500, 0xc5 => 0x253c, 0xc6 => 0x255e, 0xc7 => 0x255f,
        0xc8 => 0x255a, 0xc9 => 0x2554, 0xca => 0x2569, 0xcb => 0x2566,
        0xcc => 0x2560, 0xcd => 0x2550, 0xce => 0x256c, 0xcf => 0x2567,
        0xd0 => 0x2568, 0xd1 => 0x2564, 0xd2 => 0x2565, 0xd3 => 0x2559,
        0xd4 => 0x2558, 0xd5 => 0x2552, 0xd6 => 0x2553, 0xd7 => 0x256b,
        0xd8 => 0x256a, 0xd9 => 0x2518, 0xda => 0x250c, 0xdb => 0x2588,
        0xdc => 0x2584, 0xdd => 0x258c, 0xde => 0x2590, 0xdf => 0x2580,
        0xe0 => 0x03b1, 0xe1 => 0x00df, 0xe2 => 0x0393, 0xe3 => 0x03c0,
        0xe4 => 0x03a3, 0xe5 => 0x03c3, 0xe6 => 0x00b5, 0xe7 => 0x03c4,
        0xe8 => 0x03a6, 0xe9 => 0x0398, 0xea => 0x03a9, 0xeb => 0x03b4,
        0xec => 0x221e, 0xed => 0x03c6, 0xee => 0x03b5, 0xef => 0x2229,
        0xf0 => 0x2261, 0xf1 => 0x00b1, 0xf2 => 0x2265, 0xf3 => 0x2264,
        0xf4 => 0x2320, 0xf5 => 0x2321, 0xf6 => 0x00f7, 0xf7 => 0x2248,
        0xf8 => 0x00b0, 0xf9 => 0x2219, 0xfa => 0x00b7, 0xfb => 0x221a,
        0xfc => 0x207f, 0xfd => 0x00b2, 0xfe => 0x25a0, 0xff => 0x00a0,
    };
    // Every table entry is a valid Unicode scalar value, so this cannot panic
    // (exhaustively exercised by `to_char_valid` below).
    ::std::char::from_u32(output).unwrap()
}

#[cfg(test)]
mod test {
    #[test]
    fn to_char_valid() {
        // Exercise all 256 inputs; the function must not panic.
        for i in 0x00_u32..0x100 {
            super::to_char(i as u8);
        }
    }

    #[test]
    fn ascii() {
        for i in 0x00..0x80 {
            assert_eq!(super::to_char(i), i as char);
        }
    }

    #[test]
    #[allow(unknown_lints)]
    // invalid_from_utf8 was added in rust 1.72
    #[allow(invalid_from_utf8)]
    fn example_slice() {
        use super::FromCp437;
        let data = b"Cura\x87ao";
        assert!(::std::str::from_utf8(data).is_err());
        assert_eq!(data.from_cp437(), "Curaçao");
    }

    #[test]
    fn example_vec() {
        use super::FromCp437;
        let data = vec![0xCC, 0xCD, 0xCD, 0xB9];
        assert!(String::from_utf8(data.clone()).is_err());
        assert_eq!(&*data.from_cp437(), "╠══╣");
    }
}
//! Helper module to compute a CRC32 checksum use std::io; use std::io::prelude::*; use crc32fast::Hasher; /// Reader that validates the CRC32 when it reaches the EOF. pub struct Crc32Reader<R> { inner: R, hasher: Hasher, check: u32, /// Signals if `inner` stores aes encrypted data. /// AE-2 encrypted data doesn't use crc and sets the value to 0. enabled: bool, } impl<R> Crc32Reader<R> { /// Get a new Crc32Reader which checks the inner reader against checksum. /// The check is disabled if `ae2_encrypted == true`. pub(crate) fn new(inner: R, checksum: u32, ae2_encrypted: bool) -> Crc32Reader<R> { Crc32Reader { inner, hasher: Hasher::new(), check: checksum, enabled: !ae2_encrypted, } } fn check_matches(&self) -> bool { self.check == self.hasher.clone().finalize() } pub fn into_inner(self) -> R { self.inner } } #[cold] fn invalid_checksum() -> io::Error { io::Error::new(io::ErrorKind::InvalidData, "Invalid checksum") } impl<R: Read> Read for Crc32Reader<R> { fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> { let count = self.inner.read(buf)?; if self.enabled { if count == 0 && !buf.is_empty() && !self.check_matches() { return Err(invalid_checksum()); } self.hasher.update(&buf[..count]); } Ok(count) } fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> { let start = buf.len(); let n = self.inner.read_to_end(buf)?; if self.enabled { self.hasher.update(&buf[start..]); if !self.check_matches() { return Err(invalid_checksum()); } } Ok(n) } fn read_to_string(&mut self, buf: &mut String) -> io::Result<usize> { let start = buf.len(); let n = self.inner.read_to_string(buf)?; if self.enabled { self.hasher.update(&buf.as_bytes()[start..]); if !self.check_matches() { return Err(invalid_checksum()); } } Ok(n) } } #[cfg(test)] mod test { use super::*; #[test] fn test_empty_reader() { let data: &[u8] = b""; let mut buf = [0; 1]; let mut reader = Crc32Reader::new(data, 0, false); assert_eq!(reader.read(&mut buf).unwrap(), 0); let mut reader = 
Crc32Reader::new(data, 1, false); assert!(reader .read(&mut buf) .unwrap_err() .to_string() .contains("Invalid checksum")); } #[test] fn test_byte_by_byte() { let data: &[u8] = b"1234"; let mut buf = [0; 1]; let mut reader = Crc32Reader::new(data, 0x9be3e0a3, false); assert_eq!(reader.read(&mut buf).unwrap(), 1); assert_eq!(reader.read(&mut buf).unwrap(), 1); assert_eq!(reader.read(&mut buf).unwrap(), 1); assert_eq!(reader.read(&mut buf).unwrap(), 1); assert_eq!(reader.read(&mut buf).unwrap(), 0); // Can keep reading 0 bytes after the end assert_eq!(reader.read(&mut buf).unwrap(), 0); } #[test] fn test_zero_read() { let data: &[u8] = b"1234"; let mut buf = [0; 5]; let mut reader = Crc32Reader::new(data, 0x9be3e0a3, false); assert_eq!(reader.read(&mut buf[..0]).unwrap(), 0); assert_eq!(reader.read(&mut buf).unwrap(), 4); } }
use crate::result::{ZipError, ZipResult};
use crate::unstable::LittleEndianReadExt;
use std::io::Read;

/// extended timestamp, as described in <https://libzip.org/specifications/extrafld.txt>
#[derive(Debug, Clone)]
pub struct ExtendedTimestamp {
    // Each is UNIX epoch seconds; `None` when the field omitted that timestamp.
    mod_time: Option<u32>,
    ac_time: Option<u32>,
    cr_time: Option<u32>,
}

impl ExtendedTimestamp {
    /// creates an extended timestamp struct by reading the required bytes from the reader.
    ///
    /// This method assumes that the length has already been read, therefore
    /// it must be passed as an argument
    pub fn try_from_reader<R>(reader: &mut R, len: u16) -> ZipResult<Self>
    where
        R: Read,
    {
        // First byte of the field is the flags bitmask (bit 0 = mtime,
        // bit 1 = atime, bit 2 = ctime).
        let mut flags = [0u8];
        reader.read_exact(&mut flags)?;
        let flags = flags[0];

        // the `flags` field refers to the local headers and might not correspond
        // to the len field. If the length field is 1+4, we assume that only
        // the modification time has been set

        // > Those times that are present will appear in the order indicated, but
        // > any combination of times may be omitted.  (Creation time may be
        // > present without access time, for example.)  TSize should equal
        // > (1 + 4*(number of set bits in Flags)), as the block is currently
        // > defined.
        if len != 5 && len as u32 != 1 + 4 * flags.count_ones() {
            //panic!("found len {len} and flags {flags:08b}");
            return Err(ZipError::UnsupportedArchive(
                "flags and len don't match in extended timestamp field",
            ));
        }

        if flags & 0b11111000 != 0 {
            // Only the low three flag bits are defined; anything else is unknown.
            return Err(ZipError::UnsupportedArchive(
                "found unsupported timestamps in the extended timestamp header",
            ));
        }

        // `len == 5` is treated as "mtime only" even if bit 0 is unset — see the
        // note above about `flags` describing the local header, not this field.
        let mod_time = if (flags & 0b00000001u8 == 0b00000001u8) || len == 5 {
            Some(reader.read_u32_le()?)
        } else {
            None
        };

        let ac_time = if flags & 0b00000010u8 == 0b00000010u8 && len > 5 {
            Some(reader.read_u32_le()?)
        } else {
            None
        };

        let cr_time = if flags & 0b00000100u8 == 0b00000100u8 && len > 5 {
            Some(reader.read_u32_le()?)
        } else {
            None
        };
        Ok(Self {
            mod_time,
            ac_time,
            cr_time,
        })
    }

    /// returns the last modification timestamp, if defined, as UNIX epoch seconds
    pub fn mod_time(&self) -> Option<u32> {
        self.mod_time
    }

    /// returns the last access timestamp, if defined, as UNIX epoch seconds
    pub fn ac_time(&self) -> Option<u32> {
        self.ac_time
    }

    /// returns the creation timestamp, if defined, as UNIX epoch seconds
    pub fn cr_time(&self) -> Option<u32> {
        self.cr_time
    }
}
//! types for extra fields /// marker trait to denote the place where this extra field has been stored pub trait ExtraFieldVersion {} /// use this to mark extra fields specified in a local header #[derive(Debug, Clone)] pub struct LocalHeaderVersion; /// use this to mark extra fields specified in the central header #[derive(Debug, Clone)] pub struct CentralHeaderVersion; impl ExtraFieldVersion for LocalHeaderVersion {} impl ExtraFieldVersion for CentralHeaderVersion {} mod extended_timestamp; mod zipinfo_utf8; pub use extended_timestamp::*; pub use zipinfo_utf8::*; /// contains one extra field #[derive(Debug, Clone)] pub enum ExtraField { /// extended timestamp, as described in <https://libzip.org/specifications/extrafld.txt> ExtendedTimestamp(ExtendedTimestamp), }
use crate::result::{ZipError, ZipResult}; use crate::unstable::LittleEndianReadExt; use core::mem::size_of; use std::io::Read; /// Info-ZIP Unicode Path Extra Field (0x7075) or Unicode Comment Extra Field (0x6375), as /// specified in APPNOTE 4.6.8 and 4.6.9 #[derive(Clone, Debug)] pub struct UnicodeExtraField { crc32: u32, content: Box<[u8]>, } impl UnicodeExtraField { /// Verifies the checksum and returns the content. pub fn unwrap_valid(self, ascii_field: &[u8]) -> ZipResult<Box<[u8]>> { let mut crc32 = crc32fast::Hasher::new(); crc32.update(ascii_field); let actual_crc32 = crc32.finalize(); if self.crc32 != actual_crc32 { return Err(ZipError::InvalidArchive( "CRC32 checksum failed on Unicode extra field", )); } Ok(self.content) } } impl UnicodeExtraField { pub(crate) fn try_from_reader<R: Read>(reader: &mut R, len: u16) -> ZipResult<Self> { // Read and discard version byte reader.read_exact(&mut [0u8])?; let crc32 = reader.read_u32_le()?; let content_len = (len as usize) .checked_sub(size_of::<u8>() + size_of::<u32>()) .ok_or(ZipError::InvalidArchive("Unicode extra field is too small"))?; let mut content = vec![0u8; content_len].into_boxed_slice(); reader.read_exact(&mut content)?; Ok(Self { crc32, content }) } }
//! A library for reading and writing ZIP archives.
//! ZIP is a format designed for cross-platform file "archiving".
//! That is, storing a collection of files in a single datastream
//! to make them easier to share between computers.
//! Additionally, ZIP is able to compress and encrypt files in its
//! archives.
//!
//! The current implementation is based on [PKWARE's APPNOTE.TXT v6.3.9](https://pkware.cachefly.net/webdocs/casestudies/APPNOTE.TXT)
//!
//! ---
//!
//! [`zip`](`crate`) has support for the most common ZIP archives found in common use.
//! However, in special cases,
//! there are some zip archives that are difficult to read or write.
//!
//! This is a list of supported features:
//!
//! | | Reading | Writing |
//! | ------- | ------ | ------- |
//! | Stored | ✅ | ✅ |
//! | Deflate | ✅ [->](`crate::ZipArchive::by_name`) | ✅ [->](`crate::write::FileOptions::compression_method`) |
//! | Deflate64 | ✅ | |
//! | Bzip2 | ✅ | ✅ |
//! | ZStandard | ✅ | ✅ |
//! | LZMA | ✅ | |
//! | XZ | ✅ | |
//! | AES encryption | ✅ | ✅ |
//! | ZipCrypto deprecated encryption | ✅ | ✅ |
//!
//!
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![warn(missing_docs)]
#![allow(unexpected_cfgs)] // Needed for cfg(fuzzing) on nightly as of 2024-05-06

// Public API surface re-exported at the crate root.
pub use crate::compression::{CompressionMethod, SUPPORTED_COMPRESSION_METHODS};
pub use crate::read::HasZipMetadata;
pub use crate::read::ZipArchive;
pub use crate::spec::{ZIP64_BYTES_THR, ZIP64_ENTRY_THR};
pub use crate::types::{AesMode, DateTime};
pub use crate::write::ZipWriter;

#[cfg(feature = "aes-crypto")]
mod aes;
#[cfg(feature = "aes-crypto")]
mod aes_ctr;
mod compression;
mod cp437;
mod crc32;
pub mod extra_fields;
pub mod read;
pub mod result;
mod spec;
mod types;
pub mod write;
mod zipcrypto;
pub use extra_fields::ExtraField;

// The module doc below is assembled from three `#[doc]` attributes so the
// crate's own version (CARGO_PKG_VERSION) can be spliced into the example.
#[doc = "Unstable APIs\n\
\
All APIs accessible by importing this module are unstable; They may be changed in patch \
releases. You MUST use an exact version specifier in `Cargo.toml`, to indicate the version of this \
API you're using:\n\
\
```toml\n\
[dependencies]\n\
zip = \"="]
#[doc=env!("CARGO_PKG_VERSION")]
#[doc = "\"\n\
```"]
pub mod unstable;
/// Configuration for reading ZIP archives.
#[repr(transparent)]
#[derive(Debug, Default, Clone, Copy)]
pub struct Config {
    /// An offset into the reader to use to find the start of the archive.
    pub archive_offset: ArchiveOffset,
}

/// The offset of the start of the archive from the beginning of the reader.
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq, Hash)]
pub enum ArchiveOffset {
    /// Try to detect the archive offset automatically.
    ///
    /// This will look at the central directory specified by `FromCentralDirectory` for a header.
    /// If missing, this will behave as if `None` were specified.
    #[default]
    Detect,
    /// Use the central directory length and offset to determine the start of the archive.
    #[deprecated(since = "2.3.0", note = "use `Detect` instead")]
    FromCentralDirectory,
    /// Specify a fixed archive offset.
    Known(u64),
}
use lzma_rs::decompress::{Options, Stream, UnpackedSize};
use std::collections::VecDeque;
use std::io::{BufRead, Error, ErrorKind, Read, Result, Write};

/// Decompression options for the zip flavour of LZMA: the unpacked size is read
/// from the stream header, no memory limit is imposed, and truncated streams
/// are tolerated.
const OPTIONS: Options = Options {
    unpacked_size: UnpackedSize::ReadFromHeader,
    memlimit: None,
    allow_incomplete: true,
};

/// `Read` adapter that decompresses LZMA data pulled from a buffered reader.
///
/// Compressed bytes are fed into `stream`, and decompressed output is drained
/// from the stream's internal `VecDeque` sink.
#[derive(Debug)]
pub struct LzmaDecoder<R> {
    /// Source of compressed bytes.
    compressed_reader: R,
    /// lzma-rs streaming decoder; decompressed output accumulates in the VecDeque.
    stream: Stream<VecDeque<u8>>,
}

impl<R: Read> LzmaDecoder<R> {
    /// Wraps `inner` in a fresh decoder using [`OPTIONS`].
    pub fn new(inner: R) -> Self {
        LzmaDecoder {
            compressed_reader: inner,
            stream: Stream::new_with_options(&OPTIONS, VecDeque::new()),
        }
    }

    /// Consumes this decoder, returning the underlying compressed reader.
    pub fn into_inner(self) -> R {
        self.compressed_reader
    }
}

impl<R: BufRead> Read for LzmaDecoder<R> {
    fn read(&mut self, buf: &mut [u8]) -> Result<usize> {
        // Drain whatever decompressed output is already buffered.
        let mut bytes_read = self
            .stream
            .get_output_mut()
            .ok_or_else(|| Error::new(ErrorKind::InvalidData, "Invalid LZMA stream"))?
            .read(buf)?;

        // Pull more compressed input until `buf` is full or the source is exhausted.
        while bytes_read < buf.len() {
            let consumed = {
                let compressed_bytes = self.compressed_reader.fill_buf()?;
                if compressed_bytes.is_empty() {
                    break;
                }
                self.stream.write_all(compressed_bytes)?;
                compressed_bytes.len()
            };
            // BUG FIX: the bytes returned by `fill_buf` must be marked as consumed,
            // otherwise the next `fill_buf` call yields the SAME bytes again and
            // they are fed to the decoder repeatedly (corrupt output / no progress).
            self.compressed_reader.consume(consumed);

            bytes_read += self
                .stream
                .get_output_mut()
                .expect("output sink checked above")
                .read(&mut buf[bytes_read..])?;
        }
        Ok(bytes_read)
    }
}
// read/magic_finder.rs — windowed search for magic byte sequences in a seekable
// reader, parameterized over the scan direction (forward or backward).

use std::io::{Read, Seek, SeekFrom};

use memchr::memmem::{Finder, FinderRev};

use crate::result::ZipResult;

/// Strategy trait abstracting the direction of a windowed search.
pub trait FinderDirection<'a> {
    /// Build a searcher for `needle`.
    fn new(needle: &'a [u8]) -> Self;
    /// Initial cursor (window start) for the given `[start, end)` bounds.
    fn reset_cursor(bounds: (u64, u64), window_size: usize) -> u64;
    /// Restrict `window` to the part not yet searched, given the offset of a
    /// previous match; also returns the offset of the kept part in the window.
    fn scope_window(window: &[u8], mid_window_offset: usize) -> (&[u8], usize);

    /// The magic bytes being searched for.
    fn needle(&self) -> &[u8];
    /// Search `haystack` in this direction.
    fn find(&self, haystack: &[u8]) -> Option<usize>;

    /// Advance the cursor by one window (overlapping by `needle.len() - 1`
    /// bytes so matches straddling a window boundary are not lost).
    /// Returns `None` once the bounds are exhausted.
    fn move_cursor(&self, cursor: u64, bounds: (u64, u64), window_size: usize) -> Option<u64>;
    /// Offset at which to resume searching after a match at `offset`.
    fn move_scope(&self, offset: usize) -> usize;
}

/// Front-to-back scan direction.
pub struct Forward<'a>(Finder<'a>);
impl<'a> FinderDirection<'a> for Forward<'a> {
    fn new(needle: &'a [u8]) -> Self {
        Self(Finder::new(needle))
    }

    fn reset_cursor((start_inclusive, _): (u64, u64), _: usize) -> u64 {
        start_inclusive
    }

    fn scope_window(window: &[u8], mid_window_offset: usize) -> (&[u8], usize) {
        // Keep everything after the previous match.
        (&window[mid_window_offset..], mid_window_offset)
    }

    fn find(&self, haystack: &[u8]) -> Option<usize> {
        self.0.find(haystack)
    }

    fn needle(&self) -> &[u8] {
        self.0.needle()
    }

    fn move_cursor(&self, cursor: u64, bounds: (u64, u64), window_size: usize) -> Option<u64> {
        let magic_overlap = self.needle().len().saturating_sub(1) as u64;
        let next = cursor.saturating_add(window_size as u64 - magic_overlap);

        if next >= bounds.1 {
            None
        } else {
            Some(next)
        }
    }

    fn move_scope(&self, offset: usize) -> usize {
        // Resume just past the end of the previous match.
        offset + self.needle().len()
    }
}

/// Back-to-front scan direction.
pub struct Backwards<'a>(FinderRev<'a>);
impl<'a> FinderDirection<'a> for Backwards<'a> {
    fn new(needle: &'a [u8]) -> Self {
        Self(FinderRev::new(needle))
    }

    fn reset_cursor(bounds: (u64, u64), window_size: usize) -> u64 {
        // Start with the window flush against the end bound.
        bounds
            .1
            .saturating_sub(window_size as u64)
            .clamp(bounds.0, bounds.1)
    }

    fn scope_window(window: &[u8], mid_window_offset: usize) -> (&[u8], usize) {
        // Keep everything before the previous match.
        (&window[..mid_window_offset], 0)
    }

    fn find(&self, haystack: &[u8]) -> Option<usize> {
        self.0.rfind(haystack)
    }

    fn needle(&self) -> &[u8] {
        self.0.needle()
    }

    fn move_cursor(&self, cursor: u64, bounds: (u64, u64), window_size: usize) -> Option<u64> {
        let magic_overlap = self.needle().len().saturating_sub(1) as u64;
        if cursor <= bounds.0 {
            None
        } else {
            Some(
                cursor
                    .saturating_add(magic_overlap)
                    .saturating_sub(window_size as u64)
                    .clamp(bounds.0, bounds.1),
            )
        }
    }

    fn move_scope(&self, offset: usize) -> usize {
        // Resume searching strictly before the previous match.
        offset
    }
}

/// A utility for finding magic symbols from the end of a seekable reader.
///
/// Can be repurposed to recycle the internal buffer.
pub struct MagicFinder<Direction> {
    // Reusable window buffer (fixed size after construction).
    buffer: Box<[u8]>,
    pub(self) finder: Direction,
    // Absolute file offset of the current window's start.
    cursor: u64,
    // When `Some`, the buffer still holds a valid window and searching should
    // resume at this in-window offset; `None` forces a fresh read.
    mid_buffer_offset: Option<usize>,
    // Absolute `[start_inclusive, end_exclusive)` search bounds.
    bounds: (u64, u64),
}

impl<'a, T: FinderDirection<'a>> MagicFinder<T> {
    /// Create a new magic bytes finder to look within specific bounds.
    pub fn new(magic_bytes: &'a [u8], start_inclusive: u64, end_exclusive: u64) -> Self {
        const BUFFER_SIZE: usize = 2048;

        // Smaller buffer size would be unable to locate bytes.
        // Equal buffer size would stall (the window could not be moved).
        debug_assert!(BUFFER_SIZE >= magic_bytes.len());

        Self {
            buffer: vec![0; BUFFER_SIZE].into_boxed_slice(),
            finder: T::new(magic_bytes),
            cursor: T::reset_cursor((start_inclusive, end_exclusive), BUFFER_SIZE),
            mid_buffer_offset: None,
            bounds: (start_inclusive, end_exclusive),
        }
    }

    /// Repurpose the finder for different bytes or bounds.
    pub fn repurpose(&mut self, magic_bytes: &'a [u8], bounds: (u64, u64)) -> &mut Self {
        debug_assert!(self.buffer.len() >= magic_bytes.len());

        self.finder = T::new(magic_bytes);
        self.cursor = T::reset_cursor(bounds, self.buffer.len());
        self.bounds = bounds;

        // Reset the mid-buffer offset, to invalidate buffer content.
        self.mid_buffer_offset = None;

        self
    }

    /// Find the next magic bytes in the direction specified in the type.
    ///
    /// Returns `Ok(Some(pos))` with the reader positioned at `pos`, or
    /// `Ok(None)` when the bounds are exhausted. Subsequent calls continue
    /// from the previous match.
    pub fn next<R: Read + Seek>(&mut self, reader: &mut R) -> ZipResult<Option<u64>> {
        loop {
            if self.cursor < self.bounds.0 || self.cursor >= self.bounds.1 {
                // The finder is consumed
                break;
            }

            /* Position the window and ensure correct length */
            let window_start = self.cursor;
            let window_end = self
                .cursor
                .saturating_add(self.buffer.len() as u64)
                .min(self.bounds.1);

            if window_end <= window_start {
                // Short-circuit on zero-sized windows to prevent loop
                break;
            }

            let window = &mut self.buffer[..(window_end - window_start) as usize];

            // Only hit the reader when the buffer does not already hold this
            // window (i.e. we are not resuming after a previous match).
            if self.mid_buffer_offset.is_none() {
                reader.seek(SeekFrom::Start(window_start))?;
                reader.read_exact(window)?;
            }

            let (window, window_start_offset) = match self.mid_buffer_offset {
                Some(mid_buffer_offset) => T::scope_window(window, mid_buffer_offset),
                None => (&*window, 0usize),
            };

            if let Some(offset) = self.finder.find(window) {
                let magic_pos = window_start + window_start_offset as u64 + offset as u64;
                reader.seek(SeekFrom::Start(magic_pos))?;

                // Remember where to resume within the (still valid) buffer.
                self.mid_buffer_offset = Some(self.finder.move_scope(window_start_offset + offset));

                return Ok(Some(magic_pos));
            }

            self.mid_buffer_offset = None;

            match self
                .finder
                .move_cursor(self.cursor, self.bounds, self.buffer.len())
            {
                Some(new_cursor) => {
                    self.cursor = new_cursor;
                }
                None => {
                    // Destroy the finder when we've reached the end of the bounds.
                    self.bounds.0 = self.bounds.1;
                    break;
                }
            }
        }

        Ok(None)
    }
}

/// A magic bytes finder with an optimistic guess that is tried before
/// the inner finder begins searching from end. This enables much faster
/// lookup in files without appended junk, because the magic bytes will be
/// found directly.
///
/// The guess can be marked as mandatory to produce an error. This is useful
/// if the ArchiveOffset is known and auto-detection is not desired.
pub struct OptimisticMagicFinder<Direction> {
    inner: MagicFinder<Direction>,
    // (guessed offset, is_mandatory)
    initial_guess: Option<(u64, bool)>,
}

/// This is a temporary restriction, to avoid heap allocation in [`Self::next_back`].
///
/// We only use magic bytes of size 4 at the moment.
const STACK_BUFFER_SIZE: usize = 8;

impl<'a, Direction: FinderDirection<'a>> OptimisticMagicFinder<Direction> {
    /// Create a new empty optimistic magic bytes finder.
    pub fn new_empty() -> Self {
        Self {
            inner: MagicFinder::new(&[], 0, 0),
            initial_guess: None,
        }
    }

    /// Repurpose the finder for different bytes, bounds and initial guesses.
    pub fn repurpose(
        &mut self,
        magic_bytes: &'a [u8],
        bounds: (u64, u64),
        initial_guess: Option<(u64, bool)>,
    ) -> &mut Self {
        debug_assert!(magic_bytes.len() <= STACK_BUFFER_SIZE);

        self.inner.repurpose(magic_bytes, bounds);
        self.initial_guess = initial_guess;

        self
    }

    /// Equivalent to `next_back`, with an optional initial guess attempted before
    /// proceeding with reading from the back of the reader.
    pub fn next<R: Read + Seek>(&mut self, reader: &mut R) -> ZipResult<Option<u64>> {
        if let Some((v, mandatory)) = self.initial_guess {
            reader.seek(SeekFrom::Start(v))?;

            let mut buffer = [0; STACK_BUFFER_SIZE];
            let buffer = &mut buffer[..self.inner.finder.needle().len()];

            // Attempt to match only if there's enough space for the needle
            if v.saturating_add(buffer.len() as u64) <= self.inner.bounds.1 {
                reader.read_exact(buffer)?;

                // If a match is found, yield it.
                if self.inner.finder.needle() == buffer {
                    self.initial_guess.take();
                    reader.seek(SeekFrom::Start(v))?;
                    return Ok(Some(v));
                }
            }

            // If a match is not found, but the initial guess was mandatory,
            // do not fall back to scanning: report "not found".
            if mandatory {
                return Ok(None);
            }

            // If the initial guess was not mandatory, remove it, as it was not found.
            self.initial_guess.take();
        }

        self.inner.next(reader)
    }
}
// read/stream.rs — forward-only ZIP reading over a non-seekable stream.

use std::fs;
use std::io::{self, Read};
use std::path::{Path, PathBuf};

use super::{
    central_header_to_zip_file_inner, read_zipfile_from_stream, ZipCentralEntryBlock, ZipError,
    ZipFile, ZipFileData, ZipResult,
};

use crate::spec::FixedSizeBlock;

/// Stream decoder for zip.
#[derive(Debug)]
pub struct ZipStreamReader<R>(R);

impl<R> ZipStreamReader<R> {
    /// Create a new ZipStreamReader
    pub const fn new(reader: R) -> Self {
        Self(reader)
    }
}

impl<R: Read> ZipStreamReader<R> {
    /// Parse one central-directory entry from the current stream position.
    fn parse_central_directory(&mut self) -> ZipResult<ZipStreamFileMetadata> {
        // Give archive_offset and central_header_start dummy value 0, since
        // they are not used in the output.
        let archive_offset = 0;
        let central_header_start = 0;

        // Parse central header
        let block = ZipCentralEntryBlock::parse(&mut self.0)?;
        let file = central_header_to_zip_file_inner(
            &mut self.0,
            archive_offset,
            central_header_start,
            block,
        )?;
        Ok(ZipStreamFileMetadata(file))
    }

    /// Iterate over the stream, handing each file (and then each file's
    /// central-directory metadata) to `visitor`.
    pub fn visit<V: ZipStreamVisitor>(mut self, visitor: &mut V) -> ZipResult<()> {
        // First pass: local entries with file contents.
        while let Some(mut file) = read_zipfile_from_stream(&mut self.0)? {
            visitor.visit_file(&mut file)?;
        }

        // Second pass: central directory. The loop ends on the first record
        // that fails to parse as an entry (e.g. the end-of-central-directory
        // record), so parse errors here are deliberately not propagated.
        while let Ok(metadata) = self.parse_central_directory() {
            visitor.visit_additional_metadata(&metadata)?;
        }

        Ok(())
    }

    /// Extract a Zip archive into a directory, overwriting files if they
    /// already exist. Paths are sanitized with [`ZipFile::enclosed_name`].
    ///
    /// Extraction is not atomic; If an error is encountered, some of the files
    /// may be left on disk.
    pub fn extract<P: AsRef<Path>>(self, directory: P) -> ZipResult<()> {
        // Visitor that writes files to disk and (on unix) applies permissions
        // from the central directory afterwards.
        struct Extractor<'a>(&'a Path);
        impl ZipStreamVisitor for Extractor<'_> {
            fn visit_file(&mut self, file: &mut ZipFile<'_>) -> ZipResult<()> {
                let filepath = file
                    .enclosed_name()
                    .ok_or(ZipError::InvalidArchive("Invalid file path"))?;

                let outpath = self.0.join(filepath);

                if file.is_dir() {
                    fs::create_dir_all(&outpath)?;
                } else {
                    if let Some(p) = outpath.parent() {
                        fs::create_dir_all(p)?;
                    }
                    let mut outfile = fs::File::create(&outpath)?;
                    io::copy(file, &mut outfile)?;
                }

                Ok(())
            }

            #[allow(unused)]
            fn visit_additional_metadata(
                &mut self,
                metadata: &ZipStreamFileMetadata,
            ) -> ZipResult<()> {
                // Unix modes are only present in the central directory, so
                // they can only be applied in this second pass.
                #[cfg(unix)]
                {
                    let filepath = metadata
                        .enclosed_name()
                        .ok_or(ZipError::InvalidArchive("Invalid file path"))?;

                    let outpath = self.0.join(filepath);

                    use std::os::unix::fs::PermissionsExt;
                    if let Some(mode) = metadata.unix_mode() {
                        fs::set_permissions(outpath, fs::Permissions::from_mode(mode))?;
                    }
                }

                Ok(())
            }
        }

        self.visit(&mut Extractor(directory.as_ref()))
    }
}

/// Visitor for ZipStreamReader
pub trait ZipStreamVisitor {
    /// * `file` - contains the content of the file and most of the metadata,
    ///   except:
    ///     - `comment`: set to an empty string
    ///     - `data_start`: set to 0
    ///     - `external_attributes`: `unix_mode()`: will return None
    fn visit_file(&mut self, file: &mut ZipFile<'_>) -> ZipResult<()>;

    /// This function is guaranteed to be called after all `visit_file`s.
    ///
    /// * `metadata` - Provides missing metadata in `visit_file`.
    fn visit_additional_metadata(&mut self, metadata: &ZipStreamFileMetadata) -> ZipResult<()>;
}

/// Additional metadata for the file.
#[derive(Debug)]
pub struct ZipStreamFileMetadata(ZipFileData);

impl ZipStreamFileMetadata {
    /// Get the name of the file
    ///
    /// # Warnings
    ///
    /// It is dangerous to use this name directly when extracting an archive.
    /// It may contain an absolute path (`/etc/shadow`), or break out of the
    /// current directory (`../runtime`). Carelessly writing to these paths
    /// allows an attacker to craft a ZIP archive that will overwrite critical
    /// files.
    ///
    /// You can use the [`ZipFile::enclosed_name`] method to validate the name
    /// as a safe path.
    pub fn name(&self) -> &str {
        &self.0.file_name
    }

    /// Get the name of the file, in the raw (internal) byte representation.
    ///
    /// The encoding of this data is currently undefined.
    pub fn name_raw(&self) -> &[u8] {
        &self.0.file_name_raw
    }

    /// Rewrite the path, ignoring any path components with special meaning.
    ///
    /// - Absolute paths are made relative
    /// - [std::path::Component::ParentDir]s are ignored
    /// - Truncates the filename at a NULL byte
    ///
    /// This is appropriate if you need to be able to extract *something* from
    /// any archive, but will easily misrepresent trivial paths like
    /// `foo/../bar` as `foo/bar` (instead of `bar`). Because of this,
    /// [`ZipFile::enclosed_name`] is the better option in most scenarios.
    pub fn mangled_name(&self) -> PathBuf {
        self.0.file_name_sanitized()
    }

    /// Ensure the file path is safe to use as a [`Path`].
    ///
    /// - It can't contain NULL bytes
    /// - It can't resolve to a path outside the current directory
    ///   > `foo/../bar` is fine, `foo/../../bar` is not.
    /// - It can't be an absolute path
    ///
    /// This will read well-formed ZIP files correctly, and is resistant
    /// to path-based exploits. It is recommended over
    /// [`ZipFile::mangled_name`].
    pub fn enclosed_name(&self) -> Option<PathBuf> {
        self.0.enclosed_name()
    }

    /// Returns whether the file is actually a directory
    pub fn is_dir(&self) -> bool {
        // Directory entries are stored with a trailing path separator.
        self.name()
            .chars()
            .next_back()
            .is_some_and(|c| c == '/' || c == '\\')
    }

    /// Returns whether the file is a regular file
    pub fn is_file(&self) -> bool {
        !self.is_dir()
    }

    /// Get the comment of the file
    pub fn comment(&self) -> &str {
        &self.0.file_comment
    }

    /// Get unix mode for the file
    pub const fn unix_mode(&self) -> Option<u32> {
        self.0.unix_mode()
    }
}

#[cfg(test)]
mod test {
    use super::*;
    use std::collections::BTreeSet;

    // Visitor that accepts everything and records nothing.
    struct DummyVisitor;
    impl ZipStreamVisitor for DummyVisitor {
        fn visit_file(&mut self, _file: &mut ZipFile<'_>) -> ZipResult<()> {
            Ok(())
        }

        fn visit_additional_metadata(
            &mut self,
            _metadata: &ZipStreamFileMetadata,
        ) -> ZipResult<()> {
            Ok(())
        }
    }

    // Counts (files, metadata records) seen.
    #[allow(dead_code)]
    #[derive(Default, Debug, Eq, PartialEq)]
    struct CounterVisitor(u64, u64);
    impl ZipStreamVisitor for CounterVisitor {
        fn visit_file(&mut self, _file: &mut ZipFile<'_>) -> ZipResult<()> {
            self.0 += 1;
            Ok(())
        }

        fn visit_additional_metadata(
            &mut self,
            _metadata: &ZipStreamFileMetadata,
        ) -> ZipResult<()> {
            self.1 += 1;
            Ok(())
        }
    }

    #[test]
    fn invalid_offset() {
        ZipStreamReader::new(io::Cursor::new(include_bytes!(
            "../../tests/data/invalid_offset.zip"
        )))
        .visit(&mut DummyVisitor)
        .unwrap_err();
    }

    #[test]
    fn invalid_offset2() {
        ZipStreamReader::new(io::Cursor::new(include_bytes!(
            "../../tests/data/invalid_offset2.zip"
        )))
        .visit(&mut DummyVisitor)
        .unwrap_err();
    }

    #[test]
    fn zip_read_streaming() {
        let reader = ZipStreamReader::new(io::Cursor::new(include_bytes!(
            "../../tests/data/mimetype.zip"
        )));

        #[derive(Default)]
        struct V {
            filenames: BTreeSet<Box<str>>,
        }
        impl ZipStreamVisitor for V {
            fn visit_file(&mut self, file: &mut ZipFile<'_>) -> ZipResult<()> {
                if file.is_file() {
                    self.filenames.insert(file.name().into());
                }
                Ok(())
            }

            fn visit_additional_metadata(
                &mut self,
                metadata: &ZipStreamFileMetadata,
            ) -> ZipResult<()> {
                // Every central-directory file must have been seen in pass 1.
                if metadata.is_file() {
                    assert!(
                        self.filenames.contains(metadata.name()),
                        "{} is missing its file content",
                        metadata.name()
                    );
                }
                Ok(())
            }
        }

        reader.visit(&mut V::default()).unwrap();
    }

    #[test]
    fn file_and_dir_predicates() {
        let reader = ZipStreamReader::new(io::Cursor::new(include_bytes!(
            "../../tests/data/files_and_dirs.zip"
        )));

        #[derive(Default)]
        struct V {
            filenames: BTreeSet<Box<str>>,
        }
        impl ZipStreamVisitor for V {
            fn visit_file(&mut self, file: &mut ZipFile<'_>) -> ZipResult<()> {
                let full_name = file.enclosed_name().unwrap();
                let file_name = full_name.file_name().unwrap().to_str().unwrap();
                assert!(
                    (file_name.starts_with("dir") && file.is_dir())
                        || (file_name.starts_with("file") && file.is_file())
                );

                if file.is_file() {
                    self.filenames.insert(file.name().into());
                }
                Ok(())
            }

            fn visit_additional_metadata(
                &mut self,
                metadata: &ZipStreamFileMetadata,
            ) -> ZipResult<()> {
                if metadata.is_file() {
                    assert!(
                        self.filenames.contains(metadata.name()),
                        "{} is missing its file content",
                        metadata.name()
                    );
                }
                Ok(())
            }
        }

        reader.visit(&mut V::default()).unwrap();
    }

    /// test case to ensure we don't preemptively over allocate based on the
    /// declared number of files in the CDE of an invalid zip when the number of
    /// files declared is more than the alleged offset in the CDE
    #[test]
    fn invalid_cde_number_of_files_allocation_smaller_offset() {
        ZipStreamReader::new(io::Cursor::new(include_bytes!(
            "../../tests/data/invalid_cde_number_of_files_allocation_smaller_offset.zip"
        )))
        .visit(&mut DummyVisitor)
        .unwrap_err();
    }

    /// test case to ensure we don't preemptively over allocate based on the
    /// declared number of files in the CDE of an invalid zip when the number of
    /// files declared is less than the alleged offset in the CDE
    #[test]
    fn invalid_cde_number_of_files_allocation_greater_offset() {
        ZipStreamReader::new(io::Cursor::new(include_bytes!(
            "../../tests/data/invalid_cde_number_of_files_allocation_greater_offset.zip"
        )))
        .visit(&mut DummyVisitor)
        .unwrap_err();
    }
}
// read/xz.rs — minimal XZ container parser feeding LZMA2 blocks to lzma_rs.
// Hand-validates the stream header, block headers, index and footer CRCs.

use crc32fast::Hasher;
use lzma_rs::decompress::raw::Lzma2Decoder;
use std::{
    collections::VecDeque,
    io::{BufRead, Error, Read, Result, Write},
};

#[derive(Debug)]
pub struct XzDecoder<R: BufRead> {
    compressed_reader: R,
    // Bytes consumed from the current XZ stream so far (reset to 0 after a
    // complete stream, so concatenated streams can follow).
    stream_size: usize,
    // Decompressed output that did not fit into the caller's buffer.
    buf: VecDeque<u8>,
    // Size in bytes of the per-block check field (0 = none, 4 = CRC32).
    check_size: usize,
    // (unpadded size, uncompressed size) per decoded block, matched against
    // the index at end of stream.
    records: Vec<(usize, usize)>,
    // Stream flags, re-verified against the footer.
    flags: [u8; 2],
}

impl<R: BufRead> XzDecoder<R> {
    pub fn new(inner: R) -> Self {
        XzDecoder {
            compressed_reader: inner,
            stream_size: 0,
            buf: VecDeque::new(),
            check_size: 0,
            records: vec![],
            flags: [0, 0],
        }
    }
}

// Wrapper that counts how many compressed bytes have been consumed; the count
// drives size/padding arithmetic in `read`.
struct CountReader<'a, R: BufRead> {
    inner: &'a mut R,
    count: &'a mut usize,
}

impl<R: BufRead> Read for CountReader<'_, R> {
    fn read(&mut self, buf: &mut [u8]) -> Result<usize> {
        let count = self.inner.read(buf)?;
        *self.count += count;
        Ok(count)
    }
}

impl<R: BufRead> BufRead for CountReader<'_, R> {
    fn fill_buf(&mut self) -> Result<&[u8]> {
        self.inner.fill_buf()
    }

    fn consume(&mut self, amt: usize) {
        self.inner.consume(amt);
        *self.count += amt;
    }
}

// Write sink that fills the caller's buffer first and spills the remainder
// into `rest` (the decoder's carry-over queue); `total` counts everything.
struct BufWriter<'a> {
    inner: &'a mut [u8],
    written: &'a mut usize,
    total: &'a mut usize,
    rest: &'a mut VecDeque<u8>,
}

impl Write for BufWriter<'_> {
    fn write(&mut self, buf: &[u8]) -> Result<usize> {
        if self.inner.len() > *self.written {
            let len = std::cmp::min(buf.len(), self.inner.len() - *self.written);
            self.inner[*self.written..*self.written + len].copy_from_slice(&buf[..len]);
            *self.written += len;
            *self.total += len;
            Ok(len)
        } else {
            self.rest.extend(buf.iter());
            *self.total += buf.len();
            Ok(buf.len())
        }
    }

    fn flush(&mut self) -> Result<()> {
        Ok(())
    }
}

// Shorthand for an InvalidData error.
fn error<T>(s: &'static str) -> Result<T> {
    Err(Error::new(std::io::ErrorKind::InvalidData, s))
}

// Decode an XZ variable-length integer (7 bits per byte, high bit =
// continuation, at most 9 bytes), folding the raw bytes into `hasher`.
fn get_multibyte<R: BufRead>(input: &mut R, hasher: &mut Hasher) -> Result<u64> {
    let mut result = 0;
    for i in 0..9 {
        let mut b = [0u8; 1];
        input.read_exact(&mut b)?;
        hasher.update(&b);
        let b = b[0];
        result ^= ((b & 0x7F) as u64) << (i * 7);
        if (b & 0x80) == 0 {
            return Ok(result);
        }
    }
    error("Invalid multi-byte encoding")
}

impl<R: BufRead> Read for XzDecoder<R> {
    fn read(&mut self, buf: &mut [u8]) -> Result<usize> {
        // Serve carried-over output from a previous block first.
        if !self.buf.is_empty() {
            let len = std::cmp::min(buf.len(), self.buf.len());
            buf[..len].copy_from_slice(&self.buf.as_slices().0[..len]);
            self.buf.drain(..len);
            return Ok(len);
        }
        let mut reader = CountReader {
            inner: &mut self.compressed_reader,
            count: &mut self.stream_size,
        };
        // count == 0 means we are at the start of a (possibly new) XZ stream:
        // validate the 12-byte stream header.
        if *reader.count == 0 {
            let mut b = [0u8; 12];
            match reader.read(&mut b) {
                Ok(0) => return Ok(0),
                Err(e) => return Err(e),
                _ => (),
            }
            if b[..6] != b"\xFD7zXZ\0"[..] {
                return error("Invalid XZ header");
            }
            self.flags = [b[6], b[7]];
            if self.flags[0] != 0 || self.flags[1] & 0xF0 != 0 {
                return error("Invalid XZ stream flags");
            }
            match self.flags[1] & 0x0F {
                0 => self.check_size = 0,
                1 => self.check_size = 4,
                _ => return error("Unsupported XZ stream flags"),
            }
            let mut digest = Hasher::new();
            digest.update(&self.flags);
            if digest.finalize().to_le_bytes() != b[8..] {
                return error("Invalid XZ stream flags CRC32");
            }
        }
        let block_begin = *reader.count;
        let mut b = [0u8; 1];
        reader.read_exact(&mut b)?;
        let mut digest = Hasher::new();
        digest.update(&b);
        // A zero "block header size" byte marks the index, i.e. end of blocks.
        if b[0] == 0 {
            // index
            let num_records = get_multibyte(&mut reader, &mut digest)?;
            if num_records != self.records.len() as u64 {
                return error("Invalid XZ index record count");
            }
            // Each index record must match what we actually decoded.
            for (unpadded_size, total) in &self.records {
                if get_multibyte(&mut reader, &mut digest)? != *unpadded_size as u64 {
                    return error("Invalid XZ unpadded size");
                }
                if get_multibyte(&mut reader, &mut digest)? != *total as u64 {
                    return error("Invalid XZ uncompressed size");
                }
            }
            // Index padding up to a 4-byte boundary must be zeros.
            let mut size = *reader.count - block_begin;
            let mut b = vec![0u8; (4 - (size & 0x3)) & 0x3];
            reader.read_exact(b.as_mut_slice())?;
            if !b.iter().all(|&b| b == 0) {
                return error("Invalid XZ index padding");
            }
            digest.update(b.as_slice());
            size += b.len();

            // 16 bytes: index CRC32 (4), footer CRC32 (4), backward size (4),
            // stream flags (2), footer magic "YZ" (2).
            let mut b = [0u8; 16];
            reader.read_exact(&mut b)?;
            if digest.finalize().to_le_bytes() != b[..4] {
                return error("Invalid XZ index CRC32");
            }

            let mut digest = Hasher::new();
            digest.update(&b[8..14]);
            if digest.finalize().to_le_bytes() != b[4..8] {
                return error("Invalid XZ footer CRC32");
            }
            // Backward size field stores (real index size / 4) - 1; `size`
            // excludes the 4-byte index CRC, so `size >> 2` is that value.
            if b[8..12] != ((size >> 2) as u32).to_le_bytes() {
                return error("Invalid XZ footer size");
            }
            if self.flags != b[12..14] {
                return error("Invalid XZ footer flags");
            }
            if &b[14..16] != b"YZ" {
                return error("Invalid XZ footer magic");
            }
            // Stream padding to a 4-byte boundary, then recurse in case
            // another concatenated stream follows.
            let mut b = vec![0u8; (4 - (*reader.count & 0x3)) & 0x3];
            reader.read_exact(b.as_mut_slice())?;
            if !b.iter().all(|&b| b == 0) {
                return error("Invalid XZ footer padding");
            }
            *reader.count = 0;
            return self.read(buf);
        }
        // block
        // Header length is (size_byte + 1) * 4 including the size byte itself;
        // `header_end` deliberately excludes the trailing 4-byte header CRC32,
        // which is read separately below.
        let header_end = ((b[0] as usize) << 2) - 1 + *reader.count;
        let mut b = [0u8; 1];
        reader.read_exact(&mut b)?;
        digest.update(&b);
        let flags = b[0];
        let num_filters = (flags & 0x03) + 1;
        if flags & 0x3C != 0 {
            return error("Invalid XZ block flags");
        }
        // Optional compressed-size / uncompressed-size fields are read (for
        // the CRC) but otherwise ignored.
        if flags & 0x40 != 0 {
            get_multibyte(&mut reader, &mut digest)?;
        }
        if flags & 0x80 != 0 {
            get_multibyte(&mut reader, &mut digest)?;
        }
        for _ in 0..num_filters {
            // Only the LZMA2 filter (id 0x21) with a 1-byte dictionary-size
            // property is supported.
            let filter_id = get_multibyte(&mut reader, &mut digest)?;
            if filter_id != 0x21 {
                return error("Unsupported XZ filter ID");
            }
            let properties_size = get_multibyte(&mut reader, &mut digest)?;
            if properties_size != 1 {
                return error("Unsupported XZ filter properties size");
            }
            reader.read_exact(&mut b)?;
            if b[0] & 0xC0 != 0 {
                return error("Unsupported XZ filter properties");
            }
            digest.update(&b);
        }
        let Some(padding_bytes) = header_end.checked_sub(*reader.count) else {
            return error("Invalid XZ block header (too short)");
        };
        let mut b = vec![0u8; padding_bytes];
        reader.read_exact(b.as_mut_slice())?;
        if !b.iter().all(|&b| b == 0) {
            return error("Invalid XZ block header padding");
        }
        digest.update(b.as_slice());
        let mut b = [0u8; 4];
        reader.read_exact(&mut b)?;
        if digest.finalize().to_le_bytes() != b {
            return error("Invalid XZ block header CRC32");
        }
        // Decompress the block: fill `buf` first, spill the rest into self.buf.
        let mut written = 0;
        let mut total = 0;
        Lzma2Decoder::new().decompress(
            &mut reader,
            &mut BufWriter {
                inner: buf,
                written: &mut written,
                rest: &mut self.buf,
                total: &mut total,
            },
        )?;
        // NOTE(review): per the XZ spec "Unpadded Size" includes the check
        // field, but here the record is taken before the check bytes are
        // read — confirm this matches the index comparison above.
        let unpadded_size = *reader.count - block_begin;
        self.records.push((unpadded_size, total));
        // ignore check here since zip itself will check it
        let mut b = vec![0u8; ((4 - (unpadded_size & 0x3)) & 0x3) + self.check_size];
        reader.read_exact(b.as_mut_slice())?;
        // NOTE(review): the zero padding occupies the first
        // (b.len() - check_size) bytes of `b`, yet this verifies the first
        // `check_size` bytes — verify the intended slice against the spec.
        if !b.as_slice()[..self.check_size].iter().all(|&b| b == 0) {
            return error("Invalid XZ block padding");
        }
        Ok(written)
    }
}

impl<R: BufRead> XzDecoder<R> {
    /// Consume the decoder, returning the underlying reader.
    pub fn into_inner(self) -> R {
        self.compressed_reader
    }
}
//! Types for reading ZIP archives #[cfg(feature = "aes-crypto")] use crate::aes::{AesReader, AesReaderValid}; use crate::compression::{CompressionMethod, Decompressor}; use crate::cp437::FromCp437; use crate::crc32::Crc32Reader; use crate::extra_fields::{ExtendedTimestamp, ExtraField}; use crate::read::zip_archive::{Shared, SharedBuilder}; use crate::result::{ZipError, ZipResult}; use crate::spec::{self, CentralDirectoryEndInfo, DataAndPosition, FixedSizeBlock, Pod}; use crate::types::{ AesMode, AesVendorVersion, DateTime, System, ZipCentralEntryBlock, ZipFileData, ZipLocalEntryBlock, }; use crate::zipcrypto::{ZipCryptoReader, ZipCryptoReaderValid, ZipCryptoValidator}; use indexmap::IndexMap; use std::borrow::Cow; use std::ffi::OsString; use std::fs::create_dir_all; use std::io::{self, copy, prelude::*, sink, SeekFrom}; use std::mem; use std::mem::size_of; use std::ops::Deref; use std::path::{Path, PathBuf}; use std::sync::{Arc, OnceLock}; mod config; pub use config::*; /// Provides high level API for reading from a stream. pub(crate) mod stream; #[cfg(feature = "lzma")] pub(crate) mod lzma; #[cfg(feature = "xz")] pub(crate) mod xz; pub(crate) mod magic_finder; // Put the struct declaration in a private module to convince rustdoc to display ZipArchive nicely pub(crate) mod zip_archive { use indexmap::IndexMap; use std::sync::Arc; /// Extract immutable data from `ZipArchive` to make it cheap to clone #[derive(Debug)] pub(crate) struct Shared { pub(crate) files: IndexMap<Box<str>, super::ZipFileData>, pub(super) offset: u64, pub(super) dir_start: u64, // This isn't yet used anywhere, but it is here for use cases in the future. 
#[allow(dead_code)] pub(super) config: super::Config, pub(crate) comment: Box<[u8]>, pub(crate) zip64_comment: Option<Box<[u8]>>, } #[derive(Debug)] pub(crate) struct SharedBuilder { pub(crate) files: Vec<super::ZipFileData>, pub(super) offset: u64, pub(super) dir_start: u64, // This isn't yet used anywhere, but it is here for use cases in the future. #[allow(dead_code)] pub(super) config: super::Config, } impl SharedBuilder { pub fn build(self, comment: Box<[u8]>, zip64_comment: Option<Box<[u8]>>) -> Shared { let mut index_map = IndexMap::with_capacity(self.files.len()); self.files.into_iter().for_each(|file| { index_map.insert(file.file_name.clone(), file); }); Shared { files: index_map, offset: self.offset, dir_start: self.dir_start, config: self.config, comment, zip64_comment, } } } /// ZIP archive reader /// /// At the moment, this type is cheap to clone if this is the case for the /// reader it uses. However, this is not guaranteed by this crate and it may /// change in the future. 
/// /// ```no_run /// use std::io::prelude::*; /// fn list_zip_contents(reader: impl Read + Seek) -> zip::result::ZipResult<()> { /// use zip::HasZipMetadata; /// let mut zip = zip::ZipArchive::new(reader)?; /// /// for i in 0..zip.len() { /// let mut file = zip.by_index(i)?; /// println!("Filename: {}", file.name()); /// std::io::copy(&mut file, &mut std::io::stdout())?; /// } /// /// Ok(()) /// } /// ``` #[derive(Clone, Debug)] pub struct ZipArchive<R> { pub(super) reader: R, pub(super) shared: Arc<Shared>, } } #[cfg(feature = "aes-crypto")] use crate::aes::PWD_VERIFY_LENGTH; use crate::extra_fields::UnicodeExtraField; use crate::result::ZipError::{InvalidArchive, InvalidPassword}; use crate::spec::is_dir; use crate::types::ffi::S_IFLNK; use crate::unstable::{path_to_string, LittleEndianReadExt}; pub use zip_archive::ZipArchive; #[allow(clippy::large_enum_variant)] pub(crate) enum CryptoReader<'a> { Plaintext(io::Take<&'a mut dyn Read>), ZipCrypto(ZipCryptoReaderValid<io::Take<&'a mut dyn Read>>), #[cfg(feature = "aes-crypto")] Aes { reader: AesReaderValid<io::Take<&'a mut dyn Read>>, vendor_version: AesVendorVersion, }, } impl Read for CryptoReader<'_> { fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> { match self { CryptoReader::Plaintext(r) => r.read(buf), CryptoReader::ZipCrypto(r) => r.read(buf), #[cfg(feature = "aes-crypto")] CryptoReader::Aes { reader: r, .. } => r.read(buf), } } fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> { match self { CryptoReader::Plaintext(r) => r.read_to_end(buf), CryptoReader::ZipCrypto(r) => r.read_to_end(buf), #[cfg(feature = "aes-crypto")] CryptoReader::Aes { reader: r, .. } => r.read_to_end(buf), } } fn read_to_string(&mut self, buf: &mut String) -> io::Result<usize> { match self { CryptoReader::Plaintext(r) => r.read_to_string(buf), CryptoReader::ZipCrypto(r) => r.read_to_string(buf), #[cfg(feature = "aes-crypto")] CryptoReader::Aes { reader: r, .. 
} => r.read_to_string(buf), } } } impl<'a> CryptoReader<'a> { /// Consumes this decoder, returning the underlying reader. pub fn into_inner(self) -> io::Take<&'a mut dyn Read> { match self { CryptoReader::Plaintext(r) => r, CryptoReader::ZipCrypto(r) => r.into_inner(), #[cfg(feature = "aes-crypto")] CryptoReader::Aes { reader: r, .. } => r.into_inner(), } } /// Returns `true` if the data is encrypted using AE2. pub const fn is_ae2_encrypted(&self) -> bool { #[cfg(feature = "aes-crypto")] return matches!( self, CryptoReader::Aes { vendor_version: AesVendorVersion::Ae2, .. } ); #[cfg(not(feature = "aes-crypto"))] false } } #[cold] fn invalid_state<T>() -> io::Result<T> { Err(io::Error::new( io::ErrorKind::Other, "ZipFileReader was in an invalid state", )) } pub(crate) enum ZipFileReader<'a> { NoReader, Raw(io::Take<&'a mut dyn Read>), Compressed(Box<Crc32Reader<Decompressor<io::BufReader<CryptoReader<'a>>>>>), } impl Read for ZipFileReader<'_> { fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> { match self { ZipFileReader::NoReader => invalid_state(), ZipFileReader::Raw(r) => r.read(buf), ZipFileReader::Compressed(r) => r.read(buf), } } fn read_exact(&mut self, buf: &mut [u8]) -> io::Result<()> { match self { ZipFileReader::NoReader => invalid_state(), ZipFileReader::Raw(r) => r.read_exact(buf), ZipFileReader::Compressed(r) => r.read_exact(buf), } } fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> { match self { ZipFileReader::NoReader => invalid_state(), ZipFileReader::Raw(r) => r.read_to_end(buf), ZipFileReader::Compressed(r) => r.read_to_end(buf), } } fn read_to_string(&mut self, buf: &mut String) -> io::Result<usize> { match self { ZipFileReader::NoReader => invalid_state(), ZipFileReader::Raw(r) => r.read_to_string(buf), ZipFileReader::Compressed(r) => r.read_to_string(buf), } } } impl<'a> ZipFileReader<'a> { fn into_inner(self) -> io::Result<io::Take<&'a mut dyn Read>> { match self { ZipFileReader::NoReader => invalid_state(), 
ZipFileReader::Raw(r) => Ok(r), ZipFileReader::Compressed(r) => { Ok(r.into_inner().into_inner().into_inner().into_inner()) } } } } /// A struct for reading a zip file pub struct ZipFile<'a> { pub(crate) data: Cow<'a, ZipFileData>, pub(crate) reader: ZipFileReader<'a>, } /// A struct for reading and seeking a zip file pub struct ZipFileSeek<'a, R> { data: Cow<'a, ZipFileData>, reader: ZipFileSeekReader<'a, R>, } enum ZipFileSeekReader<'a, R> { Raw(SeekableTake<'a, R>), } struct SeekableTake<'a, R> { inner: &'a mut R, inner_starting_offset: u64, length: u64, current_offset: u64, } impl<'a, R: Seek> SeekableTake<'a, R> { pub fn new(inner: &'a mut R, length: u64) -> io::Result<Self> { let inner_starting_offset = inner.stream_position()?; Ok(Self { inner, inner_starting_offset, length, current_offset: 0, }) } } impl<R: Seek> Seek for SeekableTake<'_, R> { fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> { let offset = match pos { SeekFrom::Start(offset) => Some(offset), SeekFrom::End(offset) => self.length.checked_add_signed(offset), SeekFrom::Current(offset) => self.current_offset.checked_add_signed(offset), }; match offset { None => Err(io::Error::new( io::ErrorKind::InvalidInput, "invalid seek to a negative or overflowing position", )), Some(offset) => { let clamped_offset = std::cmp::min(self.length, offset); let new_inner_offset = self .inner .seek(SeekFrom::Start(self.inner_starting_offset + clamped_offset))?; self.current_offset = new_inner_offset - self.inner_starting_offset; Ok(new_inner_offset) } } } } impl<R: Read> Read for SeekableTake<'_, R> { fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> { let written = self .inner .take(self.length - self.current_offset) .read(buf)?; self.current_offset += written as u64; Ok(written) } } pub(crate) fn find_content<'a>( data: &ZipFileData, reader: &'a mut (impl Read + Seek), ) -> ZipResult<io::Take<&'a mut dyn Read>> { // TODO: use .get_or_try_init() once stabilized to provide a closure returning a Result! 
    // Use the cached data-start offset when available; otherwise locate it by
    // parsing the local file header (which also caches it in `data.data_start`).
    let data_start = match data.data_start.get() {
        Some(data_start) => *data_start,
        None => find_data_start(data, reader)?,
    };

    reader.seek(SeekFrom::Start(data_start))?;
    // Limit the returned reader to exactly this entry's compressed payload.
    Ok((reader as &mut dyn Read).take(data.compressed_size))
}

/// Like `find_content`, but returns a `SeekableTake` so the caller can seek
/// within the entry's data.
///
/// Unlike `find_content`, this always re-parses the local header via
/// `find_data_start` instead of consulting the cached offset first.
fn find_content_seek<'a, R: Read + Seek>(
    data: &ZipFileData,
    reader: &'a mut R,
) -> ZipResult<SeekableTake<'a, R>> {
    // Parse local header
    let data_start = find_data_start(data, reader)?;
    reader.seek(SeekFrom::Start(data_start))?;

    // Explicit Ok and ? are needed to convert io::Error to ZipError
    Ok(SeekableTake::new(reader, data.compressed_size)?)
}

/// Compute the absolute offset of an entry's compressed data by parsing its
/// local file header, caching the result in `data.data_start`.
///
/// The reader is left positioned just past the fixed-size portion of the
/// local header; callers are expected to seek afterwards.
fn find_data_start(
    data: &ZipFileData,
    reader: &mut (impl Read + Seek + Sized),
) -> Result<u64, ZipError> {
    // Go to start of data.
    reader.seek(SeekFrom::Start(data.header_start))?;

    // Parse static-sized fields and check the magic value.
    let block = ZipLocalEntryBlock::parse(reader)?;

    // Calculate the end of the local header from the fields we just parsed.
    let variable_fields_len =
        // Each of these fields must be converted to u64 before adding, as the result may
        // easily overflow a u16.
        block.file_name_length as u64 + block.extra_field_length as u64;
    let data_start =
        data.header_start + size_of::<ZipLocalEntryBlock>() as u64 + variable_fields_len;

    // Set the value so we don't have to read it again.
    match data.data_start.set(data_start) {
        Ok(()) => (),
        // If the value was already set in the meantime, ensure it matches (this is probably
        // unnecessary).
Err(_) => { debug_assert_eq!(*data.data_start.get().unwrap(), data_start); } } Ok(data_start) } #[allow(clippy::too_many_arguments)] pub(crate) fn make_crypto_reader<'a>( data: &ZipFileData, reader: io::Take<&'a mut dyn Read>, password: Option<&[u8]>, aes_info: Option<(AesMode, AesVendorVersion, CompressionMethod)>, ) -> ZipResult<CryptoReader<'a>> { #[allow(deprecated)] { if let CompressionMethod::Unsupported(_) = data.compression_method { return unsupported_zip_error("Compression method not supported"); } } let reader = match (password, aes_info) { #[cfg(not(feature = "aes-crypto"))] (Some(_), Some(_)) => { return Err(ZipError::UnsupportedArchive( "AES encrypted files cannot be decrypted without the aes-crypto feature.", )) } #[cfg(feature = "aes-crypto")] (Some(password), Some((aes_mode, vendor_version, _))) => CryptoReader::Aes { reader: AesReader::new(reader, aes_mode, data.compressed_size).validate(password)?, vendor_version, }, (Some(password), None) => { let mut last_modified_time = data.last_modified_time; if !data.using_data_descriptor { last_modified_time = None; } let validator = if let Some(last_modified_time) = last_modified_time { ZipCryptoValidator::InfoZipMsdosTime(last_modified_time.timepart()) } else { ZipCryptoValidator::PkzipCrc32(data.crc32) }; CryptoReader::ZipCrypto(ZipCryptoReader::new(reader, password).validate(validator)?) 
        }
        // Entry is encrypted but no password was supplied.
        (None, Some(_)) => return Err(InvalidPassword),
        // Unencrypted entry: pass the bytes through untouched.
        (None, None) => CryptoReader::Plaintext(reader),
    };
    Ok(reader)
}

/// Wrap a (possibly decrypting) `CryptoReader` in the decompression and
/// CRC32 layers, producing the reader handed out for a zip entry.
///
/// `ae2_encrypted` is forwarded to `Crc32Reader` — presumably to relax CRC
/// validation for AE-2 entries, which carry no meaningful CRC; confirm in
/// `Crc32Reader::new`.
pub(crate) fn make_reader(
    compression_method: CompressionMethod,
    crc32: u32,
    reader: CryptoReader,
) -> ZipResult<ZipFileReader> {
    let ae2_encrypted = reader.is_ae2_encrypted();

    Ok(ZipFileReader::Compressed(Box::new(Crc32Reader::new(
        Decompressor::new(io::BufReader::new(reader), compression_method)?,
        crc32,
        ae2_encrypted,
    ))))
}

/// Location summary of the central directory, derived from the EOCD record
/// (and the ZIP64 EOCD record when present).
#[derive(Debug)]
pub(crate) struct CentralDirectoryInfo {
    // Bytes of data prepended before the zip proper begins.
    pub(crate) archive_offset: u64,
    // Absolute offset of the first central-directory entry.
    pub(crate) directory_start: u64,
    pub(crate) number_of_files: usize,
    pub(crate) disk_number: u32,
    pub(crate) disk_with_central_directory: u32,
}

impl<'a> TryFrom<&'a CentralDirectoryEndInfo> for CentralDirectoryInfo {
    type Error = ZipError;

    fn try_from(value: &'a CentralDirectoryEndInfo) -> Result<Self, Self::Error> {
        // Prefer the ZIP64 EOCD when present; otherwise fall back to the
        // classic EOCD fields.
        let (relative_cd_offset, number_of_files, disk_number, disk_with_central_directory) =
            match &value.eocd64 {
                Some(DataAndPosition { data: eocd64, ..
}) => { if eocd64.number_of_files_on_this_disk > eocd64.number_of_files { return Err(InvalidArchive( "ZIP64 footer indicates more files on this disk than in the whole archive", )); } else if eocd64.version_needed_to_extract > eocd64.version_made_by { return Err(InvalidArchive( "ZIP64 footer indicates a new version is needed to extract this archive than the \ version that wrote it", )); } ( eocd64.central_directory_offset, eocd64.number_of_files as usize, eocd64.disk_number, eocd64.disk_with_central_directory, ) } _ => ( value.eocd.data.central_directory_offset as u64, value.eocd.data.number_of_files_on_this_disk as usize, value.eocd.data.disk_number as u32, value.eocd.data.disk_with_central_directory as u32, ), }; let directory_start = relative_cd_offset .checked_add(value.archive_offset) .ok_or(InvalidArchive("Invalid central directory size or offset"))?; Ok(Self { archive_offset: value.archive_offset, directory_start, number_of_files, disk_number, disk_with_central_directory, }) } } impl<R> ZipArchive<R> { pub(crate) fn from_finalized_writer( files: IndexMap<Box<str>, ZipFileData>, comment: Box<[u8]>, zip64_comment: Option<Box<[u8]>>, reader: R, central_start: u64, ) -> ZipResult<Self> { let initial_offset = match files.first() { Some((_, file)) => file.header_start, None => central_start, }; let shared = Arc::new(Shared { files, offset: initial_offset, dir_start: central_start, config: Config { archive_offset: ArchiveOffset::Known(initial_offset), }, comment, zip64_comment, }); Ok(Self { reader, shared }) } /// Total size of the files in the archive, if it can be known. Doesn't include directories or /// metadata. 
pub fn decompressed_size(&self) -> Option<u128> { let mut total = 0u128; for file in self.shared.files.values() { if file.using_data_descriptor { return None; } total = total.checked_add(file.uncompressed_size as u128)?; } Some(total) } } impl<R: Read + Seek> ZipArchive<R> { pub(crate) fn merge_contents<W: Write + Seek>( &mut self, mut w: W, ) -> ZipResult<IndexMap<Box<str>, ZipFileData>> { if self.shared.files.is_empty() { return Ok(IndexMap::new()); } let mut new_files = self.shared.files.clone(); /* The first file header will probably start at the beginning of the file, but zip doesn't * enforce that, and executable zips like PEX files will have a shebang line so will * definitely be greater than 0. * * assert_eq!(0, new_files[0].header_start); // Avoid this. */ let first_new_file_header_start = w.stream_position()?; /* Push back file header starts for all entries in the covered files. */ new_files.values_mut().try_for_each(|f| { /* This is probably the only really important thing to change. */ f.header_start = f .header_start .checked_add(first_new_file_header_start) .ok_or(InvalidArchive( "new header start from merge would have been too large", ))?; /* This is only ever used internally to cache metadata lookups (it's not part of the * zip spec), and 0 is the sentinel value. */ f.central_header_start = 0; /* This is an atomic variable so it can be updated from another thread in the * implementation (which is good!). */ if let Some(old_data_start) = f.data_start.take() { let new_data_start = old_data_start .checked_add(first_new_file_header_start) .ok_or(InvalidArchive( "new data start from merge would have been too large", ))?; f.data_start.get_or_init(|| new_data_start); } Ok::<_, ZipError>(()) })?; /* Rewind to the beginning of the file. * * NB: we *could* decide to start copying from new_files[0].header_start instead, which * would avoid copying over e.g. any pex shebangs or other file contents that start before * the first zip file entry. 
However, zip files actually shouldn't care about garbage data * in *between* real entries, since the central directory header records the correct start * location of each, and keeping track of that math is more complicated logic that will only * rarely be used, since most zips that get merged together are likely to be produced * specifically for that purpose (and therefore are unlikely to have a shebang or other * preface). Finally, this preserves any data that might actually be useful. */ self.reader.rewind()?; /* Find the end of the file data. */ let length_to_read = self.shared.dir_start; /* Produce a Read that reads bytes up until the start of the central directory header. * This "as &mut dyn Read" trick is used elsewhere to avoid having to clone the underlying * handle, which it really shouldn't need to anyway. */ let mut limited_raw = (&mut self.reader as &mut dyn Read).take(length_to_read); /* Copy over file data from source archive directly. */ io::copy(&mut limited_raw, &mut w)?; /* Return the files we've just written to the data stream. */ Ok(new_files) } /// Get the directory start offset and number of files. This is done in a /// separate function to ease the control flow design. pub(crate) fn get_metadata(config: Config, reader: &mut R) -> ZipResult<Shared> { // End of the probed region, initially set to the end of the file let file_len = reader.seek(io::SeekFrom::End(0))?; let mut end_exclusive = file_len; loop { // Find the EOCD and possibly EOCD64 entries and determine the archive offset. let cde = spec::find_central_directory( reader, config.archive_offset, end_exclusive, file_len, )?; // Turn EOCD into internal representation. let Ok(shared) = CentralDirectoryInfo::try_from(&cde) .and_then(|info| Self::read_central_header(info, config, reader)) else { // The next EOCD candidate should start before the current one. 
end_exclusive = cde.eocd.position; continue; }; return Ok(shared.build( cde.eocd.data.zip_file_comment, cde.eocd64.map(|v| v.data.extensible_data_sector), )); } } fn read_central_header( dir_info: CentralDirectoryInfo, config: Config, reader: &mut R, ) -> Result<SharedBuilder, ZipError> { // If the parsed number of files is greater than the offset then // something fishy is going on and we shouldn't trust number_of_files. let file_capacity = if dir_info.number_of_files > dir_info.directory_start as usize { 0 } else { dir_info.number_of_files }; if dir_info.disk_number != dir_info.disk_with_central_directory { return unsupported_zip_error("Support for multi-disk files is not implemented"); } if file_capacity.saturating_mul(size_of::<ZipFileData>()) > isize::MAX as usize { return unsupported_zip_error("Oversized central directory"); } let mut files = Vec::with_capacity(file_capacity); reader.seek(SeekFrom::Start(dir_info.directory_start))?; for _ in 0..dir_info.number_of_files { let file = central_header_to_zip_file(reader, &dir_info)?; files.push(file); } Ok(SharedBuilder { files, offset: dir_info.archive_offset, dir_start: dir_info.directory_start, config, }) } /// Returns the verification value and salt for the AES encryption of the file /// /// It fails if the file number is invalid. 
/// /// # Returns /// /// - None if the file is not encrypted with AES #[cfg(feature = "aes-crypto")] pub fn get_aes_verification_key_and_salt( &mut self, file_number: usize, ) -> ZipResult<Option<AesInfo>> { let (_, data) = self .shared .files .get_index(file_number) .ok_or(ZipError::FileNotFound)?; let limit_reader = find_content(data, &mut self.reader)?; match data.aes_mode { None => Ok(None), Some((aes_mode, _, _)) => { let (verification_value, salt) = AesReader::new(limit_reader, aes_mode, data.compressed_size) .get_verification_value_and_salt()?; let aes_info = AesInfo { aes_mode, verification_value, salt, }; Ok(Some(aes_info)) } } } /// Read a ZIP archive, collecting the files it contains. /// /// This uses the central directory record of the ZIP file, and ignores local file headers. /// /// A default [`Config`] is used. pub fn new(reader: R) -> ZipResult<ZipArchive<R>> { Self::with_config(Default::default(), reader) } /// Read a ZIP archive providing a read configuration, collecting the files it contains. /// /// This uses the central directory record of the ZIP file, and ignores local file headers. pub fn with_config(config: Config, mut reader: R) -> ZipResult<ZipArchive<R>> { let shared = Self::get_metadata(config, &mut reader)?; Ok(ZipArchive { reader, shared: shared.into(), }) } /// Extract a Zip archive into a directory, overwriting files if they /// already exist. Paths are sanitized with [`ZipFile::enclosed_name`]. /// /// Extraction is not atomic. If an error is encountered, some of the files /// may be left on disk. However, on Unix targets, no newly-created directories with part but /// not all of their contents extracted will be readable, writable or usable as process working /// directories by any non-root user except you. /// /// On Unix and Windows, symbolic links are extracted correctly. On other platforms such as /// WebAssembly, symbolic links aren't supported, so they're extracted as normal files /// containing the target path in UTF-8. 
pub fn extract<P: AsRef<Path>>(&mut self, directory: P) -> ZipResult<()> { use std::fs; #[cfg(unix)] let mut files_by_unix_mode = Vec::new(); for i in 0..self.len() { let mut file = self.by_index(i)?; let filepath = file .enclosed_name() .ok_or(InvalidArchive("Invalid file path"))?; let outpath = directory.as_ref().join(filepath); if file.is_dir() { Self::make_writable_dir_all(&outpath)?; continue; } let symlink_target = if file.is_symlink() && (cfg!(unix) || cfg!(windows)) { let mut target = Vec::with_capacity(file.size() as usize); file.read_to_end(&mut target)?; Some(target) } else { None }; drop(file); if let Some(p) = outpath.parent() { Self::make_writable_dir_all(p)?; } if let Some(target) = symlink_target { #[cfg(unix)] { use std::os::unix::ffi::OsStringExt; let target = OsString::from_vec(target); std::os::unix::fs::symlink(&target, outpath.as_path())?; } #[cfg(windows)] { let Ok(target) = String::from_utf8(target) else { return Err(ZipError::InvalidArchive("Invalid UTF-8 as symlink target")); }; let target = target.into_boxed_str(); let target_is_dir_from_archive = self.shared.files.contains_key(&target) && is_dir(&target); let target_path = directory.as_ref().join(OsString::from(target.to_string())); let target_is_dir = if target_is_dir_from_archive { true } else if let Ok(meta) = std::fs::metadata(&target_path) { meta.is_dir() } else { false }; if target_is_dir { std::os::windows::fs::symlink_dir(target_path, outpath.as_path())?; } else { std::os::windows::fs::symlink_file(target_path, outpath.as_path())?; } } continue; } let mut file = self.by_index(i)?; let mut outfile = fs::File::create(&outpath)?; io::copy(&mut file, &mut outfile)?; #[cfg(unix)] { // Check for real permissions, which we'll set in a second pass if let Some(mode) = file.unix_mode() { files_by_unix_mode.push((outpath.clone(), mode)); } } } #[cfg(unix)] { use std::cmp::Reverse; use std::os::unix::fs::PermissionsExt; if files_by_unix_mode.len() > 1 { // Ensure we update children's 
permissions before making a parent unwritable files_by_unix_mode.sort_by_key(|(path, _)| Reverse(path.clone())); } for (path, mode) in files_by_unix_mode.into_iter() { fs::set_permissions(&path, fs::Permissions::from_mode(mode))?; } } Ok(()) } fn make_writable_dir_all<T: AsRef<Path>>(outpath: T) -> Result<(), ZipError> { create_dir_all(outpath.as_ref())?; #[cfg(unix)] { // Dirs must be writable until all normal files are extracted use std::os::unix::fs::PermissionsExt; std::fs::set_permissions( outpath.as_ref(), std::fs::Permissions::from_mode( 0o700 | std::fs::metadata(outpath.as_ref())?.permissions().mode(), ), )?; } Ok(()) } /// Number of files contained in this zip. pub fn len(&self) -> usize { self.shared.files.len() } /// Get the starting offset of the zip central directory. pub fn central_directory_start(&self) -> u64 { self.shared.dir_start } /// Whether this zip archive contains no files pub fn is_empty(&self) -> bool { self.len() == 0 } /// Get the offset from the beginning of the underlying reader that this zip begins at, in bytes. /// /// Normally this value is zero, but if the zip has arbitrary data prepended to it, then this value will be the size /// of that prepended data. pub fn offset(&self) -> u64 { self.shared.offset } /// Get the comment of the zip archive. pub fn comment(&self) -> &[u8] { &self.shared.comment } /// Get the ZIP64 comment of the zip archive, if it is ZIP64. pub fn zip64_comment(&self) -> Option<&[u8]> { self.shared.zip64_comment.as_deref() } /// Returns an iterator over all the file and directory names in this archive. pub fn file_names(&self) -> impl Iterator<Item = &str> { self.shared.files.keys().map(|s| s.as_ref()) } /// Search for a file entry by name, decrypt with given password /// /// # Warning /// /// The implementation of the cryptographic algorithms has not /// gone through a correctness review, and you should assume it is insecure: /// passwords used with this API may be compromised. 
/// /// This function sometimes accepts wrong password. This is because the ZIP spec only allows us /// to check for a 1/256 chance that the password is correct. /// There are many passwords out there that will also pass the validity checks /// we are able to perform. This is a weakness of the ZipCrypto algorithm, /// due to its fairly primitive approach to cryptography. pub fn by_name_decrypt(&mut self, name: &str, password: &[u8]) -> ZipResult<ZipFile> { self.by_name_with_optional_password(name, Some(password)) } /// Search for a file entry by name pub fn by_name(&mut self, name: &str) -> ZipResult<ZipFile> { self.by_name_with_optional_password(name, None) } /// Get the index of a file entry by name, if it's present. #[inline(always)] pub fn index_for_name(&self, name: &str) -> Option<usize> { self.shared.files.get_index_of(name) } /// Get the index of a file entry by path, if it's present. #[inline(always)] pub fn index_for_path<T: AsRef<Path>>(&self, path: T) -> Option<usize> { self.index_for_name(&path_to_string(path)) } /// Get the name of a file entry, if it's present. #[inline(always)] pub fn name_for_index(&self, index: usize) -> Option<&str> { self.shared .files .get_index(index) .map(|(name, _)| name.as_ref()) } /// Search for a file entry by name and return a seekable object. pub fn by_name_seek(&mut self, name: &str) -> ZipResult<ZipFileSeek<R>> { self.by_index_seek(self.index_for_name(name).ok_or(ZipError::FileNotFound)?) } /// Search for a file entry by index and return a seekable object. pub fn by_index_seek(&mut self, index: usize) -> ZipResult<ZipFileSeek<R>> { let reader = &mut self.reader; self.shared .files .get_index(index) .ok_or(ZipError::FileNotFound) .and_then(move |(_, data)| { let seek_reader = match data.compression_method { CompressionMethod::Stored => { ZipFileSeekReader::Raw(find_content_seek(data, reader)?) 
} _ => { return Err(ZipError::UnsupportedArchive( "Seekable compressed files are not yet supported", )) } }; Ok(ZipFileSeek { reader: seek_reader, data: Cow::Borrowed(data), }) }) } fn by_name_with_optional_password<'a>( &'a mut self, name: &str, password: Option<&[u8]>, ) -> ZipResult<ZipFile<'a>> { let Some(index) = self.shared.files.get_index_of(name) else { return Err(ZipError::FileNotFound); }; self.by_index_with_optional_password(index, password) } /// Get a contained file by index, decrypt with given password /// /// # Warning /// /// The implementation of the cryptographic algorithms has not /// gone through a correctness review, and you should assume it is insecure: /// passwords used with this API may be compromised. /// /// This function sometimes accepts wrong password. This is because the ZIP spec only allows us /// to check for a 1/256 chance that the password is correct. /// There are many passwords out there that will also pass the validity checks /// we are able to perform. This is a weakness of the ZipCrypto algorithm, /// due to its fairly primitive approach to cryptography. 
pub fn by_index_decrypt( &mut self, file_number: usize, password: &[u8], ) -> ZipResult<ZipFile<'_>> { self.by_index_with_optional_password(file_number, Some(password)) } /// Get a contained file by index pub fn by_index(&mut self, file_number: usize) -> ZipResult<ZipFile<'_>> { self.by_index_with_optional_password(file_number, None) } /// Get a contained file by index without decompressing it pub fn by_index_raw(&mut self, file_number: usize) -> ZipResult<ZipFile<'_>> { let reader = &mut self.reader; let (_, data) = self .shared .files .get_index(file_number) .ok_or(ZipError::FileNotFound)?; Ok(ZipFile { reader: ZipFileReader::Raw(find_content(data, reader)?), data: Cow::Borrowed(data), }) } fn by_index_with_optional_password( &mut self, file_number: usize, mut password: Option<&[u8]>, ) -> ZipResult<ZipFile<'_>> { let (_, data) = self .shared .files .get_index(file_number) .ok_or(ZipError::FileNotFound)?; match (password, data.encrypted) { (None, true) => return Err(ZipError::UnsupportedArchive(ZipError::PASSWORD_REQUIRED)), (Some(_), false) => password = None, //Password supplied, but none needed! Discard. _ => {} } let limit_reader = find_content(data, &mut self.reader)?; let crypto_reader = make_crypto_reader(data, limit_reader, password, data.aes_mode)?; Ok(ZipFile { data: Cow::Borrowed(data), reader: make_reader(data.compression_method, data.crc32, crypto_reader)?, }) } /// Unwrap and return the inner reader object /// /// The position of the reader is undefined. 
pub fn into_inner(self) -> R { self.reader } } /// Holds the AES information of a file in the zip archive #[derive(Debug)] #[cfg(feature = "aes-crypto")] pub struct AesInfo { /// The AES encryption mode pub aes_mode: AesMode, /// The verification key pub verification_value: [u8; PWD_VERIFY_LENGTH], /// The salt pub salt: Vec<u8>, } const fn unsupported_zip_error<T>(detail: &'static str) -> ZipResult<T> { Err(ZipError::UnsupportedArchive(detail)) } /// Parse a central directory entry to collect the information for the file. pub(crate) fn central_header_to_zip_file<R: Read + Seek>( reader: &mut R, central_directory: &CentralDirectoryInfo, ) -> ZipResult<ZipFileData> { let central_header_start = reader.stream_position()?; // Parse central header let block = ZipCentralEntryBlock::parse(reader)?; let file = central_header_to_zip_file_inner( reader, central_directory.archive_offset, central_header_start, block, )?; let central_header_end = reader.stream_position()?; if file.header_start >= central_directory.directory_start { return Err(InvalidArchive( "A local file entry can't start after the central directory", )); } let data_start = find_data_start(&file, reader)?; if data_start > central_directory.directory_start { return Err(InvalidArchive( "File data can't start after the central directory", )); } reader.seek(SeekFrom::Start(central_header_end))?; Ok(file) } #[inline] fn read_variable_length_byte_field<R: Read>(reader: &mut R, len: usize) -> io::Result<Box<[u8]>> { let mut data = vec![0; len].into_boxed_slice(); reader.read_exact(&mut data)?; Ok(data) } /// Parse a central directory entry to collect the information for the file. 
fn central_header_to_zip_file_inner<R: Read>( reader: &mut R, archive_offset: u64, central_header_start: u64, block: ZipCentralEntryBlock, ) -> ZipResult<ZipFileData> { let ZipCentralEntryBlock { // magic, version_made_by, // version_to_extract, flags, compression_method, last_mod_time, last_mod_date, crc32, compressed_size, uncompressed_size, file_name_length, extra_field_length, file_comment_length, // disk_number, // internal_file_attributes, external_file_attributes, offset, .. } = block; let encrypted = flags & 1 == 1; let is_utf8 = flags & (1 << 11) != 0; let using_data_descriptor = flags & (1 << 3) != 0; let file_name_raw = read_variable_length_byte_field(reader, file_name_length as usize)?; let extra_field = read_variable_length_byte_field(reader, extra_field_length as usize)?; let file_comment_raw = read_variable_length_byte_field(reader, file_comment_length as usize)?; let file_name: Box<str> = match is_utf8 { true => String::from_utf8_lossy(&file_name_raw).into(), false => file_name_raw.clone().from_cp437(), }; let file_comment: Box<str> = match is_utf8 { true => String::from_utf8_lossy(&file_comment_raw).into(), false => file_comment_raw.from_cp437(), }; // Construct the result let mut result = ZipFileData { system: System::from((version_made_by >> 8) as u8), /* NB: this strips the top 8 bits! 
*/ version_made_by: version_made_by as u8, encrypted, using_data_descriptor, is_utf8, compression_method: CompressionMethod::parse_from_u16(compression_method), compression_level: None, last_modified_time: DateTime::try_from_msdos(last_mod_date, last_mod_time).ok(), crc32, compressed_size: compressed_size.into(), uncompressed_size: uncompressed_size.into(), file_name, file_name_raw, extra_field: Some(Arc::new(extra_field.to_vec())), central_extra_field: None, file_comment, header_start: offset.into(), extra_data_start: None, central_header_start, data_start: OnceLock::new(), external_attributes: external_file_attributes, large_file: false, aes_mode: None, aes_extra_data_start: 0, extra_fields: Vec::new(), }; match parse_extra_field(&mut result) { Ok(stripped_extra_field) => { result.extra_field = stripped_extra_field; } Err(ZipError::Io(..)) => {} Err(e) => return Err(e), } let aes_enabled = result.compression_method == CompressionMethod::AES; if aes_enabled && result.aes_mode.is_none() { return Err(InvalidArchive( "AES encryption without AES extra data field", )); } // Account for shifted zip offsets. result.header_start = result .header_start .checked_add(archive_offset) .ok_or(InvalidArchive("Archive header is too large"))?; Ok(result) } pub(crate) fn parse_extra_field(file: &mut ZipFileData) -> ZipResult<Option<Arc<Vec<u8>>>> { let Some(ref extra_field) = file.extra_field else { return Ok(None); }; let extra_field = extra_field.clone(); let mut processed_extra_field = extra_field.clone(); let len = extra_field.len(); let mut reader = io::Cursor::new(&**extra_field); /* TODO: codify this structure into Zip64ExtraFieldBlock fields! 
     */
    let mut position = reader.position() as usize;
    while (position) < len {
        let old_position = position;
        let remove = parse_single_extra_field(file, &mut reader, position as u64, false)?;
        position = reader.position() as usize;
        if remove {
            let remaining = len - (position - old_position);
            if remaining == 0 {
                // The removed field was the only content: nothing remains.
                return Ok(None);
            }
            // Rebuild the extra-field buffer without the removed field's bytes.
            // NOTE(review): the splice is built from the ORIGINAL `extra_field`,
            // not the previously produced `processed_extra_field`; if more than
            // one field ever returned `remove == true`, only the last removal
            // would survive. Today only the ZIP64 field (0x0001) returns `true`,
            // so this looks unreachable — confirm before relying on it.
            let mut new_extra_field = Vec::with_capacity(remaining);
            new_extra_field.extend_from_slice(&extra_field[0..old_position]);
            new_extra_field.extend_from_slice(&extra_field[position..]);
            processed_extra_field = Arc::new(new_extra_field);
        }
    }
    Ok(Some(processed_extra_field))
}

/// Parse one extra-data field from `reader`, updating `file` in place.
///
/// Returns `Ok(true)` if the caller should strip this field from the stored
/// extra data (currently only the ZIP64 field), `Ok(false)` otherwise.
///
/// `bytes_already_read` is this field's offset within the extra block
/// (recorded as `aes_extra_data_start` for AES fields); `disallow_zip64`
/// rejects a ZIP64 (0x0001) field outright.
pub(crate) fn parse_single_extra_field<R: Read>(
    file: &mut ZipFileData,
    reader: &mut R,
    bytes_already_read: u64,
    disallow_zip64: bool,
) -> ZipResult<bool> {
    // Every field starts with a (tag, length) header followed by `len` bytes.
    let kind = reader.read_u16_le()?;
    let len = reader.read_u16_le()?;
    match kind {
        // Zip64 extended information extra field
        0x0001 => {
            if disallow_zip64 {
                return Err(InvalidArchive(
                    "Can't write a custom field using the ZIP64 ID",
                ));
            }
            file.large_file = true;
            let mut consumed_len = 0;
            // Each 64-bit value is present only when the corresponding 32-bit
            // header field saturated (== ZIP64_BYTES_THR), or when the field
            // is long enough (>= 24) to carry all three unconditionally.
            if len >= 24 || file.uncompressed_size == spec::ZIP64_BYTES_THR {
                file.uncompressed_size = reader.read_u64_le()?;
                consumed_len += size_of::<u64>();
            }
            if len >= 24 || file.compressed_size == spec::ZIP64_BYTES_THR {
                file.compressed_size = reader.read_u64_le()?;
                consumed_len += size_of::<u64>();
            }
            if len >= 24 || file.header_start == spec::ZIP64_BYTES_THR {
                file.header_start = reader.read_u64_le()?;
                consumed_len += size_of::<u64>();
            }
            // Skip whatever part of the declared length we did not consume
            // (e.g. a disk-start-number entry — TODO confirm against APPNOTE).
            let Some(leftover_len) = (len as usize).checked_sub(consumed_len) else {
                return Err(InvalidArchive("ZIP64 extra-data field is the wrong length"));
            };
            reader.read_exact(&mut vec![0u8; leftover_len])?;
            return Ok(true);
        }
        0x9901 => {
            // AES
            if len != 7 {
                return Err(ZipError::UnsupportedArchive(
                    "AES extra data field has an unsupported length",
                ));
            }
            let vendor_version = reader.read_u16_le()?;
            let vendor_id = reader.read_u16_le()?;
            let mut out = [0u8];
            reader.read_exact(&mut out)?;
            let aes_mode = out[0];
            let
compression_method = CompressionMethod::parse_from_u16(reader.read_u16_le()?); if vendor_id != 0x4541 { return Err(InvalidArchive("Invalid AES vendor")); } let vendor_version = match vendor_version { 0x0001 => AesVendorVersion::Ae1, 0x0002 => AesVendorVersion::Ae2, _ => return Err(InvalidArchive("Invalid AES vendor version")), }; match aes_mode { 0x01 => file.aes_mode = Some((AesMode::Aes128, vendor_version, compression_method)), 0x02 => file.aes_mode = Some((AesMode::Aes192, vendor_version, compression_method)), 0x03 => file.aes_mode = Some((AesMode::Aes256, vendor_version, compression_method)), _ => return Err(InvalidArchive("Invalid AES encryption strength")), }; file.compression_method = compression_method; file.aes_extra_data_start = bytes_already_read; } 0x5455 => { // extended timestamp // https://libzip.org/specifications/extrafld.txt file.extra_fields.push(ExtraField::ExtendedTimestamp( ExtendedTimestamp::try_from_reader(reader, len)?, )); } 0x6375 => { // Info-ZIP Unicode Comment Extra Field // APPNOTE 4.6.8 and https://libzip.org/specifications/extrafld.txt file.file_comment = String::from_utf8( UnicodeExtraField::try_from_reader(reader, len)? .unwrap_valid(file.file_comment.as_bytes())? .into_vec(), )? .into(); } 0x7075 => { // Info-ZIP Unicode Path Extra Field // APPNOTE 4.6.9 and https://libzip.org/specifications/extrafld.txt file.file_name_raw = UnicodeExtraField::try_from_reader(reader, len)? .unwrap_valid(&file.file_name_raw)?; file.file_name = String::from_utf8(file.file_name_raw.clone().into_vec())?.into_boxed_str(); file.is_utf8 = true; } _ => { reader.read_exact(&mut vec![0u8; len as usize])?; // Other fields are ignored } } Ok(false) } /// A trait for exposing file metadata inside the zip. 
/// Common access to the parsed per-entry metadata record.
pub trait HasZipMetadata {
    /// Get the file metadata
    fn get_metadata(&self) -> &ZipFileData;
}

/// Methods for retrieving information on zip files
impl<'a> ZipFile<'a> {
    // Swaps the reader out for `NoReader` and unwraps the decompression/decryption
    // layers down to the raw byte-limited `Take` reader. Used by `Drop` below to
    // skip CRC/decompression work when draining a streamed entry.
    pub(crate) fn take_raw_reader(&mut self) -> io::Result<io::Take<&'a mut dyn Read>> {
        mem::replace(&mut self.reader, ZipFileReader::NoReader).into_inner()
    }

    /// Get the version of the file
    ///
    /// Splits the decimal-encoded "version made by" byte into a
    /// (major, minor) pair, e.g. 46 -> (4, 6).
    pub fn version_made_by(&self) -> (u8, u8) {
        (
            self.get_metadata().version_made_by / 10,
            self.get_metadata().version_made_by % 10,
        )
    }

    /// Get the name of the file
    ///
    /// # Warnings
    ///
    /// It is dangerous to use this name directly when extracting an archive.
    /// It may contain an absolute path (`/etc/shadow`), or break out of the
    /// current directory (`../runtime`). Carelessly writing to these paths
    /// allows an attacker to craft a ZIP archive that will overwrite critical
    /// files.
    ///
    /// You can use the [`ZipFile::enclosed_name`] method to validate the name
    /// as a safe path.
    pub fn name(&self) -> &str {
        &self.get_metadata().file_name
    }

    /// Get the name of the file, in the raw (internal) byte representation.
    ///
    /// The encoding of this data is currently undefined.
    pub fn name_raw(&self) -> &[u8] {
        &self.get_metadata().file_name_raw
    }

    /// Get the name of the file in a sanitized form. It truncates the name to the first NULL byte,
    /// removes a leading '/' and removes '..' parts.
    #[deprecated(
        since = "0.5.7",
        note = "by stripping `..`s from the path, the meaning of paths can change. `mangled_name` can be used if this behaviour is desirable"
    )]
    pub fn sanitized_name(&self) -> PathBuf {
        self.mangled_name()
    }

    /// Rewrite the path, ignoring any path components with special meaning.
    ///
    /// - Absolute paths are made relative
    /// - [`ParentDir`]s are ignored
    /// - Truncates the filename at a NULL byte
    ///
    /// This is appropriate if you need to be able to extract *something* from
    /// any archive, but will easily misrepresent trivial paths like
    /// `foo/../bar` as `foo/bar` (instead of `bar`). Because of this,
    /// [`ZipFile::enclosed_name`] is the better option in most scenarios.
    ///
    /// [`ParentDir`]: `PathBuf::Component::ParentDir`
    pub fn mangled_name(&self) -> PathBuf {
        self.get_metadata().file_name_sanitized()
    }

    /// Ensure the file path is safe to use as a [`Path`].
    ///
    /// - It can't contain NULL bytes
    /// - It can't resolve to a path outside the current directory
    /// > `foo/../bar` is fine, `foo/../../bar` is not.
    /// - It can't be an absolute path
    ///
    /// This will read well-formed ZIP files correctly, and is resistant
    /// to path-based exploits. It is recommended over
    /// [`ZipFile::mangled_name`].
    pub fn enclosed_name(&self) -> Option<PathBuf> {
        self.get_metadata().enclosed_name()
    }

    /// Get the comment of the file
    pub fn comment(&self) -> &str {
        &self.get_metadata().file_comment
    }

    /// Get the compression method used to store the file
    pub fn compression(&self) -> CompressionMethod {
        self.get_metadata().compression_method
    }

    /// Get if the files is encrypted or not
    pub fn encrypted(&self) -> bool {
        self.data.encrypted
    }

    /// Get the size of the file, in bytes, in the archive
    pub fn compressed_size(&self) -> u64 {
        self.get_metadata().compressed_size
    }

    /// Get the size of the file, in bytes, when uncompressed
    pub fn size(&self) -> u64 {
        self.get_metadata().uncompressed_size
    }

    /// Get the time the file was last modified
    pub fn last_modified(&self) -> Option<DateTime> {
        self.data.last_modified_time
    }

    /// Returns whether the file is actually a directory
    pub fn is_dir(&self) -> bool {
        is_dir(self.name())
    }

    /// Returns whether the file is actually a symbolic link
    pub fn is_symlink(&self) -> bool {
        // Checks the Unix file-type bits of the mode for S_IFLNK.
        self.unix_mode()
            .is_some_and(|mode| mode & S_IFLNK == S_IFLNK)
    }

    /// Returns whether the file is a normal file (i.e. not a directory or symlink)
    pub fn is_file(&self) -> bool {
        !self.is_dir() && !self.is_symlink()
    }

    /// Get unix mode for the file
    pub fn unix_mode(&self) -> Option<u32> {
        self.get_metadata().unix_mode()
    }

    /// Get the CRC32 hash of the original file
    pub fn crc32(&self) -> u32 {
        self.get_metadata().crc32
    }

    /// Get the extra data of the zip header for this file
    pub fn extra_data(&self) -> Option<&[u8]> {
        self.get_metadata()
            .extra_field
            .as_ref()
            .map(|v| v.deref().deref())
    }

    /// Get the starting offset of the data of the compressed file
    ///
    /// NOTE(review): unwraps the lazily-initialized `data_start` cell; assumes it
    /// has been populated before this accessor is called — confirm against callers.
    pub fn data_start(&self) -> u64 {
        *self.data.data_start.get().unwrap()
    }

    /// Get the starting offset of the zip header for this file
    pub fn header_start(&self) -> u64 {
        self.get_metadata().header_start
    }

    /// Get the starting offset of the zip header in the central directory for this file
    pub fn central_header_start(&self) -> u64 {
        self.get_metadata().central_header_start
    }
}

/// Methods for retrieving information on zip files
impl ZipFile<'_> {
    /// iterate through all extra fields
    pub fn extra_data_fields(&self) -> impl Iterator<Item = &ExtraField> {
        self.data.extra_fields.iter()
    }
}

impl HasZipMetadata for ZipFile<'_> {
    fn get_metadata(&self) -> &ZipFileData {
        self.data.as_ref()
    }
}

// All reads delegate to the layered reader (decryption/decompression/CRC).
impl Read for ZipFile<'_> {
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        self.reader.read(buf)
    }

    fn read_exact(&mut self, buf: &mut [u8]) -> io::Result<()> {
        self.reader.read_exact(buf)
    }

    fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
        self.reader.read_to_end(buf)
    }

    fn read_to_string(&mut self, buf: &mut String) -> io::Result<usize> {
        self.reader.read_to_string(buf)
    }
}

impl<R: Read> Read for ZipFileSeek<'_, R> {
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        match &mut self.reader {
            ZipFileSeekReader::Raw(r) => r.read(buf),
        }
    }
}

impl<R: Seek> Seek for ZipFileSeek<'_, R> {
    fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
        match &mut self.reader {
            ZipFileSeekReader::Raw(r) => r.seek(pos),
        }
    }
}

impl<R> HasZipMetadata for ZipFileSeek<'_, R> {
    fn get_metadata(&self) -> &ZipFileData {
        self.data.as_ref()
    }
}

impl Drop for ZipFile<'_> {
    fn drop(&mut self) {
        // self.data is Owned, this reader is constructed by a streaming reader.
        // In this case, we want to exhaust the reader so that the next file is accessible.
        if let Cow::Owned(_) = self.data {
            // Get the inner `Take` reader so all decryption, decompression and CRC calculation is skipped.
            if let Ok(mut inner) = self.take_raw_reader() {
                // Best effort: errors while draining are intentionally ignored.
                let _ = copy(&mut inner, &mut sink());
            }
        }
    }
}

/// Read ZipFile structures from a non-seekable reader.
///
/// This is an alternative method to read a zip file. If possible, use the ZipArchive functions
/// as some information will be missing when reading this manner.
///
/// Reads a file header from the start of the stream. Will return `Ok(Some(..))` if a file is
/// present at the start of the stream. Returns `Ok(None)` if the start of the central directory
/// is encountered. No more files should be read after this.
///
/// The Drop implementation of ZipFile ensures that the reader will be correctly positioned after
/// the structure is done.
///
/// Missing fields are:
/// * `comment`: set to an empty string
/// * `data_start`: set to 0
/// * `external_attributes`: `unix_mode()`: will return None
pub fn read_zipfile_from_stream<R: Read>(reader: &mut R) -> ZipResult<Option<ZipFile<'_>>> {
    // We can't use the typical ::parse() method, as we follow separate code paths depending on the
    // "magic" value (since the magic value will be from the central directory header if we've
    // finished iterating over all the actual files).
    /* TODO: smallvec? */
    let mut block = ZipLocalEntryBlock::zeroed();
    reader.read_exact(block.as_bytes_mut())?;

    match block.magic().from_le() {
        spec::Magic::LOCAL_FILE_HEADER_SIGNATURE => (),
        // Central directory header means no more local entries: end of iteration.
        spec::Magic::CENTRAL_DIRECTORY_HEADER_SIGNATURE => return Ok(None),
        _ => return Err(ZipLocalEntryBlock::WRONG_MAGIC_ERROR),
    }

    let block = block.from_le();

    let mut result = ZipFileData::from_local_block(block, reader)?;

    // I/O errors while parsing the extra field are tolerated (best effort);
    // any other parse error is propagated.
    match parse_extra_field(&mut result) {
        Ok(..) | Err(ZipError::Io(..)) => {}
        Err(e) => return Err(e),
    }

    // Limit the reader to exactly the compressed payload of this entry.
    let limit_reader = (reader as &mut dyn Read).take(result.compressed_size);

    let result_crc32 = result.crc32;
    let result_compression_method = result.compression_method;
    let crypto_reader = make_crypto_reader(&result, limit_reader, None, None)?;

    Ok(Some(ZipFile {
        data: Cow::Owned(result),
        reader: make_reader(result_compression_method, result_crc32, crypto_reader)?,
    }))
}

#[cfg(test)]
mod test {
    use crate::result::ZipResult;
    use crate::write::SimpleFileOptions;
    use crate::CompressionMethod::Stored;
    use crate::{ZipArchive, ZipWriter};
    use std::io::{Cursor, Read, Write};
    use tempfile::TempDir;

    #[test]
    fn invalid_offset() {
        use super::ZipArchive;

        let mut v = Vec::new();
        v.extend_from_slice(include_bytes!("../tests/data/invalid_offset.zip"));
        let reader = ZipArchive::new(Cursor::new(v));
        assert!(reader.is_err());
    }

    #[test]
    fn invalid_offset2() {
        use super::ZipArchive;

        let mut v = Vec::new();
        v.extend_from_slice(include_bytes!("../tests/data/invalid_offset2.zip"));
        let reader = ZipArchive::new(Cursor::new(v));
        assert!(reader.is_err());
    }

    #[test]
    fn zip64_with_leading_junk() {
        use super::ZipArchive;

        let mut v = Vec::new();
        v.extend_from_slice(include_bytes!("../tests/data/zip64_demo.zip"));
        let reader = ZipArchive::new(Cursor::new(v)).unwrap();
        assert_eq!(reader.len(), 1);
    }

    #[test]
    fn zip_contents() {
        use super::ZipArchive;

        let mut v = Vec::new();
        v.extend_from_slice(include_bytes!("../tests/data/mimetype.zip"));
        let mut reader = ZipArchive::new(Cursor::new(v)).unwrap();
        assert_eq!(reader.comment(), b"");
        assert_eq!(reader.by_index(0).unwrap().central_header_start(), 77);
    }

    #[test]
    fn zip_read_streaming() {
        use super::read_zipfile_from_stream;

        let mut v = Vec::new();
        v.extend_from_slice(include_bytes!("../tests/data/mimetype.zip"));
        let mut reader = Cursor::new(v);
        loop {
            if read_zipfile_from_stream(&mut reader).unwrap().is_none() {
                break;
            }
        }
    }

    #[test]
    fn zip_clone() {
        use super::ZipArchive;
        use std::io::Read;

        let mut v = Vec::new();
        v.extend_from_slice(include_bytes!("../tests/data/mimetype.zip"));
        let mut reader1 = ZipArchive::new(Cursor::new(v)).unwrap();
        let mut reader2 = reader1.clone();

        let mut file1 = reader1.by_index(0).unwrap();
        let mut file2 = reader2.by_index(0).unwrap();

        let t = file1.last_modified().unwrap();
        assert_eq!(
            (
                t.year(),
                t.month(),
                t.day(),
                t.hour(),
                t.minute(),
                t.second()
            ),
            (1980, 1, 1, 0, 0, 0)
        );

        let mut buf1 = [0; 5];
        let mut buf2 = [0; 5];
        let mut buf3 = [0; 5];
        let mut buf4 = [0; 5];

        // Interleaved reads: cloned archives must have independent read positions.
        file1.read_exact(&mut buf1).unwrap();
        file2.read_exact(&mut buf2).unwrap();
        file1.read_exact(&mut buf3).unwrap();
        file2.read_exact(&mut buf4).unwrap();

        assert_eq!(buf1, buf2);
        assert_eq!(buf3, buf4);
        assert_ne!(buf1, buf3);
    }

    #[test]
    fn file_and_dir_predicates() {
        use super::ZipArchive;

        let mut v = Vec::new();
        v.extend_from_slice(include_bytes!("../tests/data/files_and_dirs.zip"));
        let mut zip = ZipArchive::new(Cursor::new(v)).unwrap();

        for i in 0..zip.len() {
            let zip_file = zip.by_index(i).unwrap();
            let full_name = zip_file.enclosed_name().unwrap();
            let file_name = full_name.file_name().unwrap().to_str().unwrap();
            assert!(
                (file_name.starts_with("dir") && zip_file.is_dir())
                    || (file_name.starts_with("file") && zip_file.is_file())
            );
        }
    }

    #[test]
    fn zip64_magic_in_filenames() {
        let files = vec![
            include_bytes!("../tests/data/zip64_magic_in_filename_1.zip").to_vec(),
            include_bytes!("../tests/data/zip64_magic_in_filename_2.zip").to_vec(),
            include_bytes!("../tests/data/zip64_magic_in_filename_3.zip").to_vec(),
            include_bytes!("../tests/data/zip64_magic_in_filename_4.zip").to_vec(),
            include_bytes!("../tests/data/zip64_magic_in_filename_5.zip").to_vec(),
        ];
        // Although we don't allow adding files whose names contain the ZIP64 CDB-end or
        // CDB-end-locator signatures, we still read them when they aren't genuinely ambiguous.
        for file in files {
            ZipArchive::new(Cursor::new(file)).unwrap();
        }
    }

    /// test case to ensure we don't preemptively over allocate based on the
    /// declared number of files in the CDE of an invalid zip when the number of
    /// files declared is more than the alleged offset in the CDE
    #[test]
    fn invalid_cde_number_of_files_allocation_smaller_offset() {
        use super::ZipArchive;

        let mut v = Vec::new();
        v.extend_from_slice(include_bytes!(
            "../tests/data/invalid_cde_number_of_files_allocation_smaller_offset.zip"
        ));
        let reader = ZipArchive::new(Cursor::new(v));
        assert!(reader.is_err() || reader.unwrap().is_empty());
    }

    /// test case to ensure we don't preemptively over allocate based on the
    /// declared number of files in the CDE of an invalid zip when the number of
    /// files declared is less than the alleged offset in the CDE
    #[test]
    fn invalid_cde_number_of_files_allocation_greater_offset() {
        use super::ZipArchive;

        let mut v = Vec::new();
        v.extend_from_slice(include_bytes!(
            "../tests/data/invalid_cde_number_of_files_allocation_greater_offset.zip"
        ));
        let reader = ZipArchive::new(Cursor::new(v));
        assert!(reader.is_err());
    }

    #[cfg(feature = "deflate64")]
    #[test]
    fn deflate64_index_out_of_bounds() -> std::io::Result<()> {
        let mut v = Vec::new();
        v.extend_from_slice(include_bytes!(
            "../tests/data/raw_deflate64_index_out_of_bounds.zip"
        ));
        let mut reader = ZipArchive::new(Cursor::new(v))?;
        std::io::copy(&mut reader.by_index(0)?, &mut std::io::sink()).expect_err("Invalid file");
        Ok(())
    }

    #[cfg(feature = "deflate64")]
    #[test]
    fn deflate64_not_enough_space() {
        let mut v = Vec::new();
        v.extend_from_slice(include_bytes!("../tests/data/deflate64_issue_25.zip"));
        ZipArchive::new(Cursor::new(v)).expect_err("Invalid file");
    }

    #[cfg(feature = "_deflate-any")]
    #[test]
    fn test_read_with_data_descriptor() {
        use std::io::Read;

        let mut v = Vec::new();
        v.extend_from_slice(include_bytes!("../tests/data/data_descriptor.zip"));
        let mut reader = ZipArchive::new(Cursor::new(v)).unwrap();
        let mut decompressed = [0u8; 16];
        let mut file = reader.by_index(0).unwrap();
        assert_eq!(file.read(&mut decompressed).unwrap(), 12);
    }

    #[test]
    fn test_is_symlink() -> std::io::Result<()> {
        let mut v = Vec::new();
        v.extend_from_slice(include_bytes!("../tests/data/symlink.zip"));
        let mut reader = ZipArchive::new(Cursor::new(v)).unwrap();
        assert!(reader.by_index(0).unwrap().is_symlink());
        let tempdir = TempDir::with_prefix("test_is_symlink")?;
        reader.extract(&tempdir).unwrap();
        assert!(tempdir.path().join("bar").is_symlink());
        Ok(())
    }

    #[test]
    #[cfg(feature = "_deflate-any")]
    fn test_utf8_extra_field() {
        let mut v = Vec::new();
        v.extend_from_slice(include_bytes!("../tests/data/chinese.zip"));
        let mut reader = ZipArchive::new(Cursor::new(v)).unwrap();
        reader.by_name("七个房间.txt").unwrap();
    }

    #[test]
    fn test_utf8() {
        let mut v = Vec::new();
        v.extend_from_slice(include_bytes!("../tests/data/linux-7z.zip"));
        let mut reader = ZipArchive::new(Cursor::new(v)).unwrap();
        reader.by_name("你好.txt").unwrap();
    }

    #[test]
    fn test_utf8_2() {
        let mut v = Vec::new();
        v.extend_from_slice(include_bytes!("../tests/data/windows-7zip.zip"));
        let mut reader = ZipArchive::new(Cursor::new(v)).unwrap();
        reader.by_name("你好.txt").unwrap();
    }

    #[test]
    fn test_64k_files() -> ZipResult<()> {
        let mut writer = ZipWriter::new(Cursor::new(Vec::new()));
        let options = SimpleFileOptions {
            compression_method: Stored,
            ..Default::default()
        };
        // Write u16::MAX + 1 entries to cross the zip64 entry-count threshold.
        for i in 0..=u16::MAX {
            let file_name = format!("{i}.txt");
            writer.start_file(&*file_name, options)?;
            writer.write_all(i.to_string().as_bytes())?;
        }

        let mut reader = ZipArchive::new(writer.finish()?)?;
        for i in 0..=u16::MAX {
            let expected_name = format!("{i}.txt");
            let expected_contents = i.to_string();
            let expected_contents = expected_contents.as_bytes();
            // Look up each entry both by name and by index.
            let mut file = reader.by_name(&expected_name)?;
            let mut contents = Vec::with_capacity(expected_contents.len());
            file.read_to_end(&mut contents)?;
            assert_eq!(contents, expected_contents);
            drop(file);
            contents.clear();
            let mut file = reader.by_index(i as usize)?;
            file.read_to_end(&mut contents)?;
            assert_eq!(contents, expected_contents);
        }
        Ok(())
    }
}
#![allow(unknown_lints)] // non_local_definitions isn't in Rust 1.70 #![allow(non_local_definitions)] //! Error types that can be emitted from this library use displaydoc::Display; use thiserror::Error; use std::error::Error; use std::fmt; use std::io; use std::num::TryFromIntError; use std::string::FromUtf8Error; /// Generic result type with ZipError as its error variant pub type ZipResult<T> = Result<T, ZipError>; /// Error type for Zip #[derive(Debug, Display, Error)] #[non_exhaustive] pub enum ZipError { /// i/o error: {0} Io(#[from] io::Error), /// invalid Zip archive: {0} InvalidArchive(&'static str), /// unsupported Zip archive: {0} UnsupportedArchive(&'static str), /// specified file not found in archive FileNotFound, /// The password provided is incorrect InvalidPassword, } impl ZipError { /// The text used as an error when a password is required and not supplied /// /// ```rust,no_run /// # use zip::result::ZipError; /// # let mut archive = zip::ZipArchive::new(std::io::Cursor::new(&[])).unwrap(); /// match archive.by_index(1) { /// Err(ZipError::UnsupportedArchive(ZipError::PASSWORD_REQUIRED)) => eprintln!("a password is needed to unzip this file"), /// _ => (), /// } /// # () /// ``` pub const PASSWORD_REQUIRED: &'static str = "Password required to decrypt file"; } impl From<ZipError> for io::Error { fn from(err: ZipError) -> io::Error { let kind = match &err { ZipError::Io(err) => err.kind(), ZipError::InvalidArchive(_) => io::ErrorKind::InvalidData, ZipError::UnsupportedArchive(_) => io::ErrorKind::Unsupported, ZipError::FileNotFound => io::ErrorKind::NotFound, ZipError::InvalidPassword => io::ErrorKind::InvalidInput, }; io::Error::new(kind, err) } } impl From<DateTimeRangeError> for ZipError { fn from(_: DateTimeRangeError) -> Self { ZipError::InvalidArchive("Invalid date or time") } } impl From<FromUtf8Error> for ZipError { fn from(_: FromUtf8Error) -> Self { ZipError::InvalidArchive("Invalid UTF-8") } } /// Error type for time parsing 
#[derive(Debug)] pub struct DateTimeRangeError; // TryFromIntError is also an out-of-range error. impl From<TryFromIntError> for DateTimeRangeError { fn from(_value: TryFromIntError) -> Self { DateTimeRangeError } } impl fmt::Display for DateTimeRangeError { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { write!( fmt, "a date could not be represented within the bounds the MS-DOS date range (1980-2107)" ) } } impl Error for DateTimeRangeError {}
#![macro_use] use crate::read::magic_finder::{Backwards, Forward, MagicFinder, OptimisticMagicFinder}; use crate::read::ArchiveOffset; use crate::result::{ZipError, ZipResult}; use core::mem; use std::io; use std::io::prelude::*; use std::slice; /// "Magic" header values used in the zip spec to locate metadata records. /// /// These values currently always take up a fixed four bytes, so we can parse and wrap them in this /// struct to enforce some small amount of type safety. #[derive(Copy, Clone, Debug, PartialOrd, Ord, PartialEq, Eq, Hash)] #[repr(transparent)] pub(crate) struct Magic(u32); impl Magic { pub const fn literal(x: u32) -> Self { Self(x) } #[inline(always)] #[allow(dead_code)] pub const fn from_le_bytes(bytes: [u8; 4]) -> Self { Self(u32::from_le_bytes(bytes)) } #[inline(always)] pub const fn to_le_bytes(self) -> [u8; 4] { self.0.to_le_bytes() } #[allow(clippy::wrong_self_convention)] #[inline(always)] pub fn from_le(self) -> Self { Self(u32::from_le(self.0)) } #[allow(clippy::wrong_self_convention)] #[inline(always)] pub fn to_le(self) -> Self { Self(u32::to_le(self.0)) } pub const LOCAL_FILE_HEADER_SIGNATURE: Self = Self::literal(0x04034b50); pub const CENTRAL_DIRECTORY_HEADER_SIGNATURE: Self = Self::literal(0x02014b50); pub const CENTRAL_DIRECTORY_END_SIGNATURE: Self = Self::literal(0x06054b50); pub const ZIP64_CENTRAL_DIRECTORY_END_SIGNATURE: Self = Self::literal(0x06064b50); pub const ZIP64_CENTRAL_DIRECTORY_END_LOCATOR_SIGNATURE: Self = Self::literal(0x07064b50); } /// Similar to [`Magic`], but used for extra field tags as per section 4.5.3 of APPNOTE.TXT. #[derive(Copy, Clone, Debug, PartialOrd, Ord, PartialEq, Eq, Hash)] #[repr(transparent)] pub(crate) struct ExtraFieldMagic(u16); /* TODO: maybe try to use this for parsing extra fields as well as writing them? 
*/ #[allow(dead_code)] impl ExtraFieldMagic { pub const fn literal(x: u16) -> Self { Self(x) } #[inline(always)] pub const fn from_le_bytes(bytes: [u8; 2]) -> Self { Self(u16::from_le_bytes(bytes)) } #[inline(always)] pub const fn to_le_bytes(self) -> [u8; 2] { self.0.to_le_bytes() } #[allow(clippy::wrong_self_convention)] #[inline(always)] pub fn from_le(self) -> Self { Self(u16::from_le(self.0)) } #[allow(clippy::wrong_self_convention)] #[inline(always)] pub fn to_le(self) -> Self { Self(u16::to_le(self.0)) } pub const ZIP64_EXTRA_FIELD_TAG: Self = Self::literal(0x0001); } /// The file size at which a ZIP64 record becomes necessary. /// /// If a file larger than this threshold attempts to be written, compressed or uncompressed, and /// [`FileOptions::large_file()`](crate::write::FileOptions) was not true, then [`ZipWriter`] will /// raise an [`io::Error`] with [`io::ErrorKind::Other`]. /// /// If the zip file itself is larger than this value, then a zip64 central directory record will be /// written to the end of the file. /// ///``` /// # fn main() -> Result<(), zip::result::ZipError> { /// use std::io::{self, Cursor, prelude::*}; /// use std::error::Error; /// use zip::{ZipWriter, write::SimpleFileOptions}; /// /// let mut zip = ZipWriter::new(Cursor::new(Vec::new())); /// // Writing an extremely large file for this test is faster without compression. /// let options = SimpleFileOptions::default().compression_method(zip::CompressionMethod::Stored); /// /// let big_len: usize = (zip::ZIP64_BYTES_THR as usize) + 1; /// let big_buf = vec![0u8; big_len]; /// zip.start_file("zero.dat", options)?; /// // This is too big! 
/// let res = zip.write_all(&big_buf[..]).err().unwrap(); /// assert_eq!(res.kind(), io::ErrorKind::Other); /// let description = format!("{}", &res); /// assert_eq!(description, "Large file option has not been set"); /// // Attempting to write anything further to the same zip will still succeed, but the previous /// // failing entry has been removed. /// zip.start_file("one.dat", options)?; /// let zip = zip.finish_into_readable()?; /// let names: Vec<_> = zip.file_names().collect(); /// assert_eq!(&names, &["one.dat"]); /// /// // Create a new zip output. /// let mut zip = ZipWriter::new(Cursor::new(Vec::new())); /// // This time, create a zip64 record for the file. /// let options = options.large_file(true); /// zip.start_file("zero.dat", options)?; /// // This succeeds because we specified that it could be a large file. /// assert!(zip.write_all(&big_buf[..]).is_ok()); /// # Ok(()) /// # } ///``` pub const ZIP64_BYTES_THR: u64 = u32::MAX as u64; /// The number of entries within a single zip necessary to allocate a zip64 central /// directory record. /// /// If more than this number of entries is written to a [`ZipWriter`], then [`ZipWriter::finish()`] /// will write out extra zip64 data to the end of the zip file. pub const ZIP64_ENTRY_THR: usize = u16::MAX as usize; /// # Safety /// /// - No padding/uninit bytes /// - All bytes patterns must be valid /// - No cell, pointers /// /// See `bytemuck::Pod` for more details. 
pub(crate) unsafe trait Pod: Copy + 'static { #[inline] fn zeroed() -> Self { unsafe { mem::zeroed() } } #[inline] fn as_bytes(&self) -> &[u8] { unsafe { slice::from_raw_parts(self as *const Self as *const u8, mem::size_of::<Self>()) } } #[inline] fn as_bytes_mut(&mut self) -> &mut [u8] { unsafe { slice::from_raw_parts_mut(self as *mut Self as *mut u8, mem::size_of::<Self>()) } } } pub(crate) trait FixedSizeBlock: Pod { const MAGIC: Magic; fn magic(self) -> Magic; const WRONG_MAGIC_ERROR: ZipError; #[allow(clippy::wrong_self_convention)] fn from_le(self) -> Self; fn parse<R: Read>(reader: &mut R) -> ZipResult<Self> { let mut block = Self::zeroed(); reader.read_exact(block.as_bytes_mut())?; let block = Self::from_le(block); if block.magic() != Self::MAGIC { return Err(Self::WRONG_MAGIC_ERROR); } Ok(block) } fn to_le(self) -> Self; fn write<T: Write>(self, writer: &mut T) -> ZipResult<()> { let block = self.to_le(); writer.write_all(block.as_bytes())?; Ok(()) } } /// Convert all the fields of a struct *from* little-endian representations. macro_rules! from_le { ($obj:ident, $field:ident, $type:ty) => { $obj.$field = <$type>::from_le($obj.$field); }; ($obj:ident, [($field:ident, $type:ty) $(,)?]) => { from_le![$obj, $field, $type]; }; ($obj:ident, [($field:ident, $type:ty), $($rest:tt),+ $(,)?]) => { from_le![$obj, $field, $type]; from_le!($obj, [$($rest),+]); }; } /// Convert all the fields of a struct *into* little-endian representations. macro_rules! to_le { ($obj:ident, $field:ident, $type:ty) => { $obj.$field = <$type>::to_le($obj.$field); }; ($obj:ident, [($field:ident, $type:ty) $(,)?]) => { to_le![$obj, $field, $type]; }; ($obj:ident, [($field:ident, $type:ty), $($rest:tt),+ $(,)?]) => { to_le![$obj, $field, $type]; to_le!($obj, [$($rest),+]); }; } /* TODO: derive macro to generate these fields? */ /// Implement `from_le()` and `to_le()`, providing the field specification to both macros /// and methods. macro_rules! to_and_from_le { ($($args:tt),+ $(,)?) 
=> { #[inline(always)] fn from_le(mut self) -> Self { from_le![self, [$($args),+]]; self } #[inline(always)] fn to_le(mut self) -> Self { to_le![self, [$($args),+]]; self } }; } #[derive(Copy, Clone, Debug)] #[repr(packed, C)] pub(crate) struct Zip32CDEBlock { magic: Magic, pub disk_number: u16, pub disk_with_central_directory: u16, pub number_of_files_on_this_disk: u16, pub number_of_files: u16, pub central_directory_size: u32, pub central_directory_offset: u32, pub zip_file_comment_length: u16, } unsafe impl Pod for Zip32CDEBlock {} impl FixedSizeBlock for Zip32CDEBlock { const MAGIC: Magic = Magic::CENTRAL_DIRECTORY_END_SIGNATURE; #[inline(always)] fn magic(self) -> Magic { self.magic } const WRONG_MAGIC_ERROR: ZipError = ZipError::InvalidArchive("Invalid digital signature header"); to_and_from_le![ (magic, Magic), (disk_number, u16), (disk_with_central_directory, u16), (number_of_files_on_this_disk, u16), (number_of_files, u16), (central_directory_size, u32), (central_directory_offset, u32), (zip_file_comment_length, u16) ]; } #[derive(Debug)] pub(crate) struct Zip32CentralDirectoryEnd { pub disk_number: u16, pub disk_with_central_directory: u16, pub number_of_files_on_this_disk: u16, pub number_of_files: u16, pub central_directory_size: u32, pub central_directory_offset: u32, pub zip_file_comment: Box<[u8]>, } impl Zip32CentralDirectoryEnd { fn into_block_and_comment(self) -> (Zip32CDEBlock, Box<[u8]>) { let Self { disk_number, disk_with_central_directory, number_of_files_on_this_disk, number_of_files, central_directory_size, central_directory_offset, zip_file_comment, } = self; let block = Zip32CDEBlock { magic: Zip32CDEBlock::MAGIC, disk_number, disk_with_central_directory, number_of_files_on_this_disk, number_of_files, central_directory_size, central_directory_offset, zip_file_comment_length: zip_file_comment.len() as u16, }; (block, zip_file_comment) } pub fn parse<T: Read>(reader: &mut T) -> ZipResult<Zip32CentralDirectoryEnd> { let Zip32CDEBlock { // 
magic, disk_number, disk_with_central_directory, number_of_files_on_this_disk, number_of_files, central_directory_size, central_directory_offset, zip_file_comment_length, .. } = Zip32CDEBlock::parse(reader)?; let mut zip_file_comment = vec![0u8; zip_file_comment_length as usize].into_boxed_slice(); if let Err(e) = reader.read_exact(&mut zip_file_comment) { if e.kind() == io::ErrorKind::UnexpectedEof { return Err(ZipError::InvalidArchive( "EOCD comment exceeds file boundary", )); } return Err(e.into()); } Ok(Zip32CentralDirectoryEnd { disk_number, disk_with_central_directory, number_of_files_on_this_disk, number_of_files, central_directory_size, central_directory_offset, zip_file_comment, }) } pub fn write<T: Write>(self, writer: &mut T) -> ZipResult<()> { let (block, comment) = self.into_block_and_comment(); if comment.len() > u16::MAX as usize { return Err(ZipError::InvalidArchive( "EOCD comment length exceeds u16::MAX", )); } block.write(writer)?; writer.write_all(&comment)?; Ok(()) } pub fn may_be_zip64(&self) -> bool { self.number_of_files == u16::MAX || self.central_directory_offset == u32::MAX } } #[derive(Copy, Clone)] #[repr(packed, C)] pub(crate) struct Zip64CDELocatorBlock { magic: Magic, pub disk_with_central_directory: u32, pub end_of_central_directory_offset: u64, pub number_of_disks: u32, } unsafe impl Pod for Zip64CDELocatorBlock {} impl FixedSizeBlock for Zip64CDELocatorBlock { const MAGIC: Magic = Magic::ZIP64_CENTRAL_DIRECTORY_END_LOCATOR_SIGNATURE; #[inline(always)] fn magic(self) -> Magic { self.magic } const WRONG_MAGIC_ERROR: ZipError = ZipError::InvalidArchive("Invalid zip64 locator digital signature header"); to_and_from_le![ (magic, Magic), (disk_with_central_directory, u32), (end_of_central_directory_offset, u64), (number_of_disks, u32), ]; } pub(crate) struct Zip64CentralDirectoryEndLocator { pub disk_with_central_directory: u32, pub end_of_central_directory_offset: u64, pub number_of_disks: u32, } impl Zip64CentralDirectoryEndLocator { 
pub fn parse<T: Read>(reader: &mut T) -> ZipResult<Zip64CentralDirectoryEndLocator> { let Zip64CDELocatorBlock { // magic, disk_with_central_directory, end_of_central_directory_offset, number_of_disks, .. } = Zip64CDELocatorBlock::parse(reader)?; Ok(Zip64CentralDirectoryEndLocator { disk_with_central_directory, end_of_central_directory_offset, number_of_disks, }) } pub fn block(self) -> Zip64CDELocatorBlock { let Self { disk_with_central_directory, end_of_central_directory_offset, number_of_disks, } = self; Zip64CDELocatorBlock { magic: Zip64CDELocatorBlock::MAGIC, disk_with_central_directory, end_of_central_directory_offset, number_of_disks, } } pub fn write<T: Write>(self, writer: &mut T) -> ZipResult<()> { self.block().write(writer) } } #[derive(Copy, Clone)] #[repr(packed, C)] pub(crate) struct Zip64CDEBlock { magic: Magic, pub record_size: u64, pub version_made_by: u16, pub version_needed_to_extract: u16, pub disk_number: u32, pub disk_with_central_directory: u32, pub number_of_files_on_this_disk: u64, pub number_of_files: u64, pub central_directory_size: u64, pub central_directory_offset: u64, } unsafe impl Pod for Zip64CDEBlock {} impl FixedSizeBlock for Zip64CDEBlock { const MAGIC: Magic = Magic::ZIP64_CENTRAL_DIRECTORY_END_SIGNATURE; fn magic(self) -> Magic { self.magic } const WRONG_MAGIC_ERROR: ZipError = ZipError::InvalidArchive("Invalid digital signature header"); to_and_from_le![ (magic, Magic), (record_size, u64), (version_made_by, u16), (version_needed_to_extract, u16), (disk_number, u32), (disk_with_central_directory, u32), (number_of_files_on_this_disk, u64), (number_of_files, u64), (central_directory_size, u64), (central_directory_offset, u64), ]; } pub(crate) struct Zip64CentralDirectoryEnd { pub record_size: u64, pub version_made_by: u16, pub version_needed_to_extract: u16, pub disk_number: u32, pub disk_with_central_directory: u32, pub number_of_files_on_this_disk: u64, pub number_of_files: u64, pub central_directory_size: u64, pub 
central_directory_offset: u64, pub extensible_data_sector: Box<[u8]>, } impl Zip64CentralDirectoryEnd { pub fn parse<T: Read>(reader: &mut T, max_size: u64) -> ZipResult<Zip64CentralDirectoryEnd> { let Zip64CDEBlock { record_size, version_made_by, version_needed_to_extract, disk_number, disk_with_central_directory, number_of_files_on_this_disk, number_of_files, central_directory_size, central_directory_offset, .. } = Zip64CDEBlock::parse(reader)?; if record_size < 44 { return Err(ZipError::InvalidArchive("Low EOCD64 record size")); } else if record_size.saturating_add(12) > max_size { return Err(ZipError::InvalidArchive( "EOCD64 extends beyond EOCD64 locator", )); } let mut zip_file_comment = vec![0u8; record_size as usize - 44].into_boxed_slice(); reader.read_exact(&mut zip_file_comment)?; Ok(Self { record_size, version_made_by, version_needed_to_extract, disk_number, disk_with_central_directory, number_of_files_on_this_disk, number_of_files, central_directory_size, central_directory_offset, extensible_data_sector: zip_file_comment, }) } pub fn into_block_and_comment(self) -> (Zip64CDEBlock, Box<[u8]>) { let Self { record_size, version_made_by, version_needed_to_extract, disk_number, disk_with_central_directory, number_of_files_on_this_disk, number_of_files, central_directory_size, central_directory_offset, extensible_data_sector, } = self; ( Zip64CDEBlock { magic: Zip64CDEBlock::MAGIC, record_size, version_made_by, version_needed_to_extract, disk_number, disk_with_central_directory, number_of_files_on_this_disk, number_of_files, central_directory_size, central_directory_offset, }, extensible_data_sector, ) } pub fn write<T: Write>(self, writer: &mut T) -> ZipResult<()> { let (block, comment) = self.into_block_and_comment(); block.write(writer)?; writer.write_all(&comment)?; Ok(()) } } pub(crate) struct DataAndPosition<T> { pub data: T, #[allow(dead_code)] pub position: u64, } impl<T> From<(T, u64)> for DataAndPosition<T> { fn from(value: (T, u64)) -> Self { Self 
{ data: value.0, position: value.1, } } } pub(crate) struct CentralDirectoryEndInfo { pub eocd: DataAndPosition<Zip32CentralDirectoryEnd>, pub eocd64: Option<DataAndPosition<Zip64CentralDirectoryEnd>>, pub archive_offset: u64, } /// Finds the EOCD and possibly the EOCD64 block and determines the archive offset. /// /// In the best case scenario (no prepended junk), this function will not backtrack /// in the reader. pub(crate) fn find_central_directory<R: Read + Seek>( reader: &mut R, archive_offset: ArchiveOffset, end_exclusive: u64, file_len: u64, ) -> ZipResult<CentralDirectoryEndInfo> { const EOCD_SIG_BYTES: [u8; mem::size_of::<Magic>()] = Magic::CENTRAL_DIRECTORY_END_SIGNATURE.to_le_bytes(); const EOCD64_SIG_BYTES: [u8; mem::size_of::<Magic>()] = Magic::ZIP64_CENTRAL_DIRECTORY_END_SIGNATURE.to_le_bytes(); const CDFH_SIG_BYTES: [u8; mem::size_of::<Magic>()] = Magic::CENTRAL_DIRECTORY_HEADER_SIGNATURE.to_le_bytes(); // Instantiate the mandatory finder let mut eocd_finder = MagicFinder::<Backwards<'static>>::new(&EOCD_SIG_BYTES, 0, end_exclusive); let mut subfinder: Option<OptimisticMagicFinder<Forward<'static>>> = None; // Keep the last errors for cases of improper EOCD instances. let mut parsing_error = None; while let Some(eocd_offset) = eocd_finder.next(reader)? { // Attempt to parse the EOCD block let eocd = match Zip32CentralDirectoryEnd::parse(reader) { Ok(eocd) => eocd, Err(e) => { if parsing_error.is_none() { parsing_error = Some(e); } continue; } }; // ! 
Relaxed (inequality) due to garbage-after-comment Python files // Consistency check: the EOCD comment must terminate before the end of file if eocd.zip_file_comment.len() as u64 + eocd_offset + 22 > file_len { parsing_error = Some(ZipError::InvalidArchive("Invalid EOCD comment length")); continue; } let zip64_metadata = if eocd.may_be_zip64() { fn try_read_eocd64_locator( reader: &mut (impl Read + Seek), eocd_offset: u64, ) -> ZipResult<(u64, Zip64CentralDirectoryEndLocator)> { if eocd_offset < mem::size_of::<Zip64CDELocatorBlock>() as u64 { return Err(ZipError::InvalidArchive( "EOCD64 Locator does not fit in file", )); } let locator64_offset = eocd_offset - mem::size_of::<Zip64CDELocatorBlock>() as u64; reader.seek(io::SeekFrom::Start(locator64_offset))?; Ok(( locator64_offset, Zip64CentralDirectoryEndLocator::parse(reader)?, )) } try_read_eocd64_locator(reader, eocd_offset).ok() } else { None }; let Some((locator64_offset, locator64)) = zip64_metadata else { // Branch out for zip32 let relative_cd_offset = eocd.central_directory_offset as u64; // If the archive is empty, there is nothing more to be checked, the archive is correct. if eocd.number_of_files == 0 { return Ok(CentralDirectoryEndInfo { eocd: (eocd, eocd_offset).into(), eocd64: None, archive_offset: eocd_offset.saturating_sub(relative_cd_offset), }); } // Consistency check: the CD relative offset cannot be after the EOCD if relative_cd_offset >= eocd_offset { parsing_error = Some(ZipError::InvalidArchive("Invalid CDFH offset in EOCD")); continue; } // Attempt to find the first CDFH let subfinder = subfinder .get_or_insert_with(OptimisticMagicFinder::new_empty) .repurpose( &CDFH_SIG_BYTES, // The CDFH must be before the EOCD and after the relative offset, // because prepended junk can only move it forward. 
(relative_cd_offset, eocd_offset), match archive_offset { ArchiveOffset::Known(n) => { Some((relative_cd_offset.saturating_add(n).min(eocd_offset), true)) } _ => Some((relative_cd_offset, false)), }, ); // Consistency check: find the first CDFH if let Some(cd_offset) = subfinder.next(reader)? { // The first CDFH will define the archive offset let archive_offset = cd_offset - relative_cd_offset; return Ok(CentralDirectoryEndInfo { eocd: (eocd, eocd_offset).into(), eocd64: None, archive_offset, }); } parsing_error = Some(ZipError::InvalidArchive("No CDFH found")); continue; }; // Consistency check: the EOCD64 offset must be before EOCD64 Locator offset */ if locator64.end_of_central_directory_offset >= locator64_offset { parsing_error = Some(ZipError::InvalidArchive("Invalid EOCD64 Locator CD offset")); continue; } if locator64.number_of_disks > 1 { parsing_error = Some(ZipError::InvalidArchive( "Multi-disk ZIP files are not supported", )); continue; } // This was hidden inside a function to collect errors in a single place. // Once try blocks are stabilized, this can go away. 
fn try_read_eocd64<R: Read + Seek>( reader: &mut R, locator64: &Zip64CentralDirectoryEndLocator, expected_length: u64, ) -> ZipResult<Zip64CentralDirectoryEnd> { let z64 = Zip64CentralDirectoryEnd::parse(reader, expected_length)?; // Consistency check: EOCD64 locator should agree with the EOCD64 if z64.disk_with_central_directory != locator64.disk_with_central_directory { return Err(ZipError::InvalidArchive( "Invalid EOCD64: inconsistency with Locator data", )); } // Consistency check: the EOCD64 must have the expected length if z64.record_size + 12 != expected_length { return Err(ZipError::InvalidArchive( "Invalid EOCD64: inconsistent length", )); } Ok(z64) } // Attempt to find the EOCD64 with an initial guess let subfinder = subfinder .get_or_insert_with(OptimisticMagicFinder::new_empty) .repurpose( &EOCD64_SIG_BYTES, (locator64.end_of_central_directory_offset, locator64_offset), match archive_offset { ArchiveOffset::Known(n) => Some(( locator64 .end_of_central_directory_offset .saturating_add(n) .min(locator64_offset), true, )), _ => Some((locator64.end_of_central_directory_offset, false)), }, ); // Consistency check: Find the EOCD64 let mut local_error = None; while let Some(eocd64_offset) = subfinder.next(reader)? 
{
    // Tail of the EOCD64 search loop (the enclosing function begins before this
    // chunk): each candidate EOCD64 signature found by the magic finder is
    // parsed and consistency-checked; the first consistent one wins.
    let archive_offset = eocd64_offset - locator64.end_of_central_directory_offset;
    match try_read_eocd64(
        reader,
        &locator64,
        locator64_offset.saturating_sub(eocd64_offset),
    ) {
        Ok(eocd64) => {
            // Reject an EOCD64 whose claimed central directory (entry count *
            // fixed CDFH size + CD offset) could not physically fit before it.
            // saturating_* keeps attacker-controlled counts from overflowing.
            if eocd64_offset
                < eocd64
                    .number_of_files
                    .saturating_mul(
                        mem::size_of::<crate::types::ZipCentralEntryBlock>() as u64
                    )
                    .saturating_add(eocd64.central_directory_offset)
            {
                local_error = Some(ZipError::InvalidArchive(
                    "Invalid EOCD64: inconsistent number of files",
                ));
                continue;
            }
            return Ok(CentralDirectoryEndInfo {
                eocd: (eocd, eocd_offset).into(),
                eocd64: Some((eocd64, eocd64_offset).into()),
                archive_offset,
            });
        }
        Err(e) => {
            // Remember the parse failure but keep scanning for other candidates.
            local_error = Some(e);
        }
    }
}
// All EOCD64 candidates failed; surface the most specific error we saw.
parsing_error = local_error.or(Some(ZipError::InvalidArchive("Could not find EOCD64")));
}
Err(parsing_error.unwrap_or(ZipError::InvalidArchive("Could not find EOCD")))
}

/// Returns `true` when `filename` denotes a directory entry, i.e. it ends with
/// a `/` or `\` separator (ZIP archives may use either, regardless of OS).
pub(crate) fn is_dir(filename: &str) -> bool {
    filename
        .chars()
        .next_back()
        .is_some_and(|c| c == '/' || c == '\\')
}

#[cfg(test)]
mod test {
    use super::*;
    use std::io::Cursor;

    /// Minimal fixed-size block used to exercise the `FixedSizeBlock`
    /// parse/write round trip without depending on a real ZIP record layout.
    #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
    #[repr(packed, C)]
    pub struct TestBlock {
        magic: Magic,
        pub file_name_length: u16,
    }

    // SAFETY-ADJACENT NOTE(review): `Pod` is a project-declared marker trait;
    // the impl mirrors the real entry blocks below — confirm its contract in spec.rs.
    unsafe impl Pod for TestBlock {}

    impl FixedSizeBlock for TestBlock {
        const MAGIC: Magic = Magic::literal(0x01111);

        fn magic(self) -> Magic {
            self.magic
        }

        // This error is never expected to surface: the test writes its own magic.
        const WRONG_MAGIC_ERROR: ZipError = ZipError::InvalidArchive("unreachable");

        to_and_from_le![(magic, Magic), (file_name_length, u16)];
    }

    /// Demonstrate that a block object can be safely written to memory and deserialized back out.
    #[test]
    fn block_serde() {
        let block = TestBlock {
            magic: TestBlock::MAGIC,
            file_name_length: 3,
        };
        let mut c = Cursor::new(Vec::new());
        block.write(&mut c).unwrap();
        c.set_position(0);
        let block2 = TestBlock::parse(&mut c).unwrap();
        assert_eq!(block, block2);
    }
}
//! Types that specify what is contained in a ZIP. use crate::cp437::FromCp437; use crate::write::{FileOptionExtension, FileOptions}; use path::{Component, Path, PathBuf}; use std::cmp::Ordering; use std::fmt; use std::fmt::{Debug, Formatter}; use std::mem; use std::path; use std::sync::{Arc, OnceLock}; #[cfg(feature = "chrono")] use chrono::{Datelike, NaiveDate, NaiveDateTime, NaiveTime, Timelike}; use crate::result::{ZipError, ZipResult}; use crate::spec::{self, FixedSizeBlock, Pod}; pub(crate) mod ffi { pub const S_IFDIR: u32 = 0o0040000; pub const S_IFREG: u32 = 0o0100000; pub const S_IFLNK: u32 = 0o0120000; } use crate::extra_fields::ExtraField; use crate::result::DateTimeRangeError; use crate::spec::is_dir; use crate::types::ffi::S_IFDIR; use crate::{CompressionMethod, ZIP64_BYTES_THR}; #[cfg(feature = "time")] use time::{error::ComponentRange, Date, Month, OffsetDateTime, PrimitiveDateTime, Time}; pub(crate) struct ZipRawValues { pub(crate) crc32: u32, pub(crate) compressed_size: u64, pub(crate) uncompressed_size: u64, } #[derive(Clone, Copy, Debug, PartialEq, Eq, Default)] #[repr(u8)] pub enum System { Dos = 0, Unix = 3, #[default] Unknown, } impl From<u8> for System { fn from(system: u8) -> Self { match system { 0 => Self::Dos, 3 => Self::Unix, _ => Self::Unknown, } } } impl From<System> for u8 { fn from(system: System) -> Self { match system { System::Dos => 0, System::Unix => 3, System::Unknown => 4, } } } /// Representation of a moment in time. /// /// Zip files use an old format from DOS to store timestamps, /// with its own set of peculiarities. /// For example, it has a resolution of 2 seconds! /// /// A [`DateTime`] can be stored directly in a zipfile with [`FileOptions::last_modified_time`], /// or read from one with [`ZipFile::last_modified`](crate::read::ZipFile::last_modified). /// /// # Warning /// /// Because there is no timezone associated with the [`DateTime`], they should ideally only /// be used for user-facing descriptions. 
///
/// Modern zip files store more precise timestamps; see [`crate::extra_fields::ExtendedTimestamp`]
/// for details.
#[derive(Clone, Copy, Eq, Hash, PartialEq)]
pub struct DateTime {
    // MS-DOS packed date: bits 0-4 day, 5-8 month, 9-15 years since 1980.
    datepart: u16,
    // MS-DOS packed time: bits 0-4 seconds/2, 5-10 minute, 11-15 hour.
    timepart: u16,
}

impl Debug for DateTime {
    // Renders as the constructor expression that would recreate the value,
    // which makes failing test output directly copy-pastable.
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        if *self == Self::default() {
            return f.write_str("DateTime::default()");
        }
        f.write_fmt(format_args!(
            "DateTime::from_date_and_time({}, {}, {}, {}, {}, {})?",
            self.year(),
            self.month(),
            self.day(),
            self.hour(),
            self.minute(),
            self.second()
        ))
    }
}

impl Ord for DateTime {
    // Lexicographic comparison on decoded fields, most significant first.
    // Comparing the decoded values (not the raw u16s) keeps the ordering
    // correct even though the packed layout happens to be order-compatible.
    fn cmp(&self, other: &Self) -> Ordering {
        if let ord @ (Ordering::Less | Ordering::Greater) = self.year().cmp(&other.year()) {
            return ord;
        }
        if let ord @ (Ordering::Less | Ordering::Greater) = self.month().cmp(&other.month()) {
            return ord;
        }
        if let ord @ (Ordering::Less | Ordering::Greater) = self.day().cmp(&other.day()) {
            return ord;
        }
        if let ord @ (Ordering::Less | Ordering::Greater) = self.hour().cmp(&other.hour()) {
            return ord;
        }
        if let ord @ (Ordering::Less | Ordering::Greater) = self.minute().cmp(&other.minute()) {
            return ord;
        }
        self.second().cmp(&other.second())
    }
}

impl PartialOrd for DateTime {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}

impl DateTime {
    /// Returns the current time if possible, otherwise the default of 1980-01-01.
    #[cfg(feature = "time")]
    pub fn default_for_write() -> Self {
        OffsetDateTime::now_utc()
            .try_into()
            .unwrap_or_else(|_| DateTime::default())
    }

    /// Returns the current time if possible, otherwise the default of 1980-01-01.
    #[cfg(not(feature = "time"))]
    pub fn default_for_write() -> Self {
        // Without the `time` feature there is no clock source; fall back to epoch.
        DateTime::default()
    }
}

#[cfg(fuzzing)]
impl arbitrary::Arbitrary<'_> for DateTime {
    // Generates only field-wise in-range values; day may still exceed the
    // month's length (e.g. Feb 31), matching what a hostile archive could hold.
    fn arbitrary(u: &mut arbitrary::Unstructured) -> arbitrary::Result<Self> {
        let year: u16 = u.int_in_range(1980..=2107)?;
        let month: u16 = u.int_in_range(1..=12)?;
        let day: u16 = u.int_in_range(1..=31)?;
        let datepart = day | (month << 5) | ((year - 1980) << 9);
        let hour: u16 = u.int_in_range(0..=23)?;
        let minute: u16 = u.int_in_range(0..=59)?;
        let second: u16 = u.int_in_range(0..=58)?;
        let timepart = (second >> 1) | (minute << 5) | (hour << 11);
        Ok(DateTime { datepart, timepart })
    }
}

#[cfg(feature = "chrono")]
impl TryFrom<NaiveDateTime> for DateTime {
    type Error = DateTimeRangeError;

    fn try_from(value: NaiveDateTime) -> Result<Self, Self::Error> {
        DateTime::from_date_and_time(
            value.year().try_into()?,
            value.month().try_into()?,
            value.day().try_into()?,
            value.hour().try_into()?,
            value.minute().try_into()?,
            value.second().try_into()?,
        )
    }
}

#[cfg(feature = "chrono")]
impl TryFrom<DateTime> for NaiveDateTime {
    type Error = DateTimeRangeError;

    fn try_from(value: DateTime) -> Result<Self, Self::Error> {
        let date = NaiveDate::from_ymd_opt(
            value.year().into(),
            value.month().into(),
            value.day().into(),
        )
        .ok_or(DateTimeRangeError)?;
        let time = NaiveTime::from_hms_opt(
            value.hour().into(),
            value.minute().into(),
            value.second().into(),
        )
        .ok_or(DateTimeRangeError)?;
        Ok(NaiveDateTime::new(date, time))
    }
}

impl TryFrom<(u16, u16)> for DateTime {
    type Error = DateTimeRangeError;

    /// Tuple order is `(datepart, timepart)`, mirroring `From<DateTime>` below.
    #[inline]
    fn try_from(values: (u16, u16)) -> Result<Self, Self::Error> {
        Self::try_from_msdos(values.0, values.1)
    }
}

impl From<DateTime> for (u16, u16) {
    #[inline]
    fn from(dt: DateTime) -> Self {
        (dt.datepart(), dt.timepart())
    }
}

impl Default for DateTime {
    /// Constructs an 'default' datetime of 1980-01-01 00:00:00
    fn default() -> DateTime {
        DateTime {
            // day = 1, month = 1, year offset = 0 => 1980-01-01.
            datepart: 0b0000000000100001,
            timepart: 0,
        }
    }
}

impl fmt::Display for DateTime {
    #[inline]
    fn
fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // ISO-8601-like, zero-padded: "YYYY-MM-DD HH:MM:SS" (no timezone).
        write!(
            f,
            "{:04}-{:02}-{:02} {:02}:{:02}:{:02}",
            self.year(),
            self.month(),
            self.day(),
            self.hour(),
            self.minute(),
            self.second()
        )
    }
}

impl DateTime {
    /// Converts an msdos (u16, u16) pair to a DateTime object
    ///
    /// # Safety
    /// The caller must ensure the date and time are valid.
    pub const unsafe fn from_msdos_unchecked(datepart: u16, timepart: u16) -> DateTime {
        DateTime { datepart, timepart }
    }

    /// Converts an msdos (u16, u16) pair to a DateTime object if it represents a valid date and
    /// time.
    pub fn try_from_msdos(datepart: u16, timepart: u16) -> Result<DateTime, DateTimeRangeError> {
        // Unpack the DOS bit fields; seconds are stored halved, so shift back.
        let seconds = (timepart & 0b0000000000011111) << 1;
        let minutes = (timepart & 0b0000011111100000) >> 5;
        let hours = (timepart & 0b1111100000000000) >> 11;
        let days = datepart & 0b0000000000011111;
        let months = (datepart & 0b0000000111100000) >> 5;
        let years = (datepart & 0b1111111000000000) >> 9;
        // Re-validate through the checked constructor so out-of-range fields
        // in a hostile archive become DateTimeRangeError instead of bad state.
        Self::from_date_and_time(
            years.checked_add(1980).ok_or(DateTimeRangeError)?,
            months.try_into()?,
            days.try_into()?,
            hours.try_into()?,
            minutes.try_into()?,
            seconds.try_into()?,
        )
    }

    /// Constructs a DateTime from a specific date and time
    ///
    /// The bounds are:
    /// * year: [1980, 2107]
    /// * month: [1, 12]
    /// * day: [1, 28..=31]
    /// * hour: [0, 23]
    /// * minute: [0, 59]
    /// * second: [0, 58]
    ///
    /// A `second` of 59 or 60 is accepted but clamped to 58 (the packed format
    /// stores seconds/2, and exFAT cannot represent leap seconds).
    pub fn from_date_and_time(
        year: u16,
        month: u8,
        day: u8,
        hour: u8,
        minute: u8,
        second: u8,
    ) -> Result<DateTime, DateTimeRangeError> {
        // Equivalent to the classic Gregorian rule: given year % 4 == 0,
        // year % 25 != 0 <=> year % 100 != 0, and year % 16 == 0 <=> year % 400 == 0.
        fn is_leap_year(year: u16) -> bool {
            (year % 4 == 0) && ((year % 25 != 0) || (year % 16 == 0))
        }

        if (1980..=2107).contains(&year)
            && (1..=12).contains(&month)
            && (1..=31).contains(&day)
            && hour <= 23
            && minute <= 59
            && second <= 60
        {
            let second = second.min(58); // exFAT can't store leap seconds
            let max_day = match month {
                1 | 3 | 5 | 7 | 8 | 10 | 12 => 31,
                4 | 6 | 9 | 11 => 30,
                2 if is_leap_year(year) => 29,
                2 => 28,
                // month was range-checked to 1..=12 above
                _ => unreachable!(),
            };
            if day > max_day {
                return Err(DateTimeRangeError);
            }
            // Pack into the DOS bit layout (see field docs on the struct).
            let datepart = (day as u16) | ((month as u16) << 5) | ((year - 1980) << 9);
            let timepart =
                ((second as u16) >> 1) | ((minute as u16) << 5) | ((hour as u16) << 11);
            Ok(DateTime { datepart, timepart })
        } else {
            Err(DateTimeRangeError)
        }
    }

    /// Indicates whether this date and time can be written to a zip archive.
    pub fn is_valid(&self) -> bool {
        Self::try_from_msdos(self.datepart, self.timepart).is_ok()
    }

    #[cfg(feature = "time")]
    /// Converts a OffsetDateTime object to a DateTime
    ///
    /// Returns `Err` when this object is out of bounds
    #[deprecated(since = "0.6.4", note = "use `DateTime::try_from()` instead")]
    pub fn from_time(dt: OffsetDateTime) -> Result<DateTime, DateTimeRangeError> {
        dt.try_into()
    }

    /// Gets the time portion of this datetime in the msdos representation
    pub const fn timepart(&self) -> u16 {
        self.timepart
    }

    /// Gets the date portion of this datetime in the msdos representation
    pub const fn datepart(&self) -> u16 {
        self.datepart
    }

    #[cfg(feature = "time")]
    /// Converts the DateTime to a OffsetDateTime structure
    #[deprecated(since = "1.3.1", note = "use `OffsetDateTime::try_from()` instead")]
    pub fn to_time(&self) -> Result<OffsetDateTime, ComponentRange> {
        (*self).try_into()
    }

    /// Get the year. There is no epoch, i.e. 2018 will be returned as 2018.
    pub const fn year(&self) -> u16 {
        (self.datepart >> 9) + 1980
    }

    /// Get the month, where 1 = january and 12 = december
    ///
    /// # Warning
    ///
    /// When read from a zip file, this may not be a reasonable value
    pub const fn month(&self) -> u8 {
        ((self.datepart & 0b0000000111100000) >> 5) as u8
    }

    /// Get the day
    ///
    /// # Warning
    ///
    /// When read from a zip file, this may not be a reasonable value
    pub const fn day(&self) -> u8 {
        (self.datepart & 0b0000000000011111) as u8
    }

    /// Get the hour
    ///
    /// # Warning
    ///
    /// When read from a zip file, this may not be a reasonable value
    pub const fn hour(&self) -> u8 {
        (self.timepart >> 11) as u8
    }

    /// Get the minute
    ///
    /// # Warning
    ///
    /// When read from a zip file, this may not be a reasonable value
    pub const fn minute(&self) -> u8 {
        ((self.timepart & 0b0000011111100000) >> 5) as u8
    }

    /// Get the second
    ///
    /// # Warning
    ///
    /// When read from a zip file, this may not be a reasonable value
    pub const fn second(&self) -> u8 {
        // Stored halved, so the result is always even (2-second resolution).
        ((self.timepart & 0b0000000000011111) << 1) as u8
    }
}

#[cfg(feature = "time")]
impl TryFrom<OffsetDateTime> for DateTime {
    type Error = DateTimeRangeError;

    fn try_from(dt: OffsetDateTime) -> Result<Self, Self::Error> {
        // NOTE(review): the offset is discarded here — the wall-clock fields are
        // stored as-is, consistent with DateTime having no timezone.
        Self::from_date_and_time(
            dt.year().try_into()?,
            dt.month().into(),
            dt.day(),
            dt.hour(),
            dt.minute(),
            dt.second(),
        )
    }
}

#[cfg(feature = "time")]
impl TryFrom<DateTime> for OffsetDateTime {
    type Error = ComponentRange;

    fn try_from(dt: DateTime) -> Result<Self, Self::Error> {
        let date =
            Date::from_calendar_date(dt.year() as i32, Month::try_from(dt.month())?, dt.day())?;
        let time = Time::from_hms(dt.hour(), dt.minute(), dt.second())?;
        // Arbitrarily interpreted as UTC, since the packed format carries no zone.
        Ok(PrimitiveDateTime::new(date, time).assume_utc())
    }
}

/// Lowest "version needed to extract" we ever emit (1.0 = stored entries).
pub const MIN_VERSION: u8 = 10;
/// Default "version made by" (4.5 = ZIP64-capable).
pub const DEFAULT_VERSION: u8 = 45;

/// Structure representing a ZIP file.
#[derive(Debug, Clone, Default)]
pub struct ZipFileData {
    /// Compatibility of the file attribute information
    pub system: System,
    /// Specification version
    pub version_made_by: u8,
    /// True if the file is encrypted.
    pub encrypted: bool,
    /// True if file_name and file_comment are UTF8
    pub is_utf8: bool,
    /// True if the file uses a data-descriptor section
    pub using_data_descriptor: bool,
    /// Compression method used to store the file
    pub compression_method: crate::compression::CompressionMethod,
    /// Compression level to store the file
    pub compression_level: Option<i64>,
    /// Last modified time. This will only have a 2 second precision.
    pub last_modified_time: Option<DateTime>,
    /// CRC32 checksum
    pub crc32: u32,
    /// Size of the file in the ZIP
    pub compressed_size: u64,
    /// Size of the file when extracted
    pub uncompressed_size: u64,
    /// Name of the file
    pub file_name: Box<str>,
    /// Raw file name. To be used when file_name was incorrectly decoded.
    pub file_name_raw: Box<[u8]>,
    /// Extra field usually used for storage expansion
    pub extra_field: Option<Arc<Vec<u8>>>,
    /// Extra field only written to central directory
    pub central_extra_field: Option<Arc<Vec<u8>>>,
    /// File comment
    pub file_comment: Box<str>,
    /// Specifies where the local header of the file starts
    pub header_start: u64,
    /// Specifies where the extra data of the file starts
    pub extra_data_start: Option<u64>,
    /// Specifies where the central header of the file starts
    ///
    /// Note that when this is not known, it is set to 0
    pub central_header_start: u64,
    /// Specifies where the compressed data of the file starts
    pub data_start: OnceLock<u64>,
    /// External file attributes
    pub external_attributes: u32,
    /// Reserve local ZIP64 extra field
    pub large_file: bool,
    /// AES mode if applicable
    pub aes_mode: Option<(AesMode, AesVendorVersion, CompressionMethod)>,
    /// Specifies where in the extra data the AES metadata starts
    pub aes_extra_data_start: u64,
    /// extra fields, see <https://libzip.org/specifications/extrafld.txt>
    pub extra_fields: Vec<ExtraField>,
}

impl ZipFileData {
    /// Get the starting offset of the data of the compressed file
    ///
    /// # Panics
    ///
    /// Panics if the data start has not been populated yet (the `OnceLock`
    /// is only set once the local header has been read past).
    pub fn data_start(&self) -> u64 {
        *self.data_start.get().unwrap()
    }

    /// True if this entry's name marks it as a directory (trailing `/` or `\`).
    #[allow(dead_code)]
    pub fn is_dir(&self) -> bool {
        is_dir(&self.file_name)
    }

    /// Best-effort sanitized path: truncates at the first NUL, normalizes the
    /// separator to the platform's, and keeps only `Normal` components (so
    /// absolute prefixes, `.` and `..` are dropped rather than honored).
    pub fn file_name_sanitized(&self) -> PathBuf {
        let no_null_filename = match self.file_name.find('\0') {
            Some(index) => &self.file_name[0..index],
            None => &self.file_name,
        }
        .to_string();

        // zip files can contain both / and \ as separators regardless of the OS
        // and as we want to return a sanitized PathBuf that only supports the
        // OS separator let's convert incompatible separators to compatible ones
        let separator = path::MAIN_SEPARATOR;
        let opposite_separator = match separator {
            '/' => '\\',
            _ => '/',
        };
        let filename =
            no_null_filename.replace(&opposite_separator.to_string(), &separator.to_string());

        Path::new(&filename)
            .components()
            .filter(|component| matches!(*component, Component::Normal(..)))
            .fold(PathBuf::new(), |mut path, ref cur| {
                path.push(cur.as_os_str());
                path
            })
    }

    /// Stricter variant of [`Self::file_name_sanitized`]: returns `None`
    /// (instead of rewriting) when the name contains a NUL, is absolute, or
    /// ever escapes above the extraction root via `..` — the zip-slip guard.
    pub(crate) fn enclosed_name(&self) -> Option<PathBuf> {
        if self.file_name.contains('\0') {
            return None;
        }
        let path = PathBuf::from(self.file_name.to_string());
        let mut depth = 0usize;
        for component in path.components() {
            match component {
                Component::Prefix(_) | Component::RootDir => return None,
                // checked_sub fails (-> None) if `..` would climb above the root.
                Component::ParentDir => depth = depth.checked_sub(1)?,
                Component::Normal(_) => depth += 1,
                Component::CurDir => (),
            }
        }
        Some(path)
    }

    /// Get unix mode for the file
    pub(crate) const fn unix_mode(&self) -> Option<u32> {
        if self.external_attributes == 0 {
            return None;
        }

        match self.system {
            // Unix hosts store the st_mode in the high 16 bits.
            System::Unix => Some(self.external_attributes >> 16),
            System::Dos => {
                // Interpret MS-DOS directory bit
                let mut mode = if 0x10 == (self.external_attributes & 0x10) {
                    ffi::S_IFDIR | 0o0775
                } else {
                    ffi::S_IFREG | 0o0664
                };
                if 0x01 == (self.external_attributes & 0x01) {
                    // Read-only bit; strip write permissions
                    mode &= 0o0555;
                }
                Some(mode)
            }
            _ => None,
        }
    }
    /// PKZIP version needed to open this file (from APPNOTE 4.4.3.2).
    ///
    /// Computed as the maximum of the requirements implied by the compression
    /// method, the encryption scheme, and ZIP64 / directory features.
    pub fn version_needed(&self) -> u16 {
        let compression_version: u16 = match self.compression_method {
            CompressionMethod::Stored => MIN_VERSION.into(),
            #[cfg(feature = "_deflate-any")]
            CompressionMethod::Deflated => 20,
            #[cfg(feature = "bzip2")]
            CompressionMethod::Bzip2 => 46,
            #[cfg(feature = "deflate64")]
            CompressionMethod::Deflate64 => 21,
            #[cfg(feature = "lzma")]
            CompressionMethod::Lzma => 63,
            #[cfg(feature = "xz")]
            CompressionMethod::Xz => 63,
            // APPNOTE doesn't specify a version for Zstandard
            _ => DEFAULT_VERSION as u16,
        };
        // AES requires 5.1; traditional (ZipCrypto) encryption requires 2.0.
        let crypto_version: u16 = if self.aes_mode.is_some() {
            51
        } else if self.encrypted {
            20
        } else {
            10
        };
        // ZIP64 requires 4.5; a directory entry requires 2.0.
        let misc_feature_version: u16 = if self.large_file {
            45
        } else if self
            .unix_mode()
            .is_some_and(|mode| mode & S_IFDIR == S_IFDIR)
        {
            // file is directory
            20
        } else {
            10
        };
        compression_version
            .max(crypto_version)
            .max(misc_feature_version)
    }

    /// Byte length of the local extra field (0 when absent).
    #[inline(always)]
    pub(crate) fn extra_field_len(&self) -> usize {
        self.extra_field
            .as_ref()
            .map(|v| v.len())
            .unwrap_or_default()
    }

    /// Byte length of the central-directory-only extra field (0 when absent).
    #[inline(always)]
    pub(crate) fn central_extra_field_len(&self) -> usize {
        self.central_extra_field
            .as_ref()
            .map(|v| v.len())
            .unwrap_or_default()
    }

    /// Builds the in-memory entry record for a file that is about to be
    /// written, from the caller's `FileOptions` plus the raw sizes/CRC and the
    /// offsets chosen by the writer. `version_made_by` is derived from the
    /// features actually used.
    #[allow(clippy::too_many_arguments)]
    pub(crate) fn initialize_local_block<S, T: FileOptionExtension>(
        name: S,
        options: &FileOptions<T>,
        raw_values: ZipRawValues,
        header_start: u64,
        extra_data_start: Option<u64>,
        aes_extra_data_start: u64,
        compression_method: crate::compression::CompressionMethod,
        aes_mode: Option<(AesMode, AesVendorVersion, CompressionMethod)>,
        extra_field: &[u8],
    ) -> Self
    where
        S: ToString,
    {
        // 0o100644: regular file, rw-r--r-- — used when the caller set none.
        let permissions = options.permissions.unwrap_or(0o100644);
        let file_name: Box<str> = name.to_string().into_boxed_str();
        let file_name_raw: Box<[u8]> = file_name.bytes().collect();
        let mut local_block = ZipFileData {
            system: System::Unix,
            version_made_by: DEFAULT_VERSION,
            encrypted: options.encrypt_with.is_some(),
            using_data_descriptor: false,
            is_utf8:
!file_name.is_ascii(),
            compression_method,
            compression_level: options.compression_level,
            last_modified_time: Some(options.last_modified_time),
            crc32: raw_values.crc32,
            compressed_size: raw_values.compressed_size,
            uncompressed_size: raw_values.uncompressed_size,
            file_name, // Never used for saving, but used as map key in insert_file_data()
            file_name_raw,
            extra_field: Some(extra_field.to_vec().into()),
            central_extra_field: options.extended_options.central_extra_data().cloned(),
            file_comment: String::with_capacity(0).into_boxed_str(),
            header_start,
            data_start: OnceLock::new(),
            central_header_start: 0,
            // Unix permissions live in the high 16 bits of external attributes.
            external_attributes: permissions << 16,
            large_file: options.large_file,
            aes_mode,
            extra_fields: Vec::new(),
            extra_data_start,
            aes_extra_data_start,
        };
        // Derive "version made by" from the features this entry actually uses.
        local_block.version_made_by = local_block.version_needed() as u8;
        local_block
    }

    /// Builds a `ZipFileData` from an already-parsed local file header,
    /// reading the variable-length name and extra field from `reader`.
    ///
    /// # Errors
    ///
    /// Rejects encrypted entries and entries that rely on a data descriptor,
    /// since neither can be streamed from the local header alone.
    pub(crate) fn from_local_block<R: std::io::Read>(
        block: ZipLocalEntryBlock,
        reader: &mut R,
    ) -> ZipResult<Self> {
        let ZipLocalEntryBlock {
            // magic,
            version_made_by,
            flags,
            compression_method,
            last_mod_time,
            last_mod_date,
            crc32,
            compressed_size,
            uncompressed_size,
            file_name_length,
            extra_field_length,
            ..
        } = block;

        // General-purpose bit 0: entry is encrypted.
        let encrypted: bool = flags & 1 == 1;
        if encrypted {
            return Err(ZipError::UnsupportedArchive(
                "Encrypted files are not supported",
            ));
        }
        /* FIXME: these were previously incorrect: add testing!
         */
        /* flags & (1 << 3) != 0 */
        // General-purpose bit 3: sizes/CRC are deferred to a data descriptor.
        let using_data_descriptor: bool = flags & (1 << 3) == 1 << 3;
        if using_data_descriptor {
            return Err(ZipError::UnsupportedArchive(
                "The file length is not available in the local header",
            ));
        }
        /* flags & (1 << 1) != 0 */
        // General-purpose bit 11: name and comment are UTF-8. (The commented-out
        // expressions above record the previously-incorrect bit masks.)
        let is_utf8: bool = flags & (1 << 11) != 0;
        let compression_method = crate::CompressionMethod::parse_from_u16(compression_method);
        let file_name_length: usize = file_name_length.into();
        let extra_field_length: usize = extra_field_length.into();
        let mut file_name_raw = vec![0u8; file_name_length];
        reader.read_exact(&mut file_name_raw)?;
        let mut extra_field = vec![0u8; extra_field_length];
        reader.read_exact(&mut extra_field)?;
        // Non-UTF-8 names are decoded as CP437, per APPNOTE appendix D.
        let file_name: Box<str> = match is_utf8 {
            true => String::from_utf8_lossy(&file_name_raw).into(),
            false => file_name_raw.clone().from_cp437().into(),
        };
        // High byte of version_made_by identifies the host system.
        let system: u8 = (version_made_by >> 8).try_into().unwrap();
        Ok(ZipFileData {
            system: System::from(system),
            /* NB: this strips the top 8 bits! */
            version_made_by: version_made_by as u8,
            encrypted,
            using_data_descriptor,
            is_utf8,
            compression_method,
            compression_level: None,
            // An unparseable timestamp becomes None rather than an error.
            last_modified_time: DateTime::try_from_msdos(last_mod_date, last_mod_time).ok(),
            crc32,
            compressed_size: compressed_size.into(),
            uncompressed_size: uncompressed_size.into(),
            file_name,
            file_name_raw: file_name_raw.into(),
            extra_field: Some(Arc::new(extra_field)),
            central_extra_field: None,
            file_comment: String::with_capacity(0).into_boxed_str(), // file comment is only available in the central directory
            // header_start and data start are not available, but also don't matter, since seeking is
            // not available.
            header_start: 0,
            data_start: OnceLock::new(),
            central_header_start: 0,
            // The external_attributes field is only available in the central directory.
            // We set this to zero, which should be valid as the docs state 'If input came
            // from standard input, this field is set to zero.'
            external_attributes: 0,
            large_file: false,
            aes_mode: None,
            extra_fields: Vec::new(),
            extra_data_start: None,
            aes_extra_data_start: 0,
        })
    }

    /// True when the raw (undecoded) file name is valid UTF-8.
    fn is_utf8(&self) -> bool {
        std::str::from_utf8(&self.file_name_raw).is_ok()
    }

    /// True when the raw file name is pure ASCII (UTF-8 flag then unnecessary).
    fn is_ascii(&self) -> bool {
        self.file_name_raw.is_ascii()
    }

    /// Computes the general-purpose bit flags to write back out:
    /// bit 11 (UTF-8) only when the name needs it, bit 0 (encrypted).
    fn flags(&self) -> u16 {
        let utf8_bit: u16 = if self.is_utf8() && !self.is_ascii() {
            1u16 << 11
        } else {
            0
        };
        let encrypted_bit: u16 = if self.encrypted { 1u16 << 0 } else { 0 };
        utf8_bit | encrypted_bit
    }

    /// Clamps a 64-bit size into the 32-bit header field, writing the ZIP64
    /// sentinel (0xFFFFFFFF) when the entry is marked large.
    fn clamp_size_field(&self, field: u64) -> u32 {
        if self.large_file {
            spec::ZIP64_BYTES_THR as u32
        } else {
            field.min(spec::ZIP64_BYTES_THR).try_into().unwrap()
        }
    }

    /// Serializable local file header for this entry.
    pub(crate) fn local_block(&self) -> ZipResult<ZipLocalEntryBlock> {
        let compressed_size: u32 = self.clamp_size_field(self.compressed_size);
        let uncompressed_size: u32 = self.clamp_size_field(self.uncompressed_size);
        let extra_field_length: u16 = self
            .extra_field_len()
            .try_into()
            .map_err(|_| ZipError::InvalidArchive("Extra data field is too large"))?;

        let last_modified_time = self
            .last_modified_time
            .unwrap_or_else(DateTime::default_for_write);
        Ok(ZipLocalEntryBlock {
            magic: ZipLocalEntryBlock::MAGIC,
            version_made_by: self.version_needed(),
            flags: self.flags(),
            compression_method: self.compression_method.serialize_to_u16(),
            last_mod_time: last_modified_time.timepart(),
            last_mod_date: last_modified_time.datepart(),
            crc32: self.crc32,
            compressed_size,
            uncompressed_size,
            file_name_length: self.file_name_raw.len().try_into().unwrap(),
            extra_field_length,
        })
    }

    /// Serializable central directory header for this entry; sizes and offset
    /// are clamped at the ZIP64 threshold (the real values then live in the
    /// ZIP64 extra field).
    pub(crate) fn block(&self) -> ZipResult<ZipCentralEntryBlock> {
        let extra_field_len: u16 = self.extra_field_len().try_into().unwrap();
        let central_extra_field_len: u16 = self.central_extra_field_len().try_into().unwrap();
        let last_modified_time = self
            .last_modified_time
            .unwrap_or_else(DateTime::default_for_write);
        let version_to_extract = self.version_needed();
        // "made by" must never be lower than "needed to extract".
        let version_made_by = (self.version_made_by as u16).max(version_to_extract);
        Ok(ZipCentralEntryBlock {
            magic:
ZipCentralEntryBlock::MAGIC,
            // High byte: host system; low byte: spec version.
            version_made_by: ((self.system as u16) << 8) | version_made_by,
            version_to_extract,
            flags: self.flags(),
            compression_method: self.compression_method.serialize_to_u16(),
            last_mod_time: last_modified_time.timepart(),
            last_mod_date: last_modified_time.datepart(),
            crc32: self.crc32,
            compressed_size: self
                .compressed_size
                .min(spec::ZIP64_BYTES_THR)
                .try_into()
                .unwrap(),
            uncompressed_size: self
                .uncompressed_size
                .min(spec::ZIP64_BYTES_THR)
                .try_into()
                .unwrap(),
            file_name_length: self.file_name_raw.len().try_into().unwrap(),
            extra_field_length: extra_field_len.checked_add(central_extra_field_len).ok_or(
                ZipError::InvalidArchive("Extra field length in central directory exceeds 64KiB"),
            )?,
            file_comment_length: self.file_comment.len().try_into().unwrap(),
            disk_number: 0,
            internal_file_attributes: 0,
            external_file_attributes: self.external_attributes,
            offset: self
                .header_start
                .min(spec::ZIP64_BYTES_THR)
                .try_into()
                .unwrap(),
        })
    }

    /// ZIP64 extra-field record for this entry, or `None` when no field
    /// exceeds the 32-bit threshold and the entry is not forced large.
    pub(crate) fn zip64_extra_field_block(&self) -> Option<Zip64ExtraFieldBlock> {
        Zip64ExtraFieldBlock::maybe_new(
            self.large_file,
            self.uncompressed_size,
            self.compressed_size,
            self.header_start,
        )
    }
}

/// On-disk layout of a central directory file header (APPNOTE 4.3.12).
/// `repr(packed, C)` matches the wire format exactly; see `to_and_from_le!`.
#[derive(Copy, Clone, Debug)]
#[repr(packed, C)]
pub(crate) struct ZipCentralEntryBlock {
    magic: spec::Magic,
    pub version_made_by: u16,
    pub version_to_extract: u16,
    pub flags: u16,
    pub compression_method: u16,
    pub last_mod_time: u16,
    pub last_mod_date: u16,
    pub crc32: u32,
    pub compressed_size: u32,
    pub uncompressed_size: u32,
    pub file_name_length: u16,
    pub extra_field_length: u16,
    pub file_comment_length: u16,
    pub disk_number: u16,
    pub internal_file_attributes: u16,
    pub external_file_attributes: u32,
    pub offset: u32,
}

unsafe impl Pod for ZipCentralEntryBlock {}

impl FixedSizeBlock for ZipCentralEntryBlock {
    const MAGIC: spec::Magic = spec::Magic::CENTRAL_DIRECTORY_HEADER_SIGNATURE;

    #[inline(always)]
    fn magic(self) -> spec::Magic {
        self.magic
    }

    const WRONG_MAGIC_ERROR: ZipError =
        ZipError::InvalidArchive("Invalid Central Directory header");

    to_and_from_le![
        (magic, spec::Magic),
        (version_made_by, u16),
        (version_to_extract, u16),
        (flags, u16),
        (compression_method, u16),
        (last_mod_time, u16),
        (last_mod_date, u16),
        (crc32, u32),
        (compressed_size, u32),
        (uncompressed_size, u32),
        (file_name_length, u16),
        (extra_field_length, u16),
        (file_comment_length, u16),
        (disk_number, u16),
        (internal_file_attributes, u16),
        (external_file_attributes, u32),
        (offset, u32),
    ];
}

/// On-disk layout of a local file header (APPNOTE 4.3.7).
#[derive(Copy, Clone, Debug)]
#[repr(packed, C)]
pub(crate) struct ZipLocalEntryBlock {
    magic: spec::Magic,
    pub version_made_by: u16,
    pub flags: u16,
    pub compression_method: u16,
    pub last_mod_time: u16,
    pub last_mod_date: u16,
    pub crc32: u32,
    pub compressed_size: u32,
    pub uncompressed_size: u32,
    pub file_name_length: u16,
    pub extra_field_length: u16,
}

unsafe impl Pod for ZipLocalEntryBlock {}

impl FixedSizeBlock for ZipLocalEntryBlock {
    const MAGIC: spec::Magic = spec::Magic::LOCAL_FILE_HEADER_SIGNATURE;

    #[inline(always)]
    fn magic(self) -> spec::Magic {
        self.magic
    }

    const WRONG_MAGIC_ERROR: ZipError = ZipError::InvalidArchive("Invalid local file header");

    to_and_from_le![
        (magic, spec::Magic),
        (version_made_by, u16),
        (flags, u16),
        (compression_method, u16),
        (last_mod_time, u16),
        (last_mod_date, u16),
        (crc32, u32),
        (compressed_size, u32),
        (uncompressed_size, u32),
        (file_name_length, u16),
        (extra_field_length, u16),
    ];
}

/// In-memory form of the ZIP64 extended-information extra field (APPNOTE
/// 4.5.3). Only the fields that overflow 32 bits are written, so `size`
/// records the variable payload length.
#[derive(Copy, Clone, Debug)]
pub(crate) struct Zip64ExtraFieldBlock {
    magic: spec::ExtraFieldMagic,
    size: u16,
    uncompressed_size: Option<u64>,
    compressed_size: Option<u64>,
    header_start: Option<u64>,
    // Excluded fields:
    // u32: disk start number
}

impl Zip64ExtraFieldBlock {
    /// Returns a block containing exactly the fields that need ZIP64 width
    /// (or that `large_file` forces), or `None` if none do.
    pub(crate) fn maybe_new(
        large_file: bool,
        uncompressed_size: u64,
        compressed_size: u64,
        header_start: u64,
    ) -> Option<Zip64ExtraFieldBlock> {
        let mut size: u16 = 0;
        let uncompressed_size = if uncompressed_size >= ZIP64_BYTES_THR || large_file {
            size += mem::size_of::<u64>() as u16;
            Some(uncompressed_size)
        } else {
            None
        };
        let compressed_size = if compressed_size >= ZIP64_BYTES_THR || large_file {
            size += mem::size_of::<u64>() as u16;
            Some(compressed_size)
        } else {
            None
        };
        let header_start = if header_start >= ZIP64_BYTES_THR {
            size += mem::size_of::<u64>() as u16;
            Some(header_start)
        } else {
            None
        };
        if size == 0 {
            return None;
        }

        Some(Zip64ExtraFieldBlock {
            magic: spec::ExtraFieldMagic::ZIP64_EXTRA_FIELD_TAG,
            size,
            uncompressed_size,
            compressed_size,
            header_start,
        })
    }
}

impl Zip64ExtraFieldBlock {
    /// Total serialized length: tag + length word + variable payload.
    pub fn full_size(&self) -> usize {
        assert!(self.size > 0);
        self.size as usize + mem::size_of::<spec::ExtraFieldMagic>() + mem::size_of::<u16>()
    }

    /// Serializes the field little-endian, in the spec-mandated order:
    /// uncompressed size, compressed size, then local header offset.
    pub fn serialize(self) -> Box<[u8]> {
        let Self {
            magic,
            size,
            uncompressed_size,
            compressed_size,
            header_start,
        } = self;
        let full_size = self.full_size();

        let mut ret = Vec::with_capacity(full_size);
        ret.extend(magic.to_le_bytes());
        ret.extend(u16::to_le_bytes(size));
        if let Some(uncompressed_size) = uncompressed_size {
            ret.extend(u64::to_le_bytes(uncompressed_size));
        }
        if let Some(compressed_size) = compressed_size {
            ret.extend(u64::to_le_bytes(compressed_size));
        }
        if let Some(header_start) = header_start {
            ret.extend(u64::to_le_bytes(header_start));
        }
        debug_assert_eq!(ret.len(), full_size);
        ret.into_boxed_slice()
    }
}

/// The encryption specification used to encrypt a file with AES.
///
/// According to the [specification](https://www.winzip.com/win/en/aes_info.html#winzip11) AE-2
/// does not make use of the CRC check.
#[derive(Copy, Clone, Debug)]
#[repr(u16)]
pub enum AesVendorVersion {
    Ae1 = 0x0001,
    Ae2 = 0x0002,
}

/// AES variant used.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
#[cfg_attr(fuzzing, derive(arbitrary::Arbitrary))]
#[repr(u8)]
pub enum AesMode {
    /// 128-bit AES encryption.
    Aes128 = 0x01,
    /// 192-bit AES encryption.
    Aes192 = 0x02,
    /// 256-bit AES encryption.
    Aes256 = 0x03,
}

#[cfg(feature = "aes-crypto")]
impl AesMode {
    /// Length of the salt for the given AES mode.
    // Per the WinZip AE spec, the salt is half the key length.
    pub const fn salt_length(&self) -> usize {
        self.key_length() / 2
    }

    /// Length of the key for the given AES mode.
    pub const fn key_length(&self) -> usize {
        match self {
            Self::Aes128 => 16,
            Self::Aes192 => 24,
            Self::Aes256 => 32,
        }
    }
}

#[cfg(test)]
mod test {
    // Round-trip check of the System <-> u8 conversions, including the
    // lossy Unknown -> 4 mapping.
    #[test]
    fn system() {
        use super::System;
        assert_eq!(u8::from(System::Dos), 0u8);
        assert_eq!(System::Dos as u8, 0u8);
        assert_eq!(System::Unix as u8, 3u8);
        assert_eq!(u8::from(System::Unix), 3u8);
        assert_eq!(System::from(0), System::Dos);
        assert_eq!(System::from(3), System::Unix);
        assert_eq!(u8::from(System::Unknown), 4u8);
        assert_eq!(System::Unknown as u8, 4u8);
    }

    // file_name_sanitized must strip NUL tails, parent-dir traversal and
    // absolute prefixes from a hostile entry name.
    #[test]
    fn sanitize() {
        use super::*;
        let file_name = "/path/../../../../etc/./passwd\0/etc/shadow".to_string();
        let data = ZipFileData {
            system: System::Dos,
            version_made_by: 0,
            encrypted: false,
            using_data_descriptor: false,
            is_utf8: true,
            compression_method: crate::compression::CompressionMethod::Stored,
            compression_level: None,
            last_modified_time: None,
            crc32: 0,
            compressed_size: 0,
            uncompressed_size: 0,
            file_name: file_name.clone().into_boxed_str(),
            file_name_raw: file_name.into_bytes().into_boxed_slice(),
            extra_field: None,
            central_extra_field: None,
            file_comment: String::with_capacity(0).into_boxed_str(),
            header_start: 0,
            extra_data_start: None,
            data_start: OnceLock::new(),
            central_header_start: 0,
            external_attributes: 0,
            large_file: false,
            aes_mode: None,
            aes_extra_data_start: 0,
            extra_fields: Vec::new(),
        };
        assert_eq!(data.file_name_sanitized(), PathBuf::from("path/etc/passwd"));
    }

    // Default() must encode exactly 1980-01-01 00:00:00.
    #[test]
    #[allow(clippy::unusual_byte_groupings)]
    fn datetime_default() {
        use super::DateTime;
        let dt = DateTime::default();
        assert_eq!(dt.timepart(), 0);
        assert_eq!(dt.datepart(), 0b0000000_0001_00001);
    }

    // The largest representable instant, 2107-12-31 23:59:58, packs into
    // all-ones field groups.
    #[test]
    #[allow(clippy::unusual_byte_groupings)]
    fn datetime_max() {
        use super::DateTime;
        let dt = DateTime::from_date_and_time(2107, 12, 31, 23, 59, 58).unwrap();
        assert_eq!(dt.timepart(), 0b10111_111011_11101);
        assert_eq!(dt.datepart(), 0b1111111_1100_11111);
    }
#[test] fn datetime_equality() { use super::DateTime; let dt = DateTime::from_date_and_time(2018, 11, 17, 10, 38, 30).unwrap(); assert_eq!( dt, DateTime::from_date_and_time(2018, 11, 17, 10, 38, 30).unwrap() ); assert_ne!(dt, DateTime::default()); } #[test] fn datetime_order() { use std::cmp::Ordering; use super::DateTime; let dt = DateTime::from_date_and_time(2018, 11, 17, 10, 38, 30).unwrap(); assert_eq!( dt.cmp(&DateTime::from_date_and_time(2018, 11, 17, 10, 38, 30).unwrap()), Ordering::Equal ); // year assert!(dt < DateTime::from_date_and_time(2019, 11, 17, 10, 38, 30).unwrap()); assert!(dt > DateTime::from_date_and_time(2017, 11, 17, 10, 38, 30).unwrap()); // month assert!(dt < DateTime::from_date_and_time(2018, 12, 17, 10, 38, 30).unwrap()); assert!(dt > DateTime::from_date_and_time(2018, 10, 17, 10, 38, 30).unwrap()); // day assert!(dt < DateTime::from_date_and_time(2018, 11, 18, 10, 38, 30).unwrap()); assert!(dt > DateTime::from_date_and_time(2018, 11, 16, 10, 38, 30).unwrap()); // hour assert!(dt < DateTime::from_date_and_time(2018, 11, 17, 11, 38, 30).unwrap()); assert!(dt > DateTime::from_date_and_time(2018, 11, 17, 9, 38, 30).unwrap()); // minute assert!(dt < DateTime::from_date_and_time(2018, 11, 17, 10, 39, 30).unwrap()); assert!(dt > DateTime::from_date_and_time(2018, 11, 17, 10, 37, 30).unwrap()); // second assert!(dt < DateTime::from_date_and_time(2018, 11, 17, 10, 38, 32).unwrap()); assert_eq!( dt.cmp(&DateTime::from_date_and_time(2018, 11, 17, 10, 38, 31).unwrap()), Ordering::Equal ); assert!(dt > DateTime::from_date_and_time(2018, 11, 17, 10, 38, 29).unwrap()); assert!(dt > DateTime::from_date_and_time(2018, 11, 17, 10, 38, 28).unwrap()); } #[test] fn datetime_display() { use super::DateTime; assert_eq!(format!("{}", DateTime::default()), "1980-01-01 00:00:00"); assert_eq!( format!( "{}", DateTime::from_date_and_time(2018, 11, 17, 10, 38, 30).unwrap() ), "2018-11-17 10:38:30" ); assert_eq!( format!( "{}", DateTime::from_date_and_time(2107, 12, 
31, 23, 59, 58).unwrap() ), "2107-12-31 23:59:58" ); } #[test] fn datetime_bounds() { use super::DateTime; assert!(DateTime::from_date_and_time(2000, 1, 1, 23, 59, 60).is_ok()); assert!(DateTime::from_date_and_time(2000, 1, 1, 24, 0, 0).is_err()); assert!(DateTime::from_date_and_time(2000, 1, 1, 0, 60, 0).is_err()); assert!(DateTime::from_date_and_time(2000, 1, 1, 0, 0, 61).is_err()); assert!(DateTime::from_date_and_time(2107, 12, 31, 0, 0, 0).is_ok()); assert!(DateTime::from_date_and_time(1980, 1, 1, 0, 0, 0).is_ok()); assert!(DateTime::from_date_and_time(1979, 1, 1, 0, 0, 0).is_err()); assert!(DateTime::from_date_and_time(1980, 0, 1, 0, 0, 0).is_err()); assert!(DateTime::from_date_and_time(1980, 1, 0, 0, 0, 0).is_err()); assert!(DateTime::from_date_and_time(2108, 12, 31, 0, 0, 0).is_err()); assert!(DateTime::from_date_and_time(2107, 13, 31, 0, 0, 0).is_err()); assert!(DateTime::from_date_and_time(2107, 12, 32, 0, 0, 0).is_err()); assert!(DateTime::from_date_and_time(2018, 1, 31, 0, 0, 0).is_ok()); assert!(DateTime::from_date_and_time(2018, 2, 28, 0, 0, 0).is_ok()); assert!(DateTime::from_date_and_time(2018, 2, 29, 0, 0, 0).is_err()); assert!(DateTime::from_date_and_time(2018, 3, 31, 0, 0, 0).is_ok()); assert!(DateTime::from_date_and_time(2018, 4, 30, 0, 0, 0).is_ok()); assert!(DateTime::from_date_and_time(2018, 4, 31, 0, 0, 0).is_err()); assert!(DateTime::from_date_and_time(2018, 5, 31, 0, 0, 0).is_ok()); assert!(DateTime::from_date_and_time(2018, 6, 30, 0, 0, 0).is_ok()); assert!(DateTime::from_date_and_time(2018, 6, 31, 0, 0, 0).is_err()); assert!(DateTime::from_date_and_time(2018, 7, 31, 0, 0, 0).is_ok()); assert!(DateTime::from_date_and_time(2018, 8, 31, 0, 0, 0).is_ok()); assert!(DateTime::from_date_and_time(2018, 9, 30, 0, 0, 0).is_ok()); assert!(DateTime::from_date_and_time(2018, 9, 31, 0, 0, 0).is_err()); assert!(DateTime::from_date_and_time(2018, 10, 31, 0, 0, 0).is_ok()); assert!(DateTime::from_date_and_time(2018, 11, 30, 0, 0, 0).is_ok()); 
assert!(DateTime::from_date_and_time(2018, 11, 31, 0, 0, 0).is_err()); assert!(DateTime::from_date_and_time(2018, 12, 31, 0, 0, 0).is_ok()); // leap year: divisible by 4 assert!(DateTime::from_date_and_time(2024, 2, 29, 0, 0, 0).is_ok()); // leap year: divisible by 100 and by 400 assert!(DateTime::from_date_and_time(2000, 2, 29, 0, 0, 0).is_ok()); // common year: divisible by 100 but not by 400 assert!(DateTime::from_date_and_time(2100, 2, 29, 0, 0, 0).is_err()); } #[cfg(feature = "time")] use time::{format_description::well_known::Rfc3339, OffsetDateTime}; #[cfg(feature = "time")] #[test] fn datetime_try_from_offset_datetime() { use time::macros::datetime; use super::DateTime; // 2018-11-17 10:38:30 let dt = DateTime::try_from(datetime!(2018-11-17 10:38:30 UTC)).unwrap(); assert_eq!(dt.year(), 2018); assert_eq!(dt.month(), 11); assert_eq!(dt.day(), 17); assert_eq!(dt.hour(), 10); assert_eq!(dt.minute(), 38); assert_eq!(dt.second(), 30); } #[cfg(feature = "time")] #[test] fn datetime_try_from_bounds() { use super::DateTime; use time::macros::datetime; // 1979-12-31 23:59:59 assert!(DateTime::try_from(datetime!(1979-12-31 23:59:59 UTC)).is_err()); // 1980-01-01 00:00:00 assert!(DateTime::try_from(datetime!(1980-01-01 00:00:00 UTC)).is_ok()); // 2107-12-31 23:59:59 assert!(DateTime::try_from(datetime!(2107-12-31 23:59:59 UTC)).is_ok()); // 2108-01-01 00:00:00 assert!(DateTime::try_from(datetime!(2108-01-01 00:00:00 UTC)).is_err()); } #[cfg(feature = "time")] #[test] fn offset_datetime_try_from_datetime() { use time::macros::datetime; use super::DateTime; // 2018-11-17 10:38:30 UTC let dt = OffsetDateTime::try_from(DateTime::try_from_msdos(0x4D71, 0x54CF).unwrap()).unwrap(); assert_eq!(dt, datetime!(2018-11-17 10:38:30 UTC)); } #[cfg(feature = "time")] #[test] fn offset_datetime_try_from_bounds() { use super::DateTime; // 1980-00-00 00:00:00 assert!(OffsetDateTime::try_from(unsafe { DateTime::from_msdos_unchecked(0x0000, 0x0000) }) .is_err()); // 2107-15-31 31:63:62 
assert!(OffsetDateTime::try_from(unsafe { DateTime::from_msdos_unchecked(0xFFFF, 0xFFFF) }) .is_err()); } #[test] #[allow(deprecated)] fn time_conversion() { use super::DateTime; let dt = DateTime::try_from_msdos(0x4D71, 0x54CF).unwrap(); assert_eq!(dt.year(), 2018); assert_eq!(dt.month(), 11); assert_eq!(dt.day(), 17); assert_eq!(dt.hour(), 10); assert_eq!(dt.minute(), 38); assert_eq!(dt.second(), 30); let dt = DateTime::try_from((0x4D71, 0x54CF)).unwrap(); assert_eq!(dt.year(), 2018); assert_eq!(dt.month(), 11); assert_eq!(dt.day(), 17); assert_eq!(dt.hour(), 10); assert_eq!(dt.minute(), 38); assert_eq!(dt.second(), 30); #[cfg(feature = "time")] assert_eq!( dt.to_time().unwrap().format(&Rfc3339).unwrap(), "2018-11-17T10:38:30Z" ); assert_eq!(<(u16, u16)>::from(dt), (0x4D71, 0x54CF)); } #[test] #[allow(deprecated)] fn time_out_of_bounds() { use super::DateTime; let dt = unsafe { DateTime::from_msdos_unchecked(0xFFFF, 0xFFFF) }; assert_eq!(dt.year(), 2107); assert_eq!(dt.month(), 15); assert_eq!(dt.day(), 31); assert_eq!(dt.hour(), 31); assert_eq!(dt.minute(), 63); assert_eq!(dt.second(), 62); #[cfg(feature = "time")] assert!(dt.to_time().is_err()); let dt = unsafe { DateTime::from_msdos_unchecked(0x0000, 0x0000) }; assert_eq!(dt.year(), 1980); assert_eq!(dt.month(), 0); assert_eq!(dt.day(), 0); assert_eq!(dt.hour(), 0); assert_eq!(dt.minute(), 0); assert_eq!(dt.second(), 0); #[cfg(feature = "time")] assert!(dt.to_time().is_err()); } #[cfg(feature = "time")] #[test] fn time_at_january() { use super::DateTime; // 2020-01-01 00:00:00 let clock = OffsetDateTime::from_unix_timestamp(1_577_836_800).unwrap(); assert!(DateTime::try_from(clock).is_ok()); } }
#![allow(missing_docs)]

// This module appears to be the crate's "unstable" API surface (it is imported
// elsewhere as `crate::unstable`, e.g. `use crate::unstable::path_to_string`):
// helpers and extension traits that are public but not subject to the usual
// stability guarantees.

use std::borrow::Cow;
use std::io;
use std::io::{Read, Write};
use std::path::{Component, Path, MAIN_SEPARATOR};

/// Provides high level API for reading from a stream.
pub mod stream {
    pub use crate::read::stream::*;
}

/// Types for creating ZIP archives.
pub mod write {
    use crate::write::{FileOptionExtension, FileOptions};

    /// Unstable methods for [`FileOptions`].
    pub trait FileOptionsExt {
        /// Write the file with the given password using the deprecated ZipCrypto algorithm.
        ///
        /// This is not recommended for new archives, as ZipCrypto is not secure.
        fn with_deprecated_encryption(self, password: &[u8]) -> Self;
    }

    impl<T: FileOptionExtension> FileOptionsExt for FileOptions<'_, T> {
        fn with_deprecated_encryption(self, password: &[u8]) -> FileOptions<'static, T> {
            // Not self-recursion: inherent methods take precedence over trait
            // methods, so this dispatches to the `pub(crate)`
            // `FileOptions::with_deprecated_encryption` defined in `crate::write`.
            self.with_deprecated_encryption(password)
        }
    }
}

/// Helper methods for writing unsigned integers in little-endian form.
///
/// ZIP is a little-endian format throughout, so every multi-byte integer the
/// writer emits goes through one of these helpers.
pub trait LittleEndianWriteExt: Write {
    /// Write a `u16` as 2 little-endian bytes.
    fn write_u16_le(&mut self, input: u16) -> io::Result<()> {
        self.write_all(&input.to_le_bytes())
    }

    /// Write a `u32` as 4 little-endian bytes.
    fn write_u32_le(&mut self, input: u32) -> io::Result<()> {
        self.write_all(&input.to_le_bytes())
    }

    /// Write a `u64` as 8 little-endian bytes.
    fn write_u64_le(&mut self, input: u64) -> io::Result<()> {
        self.write_all(&input.to_le_bytes())
    }

    /// Write a `u128` as 16 little-endian bytes.
    fn write_u128_le(&mut self, input: u128) -> io::Result<()> {
        self.write_all(&input.to_le_bytes())
    }
}

// Blanket impl: every `Write` (sized or not) gets the LE helpers for free.
impl<W: Write + ?Sized> LittleEndianWriteExt for W {}

/// Helper methods for reading unsigned integers in little-endian form.
pub trait LittleEndianReadExt: Read {
    /// Read 2 bytes and decode them as a little-endian `u16`.
    fn read_u16_le(&mut self) -> io::Result<u16> {
        let mut out = [0u8; 2];
        self.read_exact(&mut out)?;
        Ok(u16::from_le_bytes(out))
    }

    /// Read 4 bytes and decode them as a little-endian `u32`.
    fn read_u32_le(&mut self) -> io::Result<u32> {
        let mut out = [0u8; 4];
        self.read_exact(&mut out)?;
        Ok(u32::from_le_bytes(out))
    }

    /// Read 8 bytes and decode them as a little-endian `u64`.
    fn read_u64_le(&mut self) -> io::Result<u64> {
        let mut out = [0u8; 8];
        self.read_exact(&mut out)?;
        Ok(u64::from_le_bytes(out))
    }
}

// Blanket impl: every `Read` gets the LE helpers for free.
impl<R: Read> LittleEndianReadExt for R {}

/// Converts a path to the ZIP format (forward-slash-delimited and normalized).
///
/// Empty paths, `.` and `..` collapse to the empty string. Otherwise the path
/// is rebuilt from its components: `Normal` components are kept, `ParentDir`
/// pops the previous component, and all other component kinds (root, prefix,
/// `CurDir`) are dropped. The `maybe_original` fast path tries to avoid that
/// rebuild by reusing a slice of the input when it can prove the input is
/// already in normalized form.
pub fn path_to_string<T: AsRef<Path>>(path: T) -> Box<str> {
    let mut maybe_original = None;
    if let Some(original) = path.as_ref().to_str() {
        // Degenerate paths normalize to nothing.
        if original.is_empty() || original == "." || original == ".." {
            return String::new().into_boxed_str();
        }
        if original.starts_with(MAIN_SEPARATOR) {
            if original.len() == 1 {
                // A bare separator is returned as-is.
                return MAIN_SEPARATOR.to_string().into_boxed_str();
            // NOTE(review): `[char; N]` implements `Pattern` as "contains ANY
            // of these chars", NOT as a substring match — so e.g.
            // `original.contains([MAIN_SEPARATOR, MAIN_SEPARATOR])` is true
            // whenever the string contains a single separator anywhere, and
            // does not specifically detect `"//"`. The predicate is therefore
            // stricter than a substring check would be; inputs it rejects just
            // fall through to the (correct, but allocating) component-based
            // rebuild below, so the result is still right — but confirm
            // whether substring matching was intended here.
            } else if (MAIN_SEPARATOR == '/' || !original[1..].contains(MAIN_SEPARATOR))
                && !original.ends_with('.')
                && !original.contains([MAIN_SEPARATOR, MAIN_SEPARATOR])
                && !original.contains([MAIN_SEPARATOR, '.', MAIN_SEPARATOR])
                && !original.contains([MAIN_SEPARATOR, '.', '.', MAIN_SEPARATOR])
            {
                // Absolute path already looks normalized: remember the slice
                // past the leading separator so we can return it untouched if
                // the component walk below agrees.
                maybe_original = Some(&original[1..]);
            }
        } else if !original.contains(MAIN_SEPARATOR) {
            // Single-component relative path: nothing to normalize.
            return original.into();
        }
    }
    // `recreate` means "the fast-path slice cannot be used"; the component walk
    // below may also set it when it has to change anything.
    let mut recreate = maybe_original.is_none();
    let mut normalized_components = Vec::new();

    for component in path.as_ref().components() {
        match component {
            Component::Normal(os_str) => match os_str.to_str() {
                Some(valid_str) => normalized_components.push(Cow::Borrowed(valid_str)),
                None => {
                    // Non-UTF-8 component: lossy-convert and force a rebuild.
                    recreate = true;
                    normalized_components.push(os_str.to_string_lossy());
                }
            },
            Component::ParentDir => {
                // `..` removes the previous component; popping an empty Vec is
                // a no-op, so `..` past the root is silently ignored.
                recreate = true;
                normalized_components.pop();
            }
            _ => {
                // RootDir / Prefix (drive letter) / CurDir are dropped.
                recreate = true;
            }
        }
    }
    if recreate {
        normalized_components.join("/").into()
    } else {
        // Fast path: `recreate` stayed false, which only happens when
        // `maybe_original` was set above, so the unwrap cannot panic.
        maybe_original.unwrap().into()
    }
}
//! Types for creating ZIP archives

#[cfg(feature = "aes-crypto")]
use crate::aes::AesWriter;
use crate::compression::CompressionMethod;
use crate::read::{
    find_content, parse_single_extra_field, Config, ZipArchive, ZipFile, ZipFileReader,
};
use crate::result::{ZipError, ZipResult};
use crate::spec::{self, FixedSizeBlock, Pod, Zip32CDEBlock};
#[cfg(feature = "aes-crypto")]
use crate::types::AesMode;
use crate::types::{
    ffi, AesVendorVersion, DateTime, Zip64ExtraFieldBlock, ZipFileData, ZipLocalEntryBlock,
    ZipRawValues, MIN_VERSION,
};
use crate::write::ffi::S_IFLNK;
#[cfg(any(feature = "_deflate-any", feature = "bzip2", feature = "zstd",))]
use core::num::NonZeroU64;
use crc32fast::Hasher;
use indexmap::IndexMap;
use std::borrow::ToOwned;
use std::default::Default;
use std::fmt::{Debug, Formatter};
use std::io;
use std::io::prelude::*;
use std::io::Cursor;
use std::io::{BufReader, SeekFrom};
use std::marker::PhantomData;
use std::mem;
use std::str::{from_utf8, Utf8Error};
use std::sync::Arc;

#[cfg(feature = "deflate-flate2")]
use flate2::{write::DeflateEncoder, Compression};

#[cfg(feature = "bzip2")]
use bzip2::write::BzEncoder;

#[cfg(feature = "deflate-zopfli")]
use zopfli::Options;

#[cfg(feature = "deflate-zopfli")]
use std::io::BufWriter;
use std::mem::size_of;
use std::path::Path;

#[cfg(feature = "zstd")]
use zstd::stream::write::Encoder as ZstdEncoder;

/// Innermost sink layer: the raw writer, either passed through unchanged or
/// wrapped in one of the supported per-file encryptors. Compression (see
/// [`GenericZipWriter`]) is layered on top of this, so bytes are compressed
/// first and encrypted second.
enum MaybeEncrypted<W> {
    Unencrypted(W),
    #[cfg(feature = "aes-crypto")]
    Aes(AesWriter<W>),
    ZipCrypto(crate::zipcrypto::ZipCryptoWriter<W>),
}

impl<W> Debug for MaybeEncrypted<W> {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        // Don't print W, since it may be a huge Vec<u8>
        f.write_str(match self {
            MaybeEncrypted::Unencrypted(_) => "Unencrypted",
            #[cfg(feature = "aes-crypto")]
            MaybeEncrypted::Aes(_) => "AES",
            MaybeEncrypted::ZipCrypto(_) => "ZipCrypto",
        })
    }
}

// Plain delegation: each variant forwards `write`/`flush` to its wrapped
// writer, which applies encryption (or nothing) on the way through.
impl<W: Write> Write for MaybeEncrypted<W> {
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        match self {
            MaybeEncrypted::Unencrypted(w) => w.write(buf),
            #[cfg(feature = "aes-crypto")]
            MaybeEncrypted::Aes(w) => w.write(buf),
            MaybeEncrypted::ZipCrypto(w) => w.write(buf),
        }
    }

    fn flush(&mut self) -> io::Result<()> {
        match self {
            MaybeEncrypted::Unencrypted(w) => w.flush(),
            #[cfg(feature = "aes-crypto")]
            MaybeEncrypted::Aes(w) => w.flush(),
            MaybeEncrypted::ZipCrypto(w) => w.flush(),
        }
    }
}

/// Compression layer wrapped around the (possibly encrypted) sink. One variant
/// per enabled compression backend; `Storer` is uncompressed pass-through, and
/// `Closed` marks a writer that has been finished (or whose inner writer has
/// been taken) and must reject further use.
enum GenericZipWriter<W: Write + Seek> {
    Closed,
    Storer(MaybeEncrypted<W>),
    #[cfg(feature = "deflate-flate2")]
    Deflater(DeflateEncoder<MaybeEncrypted<W>>),
    #[cfg(feature = "deflate-zopfli")]
    ZopfliDeflater(zopfli::DeflateEncoder<MaybeEncrypted<W>>),
    #[cfg(feature = "deflate-zopfli")]
    BufferedZopfliDeflater(BufWriter<zopfli::DeflateEncoder<MaybeEncrypted<W>>>),
    #[cfg(feature = "bzip2")]
    Bzip2(BzEncoder<MaybeEncrypted<W>>),
    #[cfg(feature = "zstd")]
    Zstd(ZstdEncoder<'static, MaybeEncrypted<W>>),
}

impl<W: Write + Seek> Debug for GenericZipWriter<W> {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        match self {
            // `Closed` and `Storer` are in scope unqualified via the
            // `use crate::write::GenericZipWriter::{Closed, Storer};` below.
            Closed => f.write_str("Closed"),
            Storer(w) => f.write_fmt(format_args!("Storer({:?})", w)),
            #[cfg(feature = "deflate-flate2")]
            GenericZipWriter::Deflater(w) => {
                f.write_fmt(format_args!("Deflater({:?})", w.get_ref()))
            }
            #[cfg(feature = "deflate-zopfli")]
            GenericZipWriter::ZopfliDeflater(_) => f.write_str("ZopfliDeflater"),
            #[cfg(feature = "deflate-zopfli")]
            GenericZipWriter::BufferedZopfliDeflater(_) => f.write_str("BufferedZopfliDeflater"),
            #[cfg(feature = "bzip2")]
            GenericZipWriter::Bzip2(w) => f.write_fmt(format_args!("Bzip2({:?})", w.get_ref())),
            #[cfg(feature = "zstd")]
            GenericZipWriter::Zstd(w) => f.write_fmt(format_args!("Zstd({:?})", w.get_ref())),
        }
    }
}

// Put the struct declaration in a private module to convince rustdoc to display ZipWriter nicely
pub(crate) mod zip_writer {
    use super::*;
    /// ZIP archive generator
    ///
    /// Handles the bookkeeping involved in building an archive, and provides an
    /// API to edit its contents.
    ///
    /// ```
    /// # fn doit() -> zip::result::ZipResult<()>
    /// # {
    /// # use zip::ZipWriter;
    /// use std::io::Write;
    /// use zip::write::SimpleFileOptions;
    ///
    /// // We use a buffer here, though you'd normally use a `File`
    /// let mut buf = [0; 65536];
    /// let mut zip = ZipWriter::new(std::io::Cursor::new(&mut buf[..]));
    ///
    /// let options = SimpleFileOptions::default().compression_method(zip::CompressionMethod::Stored);
    /// zip.start_file("hello_world.txt", options)?;
    /// zip.write(b"Hello, World!")?;
    ///
    /// // Apply the changes you've made.
    /// // Dropping the `ZipWriter` will have the same effect, but may silently fail
    /// zip.finish()?;
    ///
    /// # Ok(())
    /// # }
    /// # doit().unwrap();
    /// ```
    pub struct ZipWriter<W: Write + Seek> {
        pub(super) inner: GenericZipWriter<W>,
        // Entries written so far, keyed by file name; IndexMap preserves
        // insertion order for the central directory.
        pub(super) files: IndexMap<Box<str>, ZipFileData>,
        pub(super) stats: ZipWriterStats,
        pub(super) writing_to_file: bool,
        pub(super) writing_raw: bool,
        pub(super) comment: Box<[u8]>,
        pub(super) zip64_comment: Option<Box<[u8]>>,
        pub(super) flush_on_finish_file: bool,
    }

    impl<W: Write + Seek> Debug for ZipWriter<W> {
        fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
            // `inner` is deliberately omitted, as is `zip64_comment`.
            f.write_fmt(format_args!(
                "ZipWriter {{files: {:?}, stats: {:?}, writing_to_file: {}, writing_raw: {}, comment: {:?}, flush_on_finish_file: {}}}",
                self.files, self.stats, self.writing_to_file, self.writing_raw, self.comment, self.flush_on_finish_file))
        }
    }
}

#[doc(inline)]
pub use self::sealed::FileOptionExtension;
use crate::result::ZipError::{InvalidArchive, UnsupportedArchive};
use crate::unstable::path_to_string;
use crate::unstable::LittleEndianWriteExt;
use crate::write::GenericZipWriter::{Closed, Storer};
use crate::zipcrypto::ZipCryptoKeys;
use crate::CompressionMethod::Stored;
pub use zip_writer::ZipWriter;

/// Running state for the entry currently being written.
#[derive(Default, Debug)]
struct ZipWriterStats {
    // Running CRC-32 of the uncompressed bytes written so far.
    hasher: Hasher,
    // Presumably the stream offset where the current entry's data begins —
    // TODO(review): confirm against the code that sets it (not visible here).
    start: u64,
    // Uncompressed byte count; checked against the ZIP64 threshold elsewhere.
    bytes_written: u64,
}

// Sealed-trait pattern: `Sealed` cannot be implemented outside this module, so
// `FileOptionExtension` is only ever implemented for `()` and
// `ExtendedFileOptions`.
mod sealed {
    use std::sync::Arc;

    use super::ExtendedFileOptions;

    pub trait Sealed {}

    /// File options Extensions
    #[doc(hidden)]
    pub trait FileOptionExtension: Default + Sealed {
        /// Extra Data
        fn extra_data(&self) -> Option<&Arc<Vec<u8>>>;
        /// Central Extra Data
        fn central_extra_data(&self) -> Option<&Arc<Vec<u8>>>;
    }

    // `()` is the "no extra fields" extension used by `SimpleFileOptions`.
    impl Sealed for () {}

    impl FileOptionExtension for () {
        fn extra_data(&self) -> Option<&Arc<Vec<u8>>> {
            None
        }
        fn central_extra_data(&self) -> Option<&Arc<Vec<u8>>> {
            None
        }
    }

    impl Sealed for ExtendedFileOptions {}

    impl FileOptionExtension for ExtendedFileOptions {
        fn extra_data(&self) -> Option<&Arc<Vec<u8>>> {
            Some(&self.extra_data)
        }
        fn central_extra_data(&self) -> Option<&Arc<Vec<u8>>> {
            Some(&self.central_extra_data)
        }
    }
}

/// Which encryption scheme (if any) to apply to a file being written.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub(crate) enum EncryptWith<'k> {
    #[cfg(feature = "aes-crypto")]
    Aes {
        mode: AesMode,
        password: &'k str,
    },
    // PhantomData carries the `'k` lifetime so the variant set is uniform
    // whether or not the aes-crypto feature (whose variant borrows `'k`) is on.
    ZipCrypto(ZipCryptoKeys, PhantomData<&'k ()>),
}

#[cfg(fuzzing)]
impl<'a> arbitrary::Arbitrary<'a> for EncryptWith<'a> {
    fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result<Self> {
        // Flip a coin for AES when it's compiled in; otherwise always ZipCrypto.
        #[cfg(feature = "aes-crypto")]
        if bool::arbitrary(u)? {
            return Ok(EncryptWith::Aes {
                mode: AesMode::arbitrary(u)?,
                password: u.arbitrary::<&str>()?,
            });
        }

        Ok(EncryptWith::ZipCrypto(
            ZipCryptoKeys::arbitrary(u)?,
            PhantomData,
        ))
    }
}

/// Metadata for a file to be written
#[derive(Clone, Debug, Copy, Eq, PartialEq)]
pub struct FileOptions<'k, T: FileOptionExtension> {
    pub(crate) compression_method: CompressionMethod,
    pub(crate) compression_level: Option<i64>,
    pub(crate) last_modified_time: DateTime,
    pub(crate) permissions: Option<u32>,
    pub(crate) large_file: bool,
    pub(crate) encrypt_with: Option<EncryptWith<'k>>,
    pub(crate) extended_options: T,
    pub(crate) alignment: u16,
    #[cfg(feature = "deflate-zopfli")]
    pub(super) zopfli_buffer_size: Option<usize>,
}

/// Simple File Options. Can be copied and good for simple writing zip files
pub type SimpleFileOptions = FileOptions<'static, ()>;

/// Adds Extra Data and Central Extra Data. It does not implement copy.
pub type FullFileOptions<'k> = FileOptions<'k, ExtendedFileOptions>; /// The Extension for Extra Data and Central Extra Data #[derive(Clone, Default, Eq, PartialEq)] pub struct ExtendedFileOptions { extra_data: Arc<Vec<u8>>, central_extra_data: Arc<Vec<u8>>, } impl ExtendedFileOptions { /// Adds an extra data field, unless we detect that it's invalid. pub fn add_extra_data( &mut self, header_id: u16, data: Box<[u8]>, central_only: bool, ) -> ZipResult<()> { let len = data.len() + 4; if self.extra_data.len() + self.central_extra_data.len() + len > u16::MAX as usize { Err(InvalidArchive( "Extra data field would be longer than allowed", )) } else { let field = if central_only { &mut self.central_extra_data } else { &mut self.extra_data }; let vec = Arc::get_mut(field); let vec = match vec { Some(exclusive) => exclusive, None => { *field = Arc::new(field.to_vec()); Arc::get_mut(field).unwrap() } }; Self::add_extra_data_unchecked(vec, header_id, data)?; Self::validate_extra_data(vec, true)?; Ok(()) } } pub(crate) fn add_extra_data_unchecked( vec: &mut Vec<u8>, header_id: u16, data: Box<[u8]>, ) -> Result<(), ZipError> { vec.reserve_exact(data.len() + 4); vec.write_u16_le(header_id)?; vec.write_u16_le(data.len() as u16)?; vec.write_all(&data)?; Ok(()) } fn validate_extra_data(data: &[u8], disallow_zip64: bool) -> ZipResult<()> { let len = data.len() as u64; if len == 0 { return Ok(()); } if len > u16::MAX as u64 { return Err(ZipError::Io(io::Error::new( io::ErrorKind::Other, "Extra-data field can't exceed u16::MAX bytes", ))); } let mut data = Cursor::new(data); let mut pos = data.position(); while pos < len { if len - data.position() < 4 { return Err(ZipError::Io(io::Error::new( io::ErrorKind::Other, "Extra-data field doesn't have room for ID and length", ))); } #[cfg(not(feature = "unreserved"))] { use crate::unstable::LittleEndianReadExt; let header_id = data.read_u16_le()?; if EXTRA_FIELD_MAPPING .iter() .any(|&mapped| mapped == header_id) { return 
Err(ZipError::Io(io::Error::new( io::ErrorKind::Other, format!( "Extra data header ID {header_id:#06} requires crate feature \"unreserved\"", ), ))); } data.seek(SeekFrom::Current(-2))?; } parse_single_extra_field(&mut ZipFileData::default(), &mut data, pos, disallow_zip64)?; pos = data.position(); } Ok(()) } } impl Debug for ExtendedFileOptions { fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), std::fmt::Error> { f.write_fmt(format_args!("ExtendedFileOptions {{extra_data: vec!{:?}.into(), central_extra_data: vec!{:?}.into()}}", self.extra_data, self.central_extra_data)) } } #[cfg(fuzzing)] impl<'a> arbitrary::Arbitrary<'a> for FileOptions<'a, ExtendedFileOptions> { fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result<Self> { let mut options = FullFileOptions { compression_method: CompressionMethod::arbitrary(u)?, compression_level: if bool::arbitrary(u)? { Some(u.int_in_range(0..=24)?) } else { None }, last_modified_time: DateTime::arbitrary(u)?, permissions: Option::<u32>::arbitrary(u)?, large_file: bool::arbitrary(u)?, encrypt_with: Option::<EncryptWith>::arbitrary(u)?, alignment: u16::arbitrary(u)?, #[cfg(feature = "deflate-zopfli")] zopfli_buffer_size: None, ..Default::default() }; #[cfg(feature = "deflate-zopfli")] if options.compression_method == CompressionMethod::Deflated && bool::arbitrary(u)? { options.zopfli_buffer_size = Some(if bool::arbitrary(u)? 
{ 2 } else { 3 } << u.int_in_range(8..=20)?); } u.arbitrary_loop(Some(0), Some(10), |u| { options .add_extra_data( u.int_in_range(2..=u16::MAX)?, Box::<[u8]>::arbitrary(u)?, bool::arbitrary(u)?, ) .map_err(|_| arbitrary::Error::IncorrectFormat)?; Ok(core::ops::ControlFlow::Continue(())) })?; ZipWriter::new(Cursor::new(Vec::new())) .start_file("", options.clone()) .map_err(|_| arbitrary::Error::IncorrectFormat)?; Ok(options) } } impl<T: FileOptionExtension> FileOptions<'_, T> { /// Set the compression method for the new file /// /// The default is `CompressionMethod::Deflated` if it is enabled. If not, /// `CompressionMethod::Bzip2` is the default if it is enabled. If neither `bzip2` nor `deflate` /// is enabled, `CompressionMethod::Zlib` is the default. If all else fails, /// `CompressionMethod::Stored` becomes the default and files are written uncompressed. #[must_use] pub const fn compression_method(mut self, method: CompressionMethod) -> Self { self.compression_method = method; self } /// Set the compression level for the new file /// /// `None` value specifies default compression level. /// /// Range of values depends on compression method: /// * `Deflated`: 10 - 264 for Zopfli, 0 - 9 for other encoders. Default is 24 if Zopfli is the /// only encoder, or 6 otherwise. /// * `Bzip2`: 0 - 9. Default is 6 /// * `Zstd`: -7 - 22, with zero being mapped to default level. Default is 3 /// * others: only `None` is allowed #[must_use] pub const fn compression_level(mut self, level: Option<i64>) -> Self { self.compression_level = level; self } /// Set the last modified time /// /// The default is the current timestamp if the 'time' feature is enabled, and 1980-01-01 /// otherwise #[must_use] pub const fn last_modified_time(mut self, mod_time: DateTime) -> Self { self.last_modified_time = mod_time; self } /// Set the permissions for the new file. /// /// The format is represented with unix-style permissions. 
/// The default is `0o644`, which represents `rw-r--r--` for files, /// and `0o755`, which represents `rwxr-xr-x` for directories. /// /// This method only preserves the file permissions bits (via a `& 0o777`) and discards /// higher file mode bits. So it cannot be used to denote an entry as a directory, /// symlink, or other special file type. #[must_use] pub const fn unix_permissions(mut self, mode: u32) -> Self { self.permissions = Some(mode & 0o777); self } /// Set whether the new file's compressed and uncompressed size is less than 4 GiB. /// /// If set to `false` and the file exceeds the limit, an I/O error is thrown and the file is /// aborted. If set to `true`, readers will require ZIP64 support and if the file does not /// exceed the limit, 20 B are wasted. The default is `false`. #[must_use] pub const fn large_file(mut self, large: bool) -> Self { self.large_file = large; self } pub(crate) fn with_deprecated_encryption(self, password: &[u8]) -> FileOptions<'static, T> { FileOptions { encrypt_with: Some(EncryptWith::ZipCrypto( ZipCryptoKeys::derive(password), PhantomData, )), ..self } } /// Set the AES encryption parameters. #[cfg(feature = "aes-crypto")] pub fn with_aes_encryption(self, mode: AesMode, password: &str) -> FileOptions<'_, T> { FileOptions { encrypt_with: Some(EncryptWith::Aes { mode, password }), ..self } } /// Sets the size of the buffer used to hold the next block that Zopfli will compress. The /// larger the buffer, the more effective the compression, but the more memory is required. /// A value of `None` indicates no buffer, which is recommended only when all non-empty writes /// are larger than about 32 KiB. #[must_use] #[cfg(feature = "deflate-zopfli")] pub const fn with_zopfli_buffer(mut self, size: Option<usize>) -> Self { self.zopfli_buffer_size = size; self } /// Returns the compression level currently set. 
pub const fn get_compression_level(&self) -> Option<i64> { self.compression_level } /// Sets the alignment to the given number of bytes. #[must_use] pub const fn with_alignment(mut self, alignment: u16) -> Self { self.alignment = alignment; self } } impl FileOptions<'_, ExtendedFileOptions> { /// Adds an extra data field. pub fn add_extra_data( &mut self, header_id: u16, data: Box<[u8]>, central_only: bool, ) -> ZipResult<()> { self.extended_options .add_extra_data(header_id, data, central_only) } /// Removes the extra data fields. #[must_use] pub fn clear_extra_data(mut self) -> Self { if !self.extended_options.extra_data.is_empty() { self.extended_options.extra_data = Arc::new(vec![]); } if !self.extended_options.central_extra_data.is_empty() { self.extended_options.central_extra_data = Arc::new(vec![]); } self } } impl<T: FileOptionExtension> Default for FileOptions<'_, T> { /// Construct a new FileOptions object fn default() -> Self { Self { compression_method: Default::default(), compression_level: None, last_modified_time: DateTime::default_for_write(), permissions: None, large_file: false, encrypt_with: None, extended_options: T::default(), alignment: 1, #[cfg(feature = "deflate-zopfli")] zopfli_buffer_size: Some(1 << 15), } } } impl<W: Write + Seek> Write for ZipWriter<W> { fn write(&mut self, buf: &[u8]) -> io::Result<usize> { if !self.writing_to_file { return Err(io::Error::new( io::ErrorKind::Other, "No file has been started", )); } if buf.is_empty() { return Ok(0); } match self.inner.ref_mut() { Some(ref mut w) => { let write_result = w.write(buf); if let Ok(count) = write_result { self.stats.update(&buf[0..count]); if self.stats.bytes_written > spec::ZIP64_BYTES_THR && !self.files.last_mut().unwrap().1.large_file { let _ = self.abort_file(); return Err(io::Error::new( io::ErrorKind::Other, "Large file option has not been set", )); } } write_result } None => Err(io::Error::new( io::ErrorKind::BrokenPipe, "write(): ZipWriter was already closed", )), } } 
fn flush(&mut self) -> io::Result<()> { match self.inner.ref_mut() { Some(ref mut w) => w.flush(), None => Err(io::Error::new( io::ErrorKind::BrokenPipe, "flush(): ZipWriter was already closed", )), } } } impl ZipWriterStats { fn update(&mut self, buf: &[u8]) { self.hasher.update(buf); self.bytes_written += buf.len() as u64; } } impl<A: Read + Write + Seek> ZipWriter<A> { /// Initializes the archive from an existing ZIP archive, making it ready for append. /// /// This uses a default configuration to initially read the archive. pub fn new_append(readwriter: A) -> ZipResult<ZipWriter<A>> { Self::new_append_with_config(Default::default(), readwriter) } /// Initializes the archive from an existing ZIP archive, making it ready for append. /// /// This uses the given read configuration to initially read the archive. pub fn new_append_with_config(config: Config, mut readwriter: A) -> ZipResult<ZipWriter<A>> { readwriter.seek(SeekFrom::Start(0))?; let shared = ZipArchive::get_metadata(config, &mut readwriter)?; Ok(ZipWriter { inner: Storer(MaybeEncrypted::Unencrypted(readwriter)), files: shared.files, stats: Default::default(), writing_to_file: false, comment: shared.comment, zip64_comment: shared.zip64_comment, writing_raw: true, // avoid recomputing the last file's header flush_on_finish_file: false, }) } /// `flush_on_finish_file` is designed to support a streaming `inner` that may unload flushed /// bytes. It flushes a file's header and body once it starts writing another file. A ZipWriter /// will not try to seek back into where a previous file was written unless /// either [`ZipWriter::abort_file`] is called while [`ZipWriter::is_writing_file`] returns /// false, or [`ZipWriter::deep_copy_file`] is called. In the latter case, it will only need to /// read previously-written files and not overwrite them. 
///
/// Note: when using an `inner` that cannot overwrite flushed bytes, do not wrap it in a
/// [BufWriter], because that has a [Seek::seek] method that implicitly calls
/// [BufWriter::flush], and ZipWriter needs to seek backward to update each file's header with
/// the size and checksum after writing the body.
///
/// This setting is false by default.
pub fn set_flush_on_finish_file(&mut self, flush_on_finish_file: bool) {
    self.flush_on_finish_file = flush_on_finish_file;
}
}

impl<A: Read + Write + Seek> ZipWriter<A> {
    /// Adds another copy of a file already in this archive. This will produce a larger but more
    /// widely-compatible archive compared to [Self::shallow_copy_file]. Does not copy alignment.
    pub fn deep_copy_file(&mut self, src_name: &str, dest_name: &str) -> ZipResult<()> {
        self.finish_file()?;
        if src_name == dest_name || self.files.contains_key(dest_name) {
            return Err(InvalidArchive("That file already exists"));
        }
        let write_position = self.inner.get_plain().stream_position()?;
        let src_index = self.index_by_name(src_name)?;
        let src_data = &mut self.files[src_index];
        let src_data_start = src_data.data_start();
        debug_assert!(src_data_start <= write_position);
        // Clamp the recorded compressed size to the bytes actually present before the current
        // write position, so the raw copy below never reads past the end of the source entry.
        let mut compressed_size = src_data.compressed_size;
        if compressed_size > (write_position - src_data_start) {
            compressed_size = write_position - src_data_start;
            src_data.compressed_size = compressed_size;
        }
        // Read the source entry's raw (still-compressed/encrypted) bytes into memory.
        let mut reader = BufReader::new(ZipFileReader::Raw(find_content(
            src_data,
            self.inner.get_plain(),
        )?));
        let mut copy = Vec::with_capacity(compressed_size as usize);
        reader.read_to_end(&mut copy)?;
        drop(reader);
        self.inner
            .get_plain()
            .seek(SeekFrom::Start(write_position))?;
        // Clone the source metadata and rewrite the name/offset fields for the new entry.
        let mut new_data = src_data.clone();
        let dest_name_raw = dest_name.as_bytes();
        new_data.file_name = dest_name.into();
        new_data.file_name_raw = dest_name_raw.into();
        new_data.is_utf8 = !dest_name.is_ascii();
        new_data.header_start = write_position;
        let extra_data_start = write_position
            + size_of::<ZipLocalEntryBlock>() as u64
            + new_data.file_name_raw.len() as u64;
        new_data.extra_data_start = Some(extra_data_start);
        let mut data_start = extra_data_start;
        if let Some(extra) = &src_data.extra_field {
            data_start += extra.len() as u64;
        }
        // data_start is a OnceLock-like cell: clear the stale value, then seed the new one.
        new_data.data_start.take();
        new_data.data_start.get_or_init(|| data_start);
        new_data.central_header_start = 0;
        let block = new_data.local_block()?;
        let index = self.insert_file_data(new_data)?;
        // All writes go through a closure so any failure can abort the half-written entry.
        let result = (|| {
            let plain_writer = self.inner.get_plain();
            plain_writer.write_all(block.as_bytes())?;
            plain_writer.write_all(dest_name_raw)?;
            let new_data = &self.files[index];
            if let Some(data) = &new_data.extra_field {
                plain_writer.write_all(data)?;
            }
            debug_assert_eq!(data_start, plain_writer.stream_position()?);
            self.writing_to_file = true;
            plain_writer.write_all(&copy)
        })();
        self.ok_or_abort_file(result)?;
        // Copying will overwrite the central header
        self.files
            .values_mut()
            .for_each(|file| file.central_header_start = 0);
        self.writing_to_file = false;
        Ok(())
    }

    /// Like `deep_copy_file`, but uses Path arguments.
    ///
    /// This function ensures that the '/' path separator is used and normalizes `.` and `..`. It
    /// ignores any `..` or Windows drive letter that would produce a path outside the ZIP file's
    /// root.
    pub fn deep_copy_file_from_path<T: AsRef<Path>, U: AsRef<Path>>(
        &mut self,
        src_path: T,
        dest_path: U,
    ) -> ZipResult<()> {
        let src = path_to_string(src_path);
        let dest = path_to_string(dest_path);
        self.deep_copy_file(&src, &dest)
    }

    /// Write the zip file into the backing stream, then produce a readable archive of that data.
    ///
    /// This method avoids parsing the central directory records at the end of the stream for
    /// a slight performance improvement over running [`ZipArchive::new()`] on the output of
    /// [`Self::finish()`].
    ///
    ///```
    /// # fn main() -> Result<(), zip::result::ZipError> {
    /// use std::io::{Cursor, prelude::*};
    /// use zip::{ZipArchive, ZipWriter, write::SimpleFileOptions};
    ///
    /// let buf = Cursor::new(Vec::new());
    /// let mut zip = ZipWriter::new(buf);
    /// let options = SimpleFileOptions::default();
    /// zip.start_file("a.txt", options)?;
    /// zip.write_all(b"hello\n")?;
    ///
    /// let mut zip = zip.finish_into_readable()?;
    /// let mut s: String = String::new();
    /// zip.by_name("a.txt")?.read_to_string(&mut s)?;
    /// assert_eq!(s, "hello\n");
    /// # Ok(())
    /// # }
    ///```
    pub fn finish_into_readable(mut self) -> ZipResult<ZipArchive<A>> {
        let central_start = self.finalize()?;
        // Move the finalized state out of `self` so Drop (which would finalize again) is a no-op.
        let inner = mem::replace(&mut self.inner, Closed).unwrap();
        let comment = mem::take(&mut self.comment);
        let zip64_comment = mem::take(&mut self.zip64_comment);
        let files = mem::take(&mut self.files);
        let archive =
            ZipArchive::from_finalized_writer(files, comment, zip64_comment, inner, central_start)?;
        Ok(archive)
    }
}

impl<W: Write + Seek> ZipWriter<W> {
    /// Initializes the archive.
    ///
    /// Before writing to this object, the [`ZipWriter::start_file`] function should be called.
    /// After a successful write, the file remains open for writing. After a failed write, call
    /// [`ZipWriter::is_writing_file`] to determine if the file remains open.
    pub fn new(inner: W) -> ZipWriter<W> {
        ZipWriter {
            inner: Storer(MaybeEncrypted::Unencrypted(inner)),
            files: IndexMap::new(),
            stats: Default::default(),
            writing_to_file: false,
            writing_raw: false,
            comment: Box::new([]),
            zip64_comment: None,
            flush_on_finish_file: false,
        }
    }

    /// Returns true if a file is currently open for writing.
    pub const fn is_writing_file(&self) -> bool {
        self.writing_to_file && !self.inner.is_closed()
    }

    /// Set ZIP archive comment.
    pub fn set_comment<S>(&mut self, comment: S)
    where
        S: Into<Box<str>>,
    {
        self.set_raw_comment(comment.into().into_boxed_bytes())
    }

    /// Set ZIP archive comment.
    ///
    /// This sets the raw bytes of the comment. The comment
    /// is typically expected to be encoded in UTF-8.
    pub fn set_raw_comment(&mut self, comment: Box<[u8]>) {
        self.comment = comment;
    }

    /// Get ZIP archive comment.
    pub fn get_comment(&mut self) -> Result<&str, Utf8Error> {
        from_utf8(self.get_raw_comment())
    }

    /// Get ZIP archive comment.
    ///
    /// This returns the raw bytes of the comment. The comment
    /// is typically expected to be encoded in UTF-8.
    pub const fn get_raw_comment(&self) -> &[u8] {
        &self.comment
    }

    /// Set ZIP64 archive comment.
    pub fn set_zip64_comment<S>(&mut self, comment: Option<S>)
    where
        S: Into<Box<str>>,
    {
        self.set_raw_zip64_comment(comment.map(|v| v.into().into_boxed_bytes()))
    }

    /// Set ZIP64 archive comment.
    ///
    /// This sets the raw bytes of the comment. The comment
    /// is typically expected to be encoded in UTF-8.
    pub fn set_raw_zip64_comment(&mut self, comment: Option<Box<[u8]>>) {
        self.zip64_comment = comment;
    }

    /// Get ZIP64 archive comment.
    pub fn get_zip64_comment(&mut self) -> Option<Result<&str, Utf8Error>> {
        self.get_raw_zip64_comment().map(from_utf8)
    }

    /// Get ZIP64 archive comment.
    ///
    /// This returns the raw bytes of the comment. The comment
    /// is typically expected to be encoded in UTF-8.
    pub fn get_raw_zip64_comment(&self) -> Option<&[u8]> {
        self.zip64_comment.as_deref()
    }

    /// Set the file length and crc32 manually.
    ///
    /// # Safety
    ///
    /// This overwrites the internal crc32 calculation. It should only be used in case
    /// the underlying [Write] is written independently and you need to adjust the zip metadata.
pub unsafe fn set_file_metadata(&mut self, length: u64, crc32: u32) -> ZipResult<()> {
        if !self.writing_to_file {
            return Err(ZipError::Io(io::Error::new(
                io::ErrorKind::Other,
                "No file has been started",
            )));
        }
        // Replace the running CRC state with one seeded from the caller-supplied values.
        self.stats.hasher = Hasher::new_with_initial_len(crc32, length);
        self.stats.bytes_written = length;
        Ok(())
    }

    /// Maps `Err` to `Err` after aborting the in-progress file, so a failed write never leaves
    /// a half-written entry in the archive. `Ok` passes through unchanged.
    fn ok_or_abort_file<T, E: Into<ZipError>>(&mut self, result: Result<T, E>) -> ZipResult<T> {
        match result {
            Err(e) => {
                // Best-effort cleanup; the original error is what the caller needs to see.
                let _ = self.abort_file();
                Err(e.into())
            }
            Ok(t) => Ok(t),
        }
    }

    /// Start a new file with the requested options.
    fn start_entry<S: ToString, T: FileOptionExtension>(
        &mut self,
        name: S,
        options: FileOptions<T>,
        raw_values: Option<ZipRawValues>,
    ) -> ZipResult<()> {
        self.finish_file()?;

        let header_start = self.inner.get_plain().stream_position()?;
        // When raw values are not supplied (normal writes), sizes/CRC start at zero and are
        // back-patched by finish_file().
        let raw_values = raw_values.unwrap_or(ZipRawValues {
            crc32: 0,
            compressed_size: 0,
            uncompressed_size: 0,
        });
        let mut extra_data = match options.extended_options.extra_data() {
            Some(data) => data.to_vec(),
            None => vec![],
        };
        let central_extra_data = options.extended_options.central_extra_data();
        // A ZIP64 extra-field placeholder must come first so its offset is predictable.
        if let Some(zip64_block) =
            Zip64ExtraFieldBlock::maybe_new(options.large_file, 0, 0, header_start)
        {
            let mut new_extra_data = zip64_block.serialize().into_vec();
            new_extra_data.append(&mut extra_data);
            extra_data = new_extra_data;
        }

        // Write AES encryption extra data.
        #[allow(unused_mut)]
        let mut aes_extra_data_start = 0;
        #[cfg(feature = "aes-crypto")]
        if let Some(EncryptWith::Aes { mode, .. }) = options.encrypt_with {
            // Placeholder AES extra field (header ID 0x9901); the real vendor version and
            // compression method are patched in later by update_aes_extra_data().
            let aes_dummy_extra_data =
                vec![0x02, 0x00, 0x41, 0x45, mode as u8, 0x00, 0x00].into_boxed_slice();
            aes_extra_data_start = extra_data.len() as u64;
            ExtendedFileOptions::add_extra_data_unchecked(
                &mut extra_data,
                0x9901,
                aes_dummy_extra_data,
            )?;
        }
        let (compression_method, aes_mode) = match options.encrypt_with {
            #[cfg(feature = "aes-crypto")]
            Some(EncryptWith::Aes { mode, .. }) => (
                CompressionMethod::Aes,
                Some((mode, AesVendorVersion::Ae2, options.compression_method)),
            ),
            _ => (options.compression_method, None),
        };
        let header_end =
            header_start + size_of::<ZipLocalEntryBlock>() as u64 + name.to_string().len() as u64;
        if options.alignment > 1 {
            let extra_data_end = header_end + extra_data.len() as u64;
            let align = options.alignment as u64;
            let unaligned_header_bytes = extra_data_end % align;
            if unaligned_header_bytes != 0 {
                // A pad extra field needs at least 4 header bytes + 2 payload bytes.
                let mut pad_length = (align - unaligned_header_bytes) as usize;
                while pad_length < 6 {
                    pad_length += align as usize;
                }
                // Add an extra field to the extra_data, per APPNOTE 4.6.11
                let mut pad_body = vec![0; pad_length - 4];
                debug_assert!(pad_body.len() >= 2);
                [pad_body[0], pad_body[1]] = options.alignment.to_le_bytes();
                ExtendedFileOptions::add_extra_data_unchecked(
                    &mut extra_data,
                    0xa11e,
                    pad_body.into_boxed_slice(),
                )?;
                debug_assert_eq!((extra_data.len() as u64 + header_end) % align, 0);
            }
        }
        let extra_data_len = extra_data.len();
        if let Some(data) = central_extra_data {
            if extra_data_len + data.len() > u16::MAX as usize {
                return Err(InvalidArchive(
                    "Extra data and central extra data must be less than 64KiB when combined",
                ));
            }
            ExtendedFileOptions::validate_extra_data(data, true)?;
        }
        let mut file = ZipFileData::initialize_local_block(
            name,
            &options,
            raw_values,
            header_start,
            None,
            aes_extra_data_start,
            compression_method,
            aes_mode,
            &extra_data,
        );
        file.version_made_by = file.version_made_by.max(file.version_needed() as u8);
        file.extra_data_start = Some(header_end);
        let index = self.insert_file_data(file)?;
        self.writing_to_file = true;
        // Header writes go through a closure so a failure aborts the freshly inserted entry.
        let result: ZipResult<()> = (|| {
            ExtendedFileOptions::validate_extra_data(&extra_data, false)?;
            let file = &mut self.files[index];
            let block = file.local_block()?;
            let writer = self.inner.get_plain();
            block.write(writer)?;
            // file name
            writer.write_all(&file.file_name_raw)?;
            if extra_data_len > 0 {
                writer.write_all(&extra_data)?;
                file.extra_field = Some(extra_data.into());
            }
            Ok(())
        })();
        self.ok_or_abort_file(result)?;
        let writer = self.inner.get_plain();
        self.stats.start = writer.stream_position()?;
        // Swap in the encrypting writer, if any, before file data is written.
        match options.encrypt_with {
            #[cfg(feature = "aes-crypto")]
            Some(EncryptWith::Aes { mode, password }) => {
                let aeswriter = AesWriter::new(
                    mem::replace(&mut self.inner, Closed).unwrap(),
                    mode,
                    password.as_bytes(),
                )?;
                self.inner = Storer(MaybeEncrypted::Aes(aeswriter));
            }
            Some(EncryptWith::ZipCrypto(keys, ..)) => {
                let mut zipwriter = crate::zipcrypto::ZipCryptoWriter {
                    writer: mem::replace(&mut self.inner, Closed).unwrap(),
                    buffer: vec![],
                    keys,
                };
                self.stats.start = zipwriter.writer.stream_position()?;
                // crypto_header is counted as part of the data
                let crypto_header = [0u8; 12];

                let result = zipwriter.write_all(&crypto_header);
                self.ok_or_abort_file(result)?;
                self.inner = Storer(MaybeEncrypted::ZipCrypto(zipwriter));
            }
            None => {}
        }
        let file = &mut self.files[index];
        debug_assert!(file.data_start.get().is_none());
        file.data_start.get_or_init(|| self.stats.start);
        self.stats.bytes_written = 0;
        self.stats.hasher = Hasher::new();
        Ok(())
    }

    /// Inserts the entry into the name-keyed map, rejecting duplicates, and returns its index.
    fn insert_file_data(&mut self, file: ZipFileData) -> ZipResult<usize> {
        if self.files.contains_key(&file.file_name) {
            return Err(InvalidArchive("Duplicate filename"));
        }
        let name = file.file_name.to_owned();
        self.files.insert(name.clone(), file);
        Ok(self.files.get_index_of(&name).unwrap())
    }

    /// Closes the entry currently being written: flushes/finishes the compressor, leaves any
    /// encrypting writer, and back-patches the local header with sizes and CRC.
    fn finish_file(&mut self) -> ZipResult<()> {
        if !self.writing_to_file {
            return Ok(());
        }

        let make_plain_writer = self.inner.prepare_next_writer(
            Stored,
            None,
            #[cfg(feature = "deflate-zopfli")]
            None,
        )?;
        self.inner.switch_to(make_plain_writer)?;
        self.switch_to_non_encrypting_writer()?;
        let writer = self.inner.get_plain();

        if !self.writing_raw {
            let file = match self.files.last_mut() {
                None => return Ok(()),
                Some((_, f)) => f,
            };
            file.uncompressed_size = self.stats.bytes_written;

            let file_end = writer.stream_position()?;
            debug_assert!(file_end >= self.stats.start);
            file.compressed_size = file_end - self.stats.start;

            let mut crc = true;
            if let Some(aes_mode) = &mut file.aes_mode {
                // We prefer using AE-1 which provides an extra CRC check, but for small files we
                // switch to AE-2 to prevent being able to use the CRC value to reconstruct the
                // unencrypted contents.
                //
                // C.f. https://www.winzip.com/en/support/aes-encryption/#crc-faq
                aes_mode.1 = if self.stats.bytes_written < 20 {
                    crc = false;
                    AesVendorVersion::Ae2
                } else {
                    AesVendorVersion::Ae1
                };
            }
            file.crc32 = if crc {
                self.stats.hasher.clone().finalize()
            } else {
                0
            };
            // Seek back to patch the headers, then restore the position at the entry's end.
            update_aes_extra_data(writer, file)?;
            update_local_file_header(writer, file)?;
            writer.seek(SeekFrom::Start(file_end))?;
        }
        if self.flush_on_finish_file {
            let result = writer.flush();
            self.ok_or_abort_file(result)?;
        }

        self.writing_to_file = false;
        Ok(())
    }

    /// Unwraps any encrypting writer (finishing its trailer) and restores a plain writer.
    fn switch_to_non_encrypting_writer(&mut self) -> Result<(), ZipError> {
        match mem::replace(&mut self.inner, Closed) {
            #[cfg(feature = "aes-crypto")]
            Storer(MaybeEncrypted::Aes(writer)) => {
                self.inner = Storer(MaybeEncrypted::Unencrypted(writer.finish()?));
            }
            Storer(MaybeEncrypted::ZipCrypto(writer)) => {
                let crc32 = self.stats.hasher.clone().finalize();
                self.inner = Storer(MaybeEncrypted::Unencrypted(writer.finish(crc32)?))
            }
            Storer(MaybeEncrypted::Unencrypted(w)) => {
                self.inner = Storer(MaybeEncrypted::Unencrypted(w))
            }
            _ => unreachable!(),
        }
        Ok(())
    }

    /// Removes the file currently being written from the archive if there is one, or else removes
    /// the file most recently written.
pub fn abort_file(&mut self) -> ZipResult<()> {
        let (_, last_file) = self.files.pop().ok_or(ZipError::FileNotFound)?;
        let make_plain_writer = self.inner.prepare_next_writer(
            Stored,
            None,
            #[cfg(feature = "deflate-zopfli")]
            None,
        )?;
        self.inner.switch_to(make_plain_writer)?;
        self.switch_to_non_encrypting_writer()?;
        // Make sure this is the last file, and that no shallow copies of it remain; otherwise we'd
        // overwrite a valid file and corrupt the archive
        let rewind_safe: bool = match last_file.data_start.get() {
            None => self.files.is_empty(),
            Some(last_file_start) => self.files.values().all(|file| {
                file.data_start
                    .get()
                    .is_some_and(|start| start < last_file_start)
            }),
        };
        if rewind_safe {
            // Rewind so the aborted bytes get overwritten by the next entry.
            self.inner
                .get_plain()
                .seek(SeekFrom::Start(last_file.header_start))?;
        }
        self.writing_to_file = false;
        Ok(())
    }

    /// Create a file in the archive and start writing its contents. The file must not have the
    /// same name as a file already in the archive.
    ///
    /// The data should be written using the [`Write`] implementation on this [`ZipWriter`]
    pub fn start_file<S: ToString, T: FileOptionExtension>(
        &mut self,
        name: S,
        mut options: FileOptions<T>,
    ) -> ZipResult<()> {
        Self::normalize_options(&mut options);
        let make_new_self = self.inner.prepare_next_writer(
            options.compression_method,
            options.compression_level,
            #[cfg(feature = "deflate-zopfli")]
            options.zopfli_buffer_size,
        )?;
        self.start_entry(name, options, None)?;
        let result = self.inner.switch_to(make_new_self);
        self.ok_or_abort_file(result)?;
        self.writing_raw = false;
        Ok(())
    }

    /* TODO: link to/use Self::finish_into_readable() from https://github.com/zip-rs/zip/pull/400 in
     * this docstring. */
    /// Copy over the entire contents of another archive verbatim.
    ///
    /// This method extracts file metadata from the `source` archive, then simply performs a single
    /// big [`io::copy()`](io::copy) to transfer all the actual file contents without any
    /// decompression or decryption. This is more performant than the equivalent operation of
    /// calling [`Self::raw_copy_file()`] for each entry from the `source` archive in sequence.
    ///
    ///```
    /// # fn main() -> Result<(), zip::result::ZipError> {
    /// use std::io::{Cursor, prelude::*};
    /// use zip::{ZipArchive, ZipWriter, write::SimpleFileOptions};
    ///
    /// let buf = Cursor::new(Vec::new());
    /// let mut zip = ZipWriter::new(buf);
    /// zip.start_file("a.txt", SimpleFileOptions::default())?;
    /// zip.write_all(b"hello\n")?;
    /// let src = ZipArchive::new(zip.finish()?)?;
    ///
    /// let buf = Cursor::new(Vec::new());
    /// let mut zip = ZipWriter::new(buf);
    /// zip.start_file("b.txt", SimpleFileOptions::default())?;
    /// zip.write_all(b"hey\n")?;
    /// let src2 = ZipArchive::new(zip.finish()?)?;
    ///
    /// let buf = Cursor::new(Vec::new());
    /// let mut zip = ZipWriter::new(buf);
    /// zip.merge_archive(src)?;
    /// zip.merge_archive(src2)?;
    /// let mut result = ZipArchive::new(zip.finish()?)?;
    ///
    /// let mut s: String = String::new();
    /// result.by_name("a.txt")?.read_to_string(&mut s)?;
    /// assert_eq!(s, "hello\n");
    /// s.clear();
    /// result.by_name("b.txt")?.read_to_string(&mut s)?;
    /// assert_eq!(s, "hey\n");
    /// # Ok(())
    /// # }
    ///```
    pub fn merge_archive<R>(&mut self, mut source: ZipArchive<R>) -> ZipResult<()>
    where
        R: Read + Seek,
    {
        self.finish_file()?;

        /* Ensure we accept the file contents on faith (and avoid overwriting the data).
         * See raw_copy_file_rename(). */
        self.writing_to_file = true;
        self.writing_raw = true;

        let writer = self.inner.get_plain();
        /* Get the file entries from the source archive. */
        let new_files = source.merge_contents(writer)?;

        /* These file entries are now ours! */
        self.files.extend(new_files);

        Ok(())
    }

    /// Applies default permissions (0o644), a valid last-modified time, and the regular-file
    /// mode bit to `options` in place.
    fn normalize_options<T: FileOptionExtension>(options: &mut FileOptions<T>) {
        if options.permissions.is_none() {
            options.permissions = Some(0o644);
        }
        if !options.last_modified_time.is_valid() {
            options.last_modified_time = FileOptions::<T>::default().last_modified_time;
        }
        *options.permissions.as_mut().unwrap() |= ffi::S_IFREG;
    }

    /// Starts a file, taking a Path as argument.
    ///
    /// This function ensures that the '/' path separator is used and normalizes `.` and `..`. It
    /// ignores any `..` or Windows drive letter that would produce a path outside the ZIP file's
    /// root.
    pub fn start_file_from_path<E: FileOptionExtension, P: AsRef<Path>>(
        &mut self,
        path: P,
        options: FileOptions<E>,
    ) -> ZipResult<()> {
        self.start_file(path_to_string(path), options)
    }

    /// Add a new file using the already compressed data from a ZIP file being read and renames it, this
    /// allows faster copies of the `ZipFile` since there is no need to decompress and compress it again.
    /// Any `ZipFile` metadata is copied and not checked, for example the file CRC.
    ///
    /// ```no_run
    /// use std::fs::File;
    /// use std::io::{Read, Seek, Write};
    /// use zip::{ZipArchive, ZipWriter};
    ///
    /// fn copy_rename<R, W>(
    ///     src: &mut ZipArchive<R>,
    ///     dst: &mut ZipWriter<W>,
    /// ) -> zip::result::ZipResult<()>
    /// where
    ///     R: Read + Seek,
    ///     W: Write + Seek,
    /// {
    ///     // Retrieve file entry by name
    ///     let file = src.by_name("src_file.txt")?;
    ///
    ///     // Copy and rename the previously obtained file entry to the destination zip archive
    ///     dst.raw_copy_file_rename(file, "new_name.txt")?;
    ///
    ///     Ok(())
    /// }
    /// ```
    pub fn raw_copy_file_rename<S: ToString>(&mut self, file: ZipFile, name: S) -> ZipResult<()> {
        // Rebuild options from the source entry's metadata so the copied entry round-trips.
        let mut options = SimpleFileOptions::default()
            .large_file(file.compressed_size().max(file.size()) > spec::ZIP64_BYTES_THR)
            .last_modified_time(
                file.last_modified()
                    .unwrap_or_else(DateTime::default_for_write),
            )
            .compression_method(file.compression());
        if let Some(perms) = file.unix_mode() {
            options = options.unix_permissions(perms);
        }
        Self::normalize_options(&mut options);

        self.raw_copy_file_rename_internal(file, name, options)
    }

    /// Shared tail of the raw-copy paths: starts the entry with the source's raw CRC/sizes and
    /// streams the still-compressed bytes straight through without re-encoding.
    fn raw_copy_file_rename_internal<S: ToString>(
        &mut self,
        mut file: ZipFile,
        name: S,
        options: SimpleFileOptions,
    ) -> ZipResult<()> {
        let raw_values = ZipRawValues {
            crc32: file.crc32(),
            compressed_size: file.compressed_size(),
            uncompressed_size: file.size(),
        };

        self.start_entry(name, options, Some(raw_values))?;
        self.writing_to_file = true;
        // writing_raw stops finish_file() from recomputing sizes/CRC for the copied data.
        self.writing_raw = true;

        io::copy(&mut file.take_raw_reader()?, self)?;
        self.finish_file()
    }

    /// Like [`Self::raw_copy_file_rename`], but uses Path arguments.
    ///
    /// This function ensures that the '/' path separator is used and normalizes `.` and `..`. It
    /// ignores any `..` or Windows drive letter that would produce a path outside the ZIP file's
    /// root.
pub fn raw_copy_file_to_path<P: AsRef<Path>>(
        &mut self,
        file: ZipFile,
        path: P,
    ) -> ZipResult<()> {
        self.raw_copy_file_rename(file, path_to_string(path))
    }

    /// Add a new file using the already compressed data from a ZIP file being read, this allows faster
    /// copies of the `ZipFile` since there is no need to decompress and compress it again. Any `ZipFile`
    /// metadata is copied and not checked, for example the file CRC.
    ///
    /// ```no_run
    /// use std::fs::File;
    /// use std::io::{Read, Seek, Write};
    /// use zip::{ZipArchive, ZipWriter};
    ///
    /// fn copy<R, W>(src: &mut ZipArchive<R>, dst: &mut ZipWriter<W>) -> zip::result::ZipResult<()>
    /// where
    ///     R: Read + Seek,
    ///     W: Write + Seek,
    /// {
    ///     // Retrieve file entry by name
    ///     let file = src.by_name("src_file.txt")?;
    ///
    ///     // Copy the previously obtained file entry to the destination zip archive
    ///     dst.raw_copy_file(file)?;
    ///
    ///     Ok(())
    /// }
    /// ```
    pub fn raw_copy_file(&mut self, file: ZipFile) -> ZipResult<()> {
        let name = file.name().to_owned();
        self.raw_copy_file_rename(file, name)
    }

    /// Add a new file using the already compressed data from a ZIP file being read and set the last
    /// modified date and unix mode. This allows faster copies of the `ZipFile` since there is no need
    /// to decompress and compress it again. Any `ZipFile` metadata other than the last modified date
    /// and the unix mode is copied and not checked, for example the file CRC.
    ///
    /// ```no_run
    /// use std::io::{Read, Seek, Write};
    /// use zip::{DateTime, ZipArchive, ZipWriter};
    ///
    /// fn copy<R, W>(src: &mut ZipArchive<R>, dst: &mut ZipWriter<W>) -> zip::result::ZipResult<()>
    /// where
    ///     R: Read + Seek,
    ///     W: Write + Seek,
    /// {
    ///     // Retrieve file entry by name
    ///     let file = src.by_name("src_file.txt")?;
    ///
    ///     // Copy the previously obtained file entry to the destination zip archive
    ///     dst.raw_copy_file_touch(file, DateTime::default(), Some(0o644))?;
    ///
    ///     Ok(())
    /// }
    /// ```
    pub fn raw_copy_file_touch(
        &mut self,
        file: ZipFile,
        last_modified_time: DateTime,
        unix_mode: Option<u32>,
    ) -> ZipResult<()> {
        let name = file.name().to_owned();

        let mut options = SimpleFileOptions::default()
            .large_file(file.compressed_size().max(file.size()) > spec::ZIP64_BYTES_THR)
            .last_modified_time(last_modified_time)
            .compression_method(file.compression());
        if let Some(perms) = unix_mode {
            options = options.unix_permissions(perms);
        }
        Self::normalize_options(&mut options);

        self.raw_copy_file_rename_internal(file, name, options)
    }

    /// Add a directory entry.
    ///
    /// As directories have no content, you must not call [`ZipWriter::write`] before adding a new file.
    pub fn add_directory<S, T: FileOptionExtension>(
        &mut self,
        name: S,
        mut options: FileOptions<T>,
    ) -> ZipResult<()>
    where
        S: Into<String>,
    {
        if options.permissions.is_none() {
            options.permissions = Some(0o755);
        }
        // 0o40000 is the directory mode bit (S_IFDIR).
        *options.permissions.as_mut().unwrap() |= 0o40000;
        options.compression_method = Stored;
        options.encrypt_with = None;

        let name_as_string = name.into();
        // Append a slash to the filename if it does not end with it.
        let name_with_slash = match name_as_string.chars().last() {
            Some('/') | Some('\\') => name_as_string,
            _ => name_as_string + "/",
        };

        self.start_entry(name_with_slash, options, None)?;
        self.writing_to_file = false;
        self.switch_to_non_encrypting_writer()?;
        Ok(())
    }

    /// Add a directory entry, taking a Path as argument.
    ///
    /// This function ensures that the '/' path separator is used and normalizes `.` and `..`. It
    /// ignores any `..` or Windows drive letter that would produce a path outside the ZIP file's
    /// root.
    pub fn add_directory_from_path<T: FileOptionExtension, P: AsRef<Path>>(
        &mut self,
        path: P,
        options: FileOptions<T>,
    ) -> ZipResult<()> {
        self.add_directory(path_to_string(path), options)
    }

    /// Finish the last file and write all other zip-structures
    ///
    /// This will return the writer, but one should normally not append any data to the end of the file.
    /// Note that the zipfile will also be finished on drop.
    pub fn finish(mut self) -> ZipResult<W> {
        let _central_start = self.finalize()?;
        // Leave `Closed` behind so Drop does not finalize a second time.
        let inner = mem::replace(&mut self.inner, Closed);
        Ok(inner.unwrap())
    }

    /// Add a symlink entry.
    ///
    /// The zip archive will contain an entry for path `name` which is a symlink to `target`.
    ///
    /// No validation or normalization of the paths is performed. For best results,
    /// callers should normalize `\` to `/` and ensure symlinks are relative to other
    /// paths within the zip archive.
    ///
    /// WARNING: not all zip implementations preserve symlinks on extract. Some zip
    /// implementations may materialize a symlink as a regular file, possibly with the
    /// content incorrectly set to the symlink target. For maximum portability, consider
    /// storing a regular file instead.
    pub fn add_symlink<N: ToString, T: ToString, E: FileOptionExtension>(
        &mut self,
        name: N,
        target: T,
        mut options: FileOptions<E>,
    ) -> ZipResult<()> {
        if options.permissions.is_none() {
            options.permissions = Some(0o777);
        }
        *options.permissions.as_mut().unwrap() |= S_IFLNK;
        // The symlink target is stored as file content. And compressing the target path
        // likely wastes space. So always store.
        options.compression_method = Stored;

        self.start_entry(name, options, None)?;
        self.writing_to_file = true;
        let result = self.write_all(target.to_string().as_bytes());
        self.ok_or_abort_file(result)?;
        self.writing_raw = false;
        self.finish_file()?;

        Ok(())
    }

    /// Add a symlink entry, taking Paths to the location and target as arguments.
    ///
    /// This function ensures that the '/' path separator is used and normalizes `.` and `..`. It
    /// ignores any `..` or Windows drive letter that would produce a path outside the ZIP file's
    /// root.
    pub fn add_symlink_from_path<P: AsRef<Path>, T: AsRef<Path>, E: FileOptionExtension>(
        &mut self,
        path: P,
        target: T,
        options: FileOptions<E>,
    ) -> ZipResult<()> {
        self.add_symlink(path_to_string(path), path_to_string(target), options)
    }

    /// Finishes the last file and writes the central directory and end-of-central-directory
    /// records, returning the stream offset at which the central directory starts.
    fn finalize(&mut self) -> ZipResult<u64> {
        self.finish_file()?;

        let mut central_start = self.write_central_and_footer()?;
        let writer = self.inner.get_plain();
        let footer_end = writer.stream_position()?;
        let archive_end = writer.seek(SeekFrom::End(0))?;
        if footer_end < archive_end {
            // Data from an aborted file is past the end of the footer.
            // Overwrite the magic so the footer is no longer valid.
            writer.seek(SeekFrom::Start(central_start))?;
            writer.write_u32_le(0)?;
            writer.seek(SeekFrom::Start(
                footer_end - size_of::<Zip32CDEBlock>() as u64 - self.comment.len() as u64,
            ))?;
            writer.write_u32_le(0)?;

            // Rewrite the footer at the actual end.
            let central_and_footer_size = footer_end - central_start;
            writer.seek(SeekFrom::End(-(central_and_footer_size as i64)))?;
            central_start = self.write_central_and_footer()?;
        }
        Ok(central_start)
    }

    /// Writes every central directory header followed by the (ZIP64 if necessary) end records.
    fn write_central_and_footer(&mut self) -> Result<u64, ZipError> {
        let writer = self.inner.get_plain();

        let mut version_needed = MIN_VERSION as u16;
        let central_start = writer.stream_position()?;
        for file in self.files.values() {
            write_central_directory_header(writer, file)?;
            version_needed = version_needed.max(file.version_needed());
        }
        let central_size = writer.stream_position()? - central_start;
        // ZIP64 records are needed once entry count, sizes/offsets, or a ZIP64 comment exceed
        // the classic 32-bit/16-bit limits.
        let is64 = self.files.len() > spec::ZIP64_ENTRY_THR
            || central_size.max(central_start) > spec::ZIP64_BYTES_THR
            || self.zip64_comment.is_some();

        if is64 {
            let comment = self.zip64_comment.clone().unwrap_or_default();

            let zip64_footer = spec::Zip64CentralDirectoryEnd {
                record_size: comment.len() as u64 + 44,
                version_made_by: version_needed,
                version_needed_to_extract: version_needed,
                disk_number: 0,
                disk_with_central_directory: 0,
                number_of_files_on_this_disk: self.files.len() as u64,
                number_of_files: self.files.len() as u64,
                central_directory_size: central_size,
                central_directory_offset: central_start,
                extensible_data_sector: comment,
            };

            zip64_footer.write(writer)?;

            let zip64_footer = spec::Zip64CentralDirectoryEndLocator {
                disk_with_central_directory: 0,
                end_of_central_directory_offset: central_start + central_size,
                number_of_disks: 1,
            };

            zip64_footer.write(writer)?;
        }

        // The classic footer saturates at the ZIP64 sentinel values when limits are exceeded.
        let number_of_files = self.files.len().min(spec::ZIP64_ENTRY_THR) as u16;
        let footer = spec::Zip32CentralDirectoryEnd {
            disk_number: 0,
            disk_with_central_directory: 0,
            zip_file_comment: self.comment.clone(),
            number_of_files_on_this_disk: number_of_files,
            number_of_files,
            central_directory_size: central_size.min(spec::ZIP64_BYTES_THR) as u32,
            central_directory_offset: central_start.min(spec::ZIP64_BYTES_THR) as u32,
        };

        footer.write(writer)?;

        Ok(central_start)
    }

    /// Looks up an entry's index by file name.
    fn index_by_name(&self, name: &str) -> ZipResult<usize> {
        self.files.get_index_of(name).ok_or(ZipError::FileNotFound)
    }

    /// Adds another entry to the central directory referring to the same content as an existing
    /// entry. The file's local-file header will still refer to it by its original name, so
    /// unzipping the file will technically be unspecified behavior. [ZipArchive] ignores the
    /// filename in the local-file header and treat the central directory as authoritative. However,
    /// some other software (e.g. Minecraft) will refuse to extract a file copied this way.
pub fn shallow_copy_file(&mut self, src_name: &str, dest_name: &str) -> ZipResult<()> { self.finish_file()?; if src_name == dest_name { return Err(InvalidArchive("Trying to copy a file to itself")); } let src_index = self.index_by_name(src_name)?; let mut dest_data = self.files[src_index].to_owned(); dest_data.file_name = dest_name.to_string().into(); dest_data.file_name_raw = dest_name.to_string().into_bytes().into(); dest_data.central_header_start = 0; self.insert_file_data(dest_data)?; Ok(()) } /// Like `shallow_copy_file`, but uses Path arguments. /// /// This function ensures that the '/' path separator is used and normalizes `.` and `..`. It /// ignores any `..` or Windows drive letter that would produce a path outside the ZIP file's /// root. pub fn shallow_copy_file_from_path<T: AsRef<Path>, U: AsRef<Path>>( &mut self, src_path: T, dest_path: U, ) -> ZipResult<()> { self.shallow_copy_file(&path_to_string(src_path), &path_to_string(dest_path)) } } impl<W: Write + Seek> Drop for ZipWriter<W> { fn drop(&mut self) { if !self.inner.is_closed() { if let Err(e) = self.finalize() { let _ = write!(io::stderr(), "ZipWriter drop failed: {:?}", e); } } } } type SwitchWriterFunction<W> = Box<dyn FnOnce(MaybeEncrypted<W>) -> GenericZipWriter<W>>; impl<W: Write + Seek> GenericZipWriter<W> { fn prepare_next_writer( &self, compression: CompressionMethod, compression_level: Option<i64>, #[cfg(feature = "deflate-zopfli")] zopfli_buffer_size: Option<usize>, ) -> ZipResult<SwitchWriterFunction<W>> { if let Closed = self { return Err( io::Error::new(io::ErrorKind::BrokenPipe, "ZipWriter was already closed").into(), ); } { #[allow(deprecated)] #[allow(unreachable_code)] match compression { Stored => { if compression_level.is_some() { Err(UnsupportedArchive("Unsupported compression level")) } else { Ok(Box::new(|bare| Storer(bare))) } } #[cfg(feature = "_deflate-any")] CompressionMethod::Deflated => { let default = if cfg!(all( feature = "deflate-zopfli", not(feature = 
"deflate-flate2") )) { 24 } else { Compression::default().level() as i64 }; let level = clamp_opt( compression_level.unwrap_or(default), deflate_compression_level_range(), ) .ok_or(UnsupportedArchive("Unsupported compression level"))? as u32; #[cfg(feature = "deflate-zopfli")] { let best_non_zopfli = Compression::best().level(); if level > best_non_zopfli { let options = Options { iteration_count: NonZeroU64::try_from( (level - best_non_zopfli) as u64, ) .unwrap(), ..Default::default() }; return Ok(Box::new(move |bare| match zopfli_buffer_size { Some(size) => GenericZipWriter::BufferedZopfliDeflater( BufWriter::with_capacity( size, zopfli::DeflateEncoder::new( options, Default::default(), bare, ), ), ), None => GenericZipWriter::ZopfliDeflater( zopfli::DeflateEncoder::new(options, Default::default(), bare), ), })); } } #[cfg(feature = "deflate-flate2")] { Ok(Box::new(move |bare| { GenericZipWriter::Deflater(DeflateEncoder::new( bare, Compression::new(level), )) })) } } #[cfg(feature = "deflate64")] CompressionMethod::Deflate64 => { Err(UnsupportedArchive("Compressing Deflate64 is not supported")) } #[cfg(feature = "bzip2")] CompressionMethod::Bzip2 => { let level = clamp_opt( compression_level.unwrap_or(bzip2::Compression::default().level() as i64), bzip2_compression_level_range(), ) .ok_or(UnsupportedArchive("Unsupported compression level"))? 
as u32; Ok(Box::new(move |bare| { GenericZipWriter::Bzip2(BzEncoder::new( bare, bzip2::Compression::new(level), )) })) } CompressionMethod::AES => Err(UnsupportedArchive( "AES encryption is enabled through FileOptions::with_aes_encryption", )), #[cfg(feature = "zstd")] CompressionMethod::Zstd => { let level = clamp_opt( compression_level.unwrap_or(zstd::DEFAULT_COMPRESSION_LEVEL as i64), zstd::compression_level_range(), ) .ok_or(UnsupportedArchive("Unsupported compression level"))?; Ok(Box::new(move |bare| { GenericZipWriter::Zstd(ZstdEncoder::new(bare, level as i32).unwrap()) })) } #[cfg(feature = "lzma")] CompressionMethod::Lzma => { Err(UnsupportedArchive("LZMA isn't supported for compression")) } #[cfg(feature = "xz")] CompressionMethod::Xz => { Err(UnsupportedArchive("XZ isn't supported for compression")) } CompressionMethod::Unsupported(..) => { Err(UnsupportedArchive("Unsupported compression")) } } } } fn switch_to(&mut self, make_new_self: SwitchWriterFunction<W>) -> ZipResult<()> { let bare = match mem::replace(self, Closed) { Storer(w) => w, #[cfg(feature = "deflate-flate2")] GenericZipWriter::Deflater(w) => w.finish()?, #[cfg(feature = "deflate-zopfli")] GenericZipWriter::ZopfliDeflater(w) => w.finish()?, #[cfg(feature = "deflate-zopfli")] GenericZipWriter::BufferedZopfliDeflater(w) => w .into_inner() .map_err(|e| ZipError::Io(e.into_error()))? 
.finish()?, #[cfg(feature = "bzip2")] GenericZipWriter::Bzip2(w) => w.finish()?, #[cfg(feature = "zstd")] GenericZipWriter::Zstd(w) => w.finish()?, Closed => { return Err(io::Error::new( io::ErrorKind::BrokenPipe, "ZipWriter was already closed", ) .into()); } }; *self = make_new_self(bare); Ok(()) } fn ref_mut(&mut self) -> Option<&mut dyn Write> { match self { Storer(ref mut w) => Some(w as &mut dyn Write), #[cfg(feature = "deflate-flate2")] GenericZipWriter::Deflater(ref mut w) => Some(w as &mut dyn Write), #[cfg(feature = "deflate-zopfli")] GenericZipWriter::ZopfliDeflater(w) => Some(w as &mut dyn Write), #[cfg(feature = "deflate-zopfli")] GenericZipWriter::BufferedZopfliDeflater(w) => Some(w as &mut dyn Write), #[cfg(feature = "bzip2")] GenericZipWriter::Bzip2(ref mut w) => Some(w as &mut dyn Write), #[cfg(feature = "zstd")] GenericZipWriter::Zstd(ref mut w) => Some(w as &mut dyn Write), Closed => None, } } const fn is_closed(&self) -> bool { matches!(*self, Closed) } fn get_plain(&mut self) -> &mut W { match *self { Storer(MaybeEncrypted::Unencrypted(ref mut w)) => w, _ => panic!("Should have switched to stored and unencrypted beforehand"), } } fn unwrap(self) -> W { match self { Storer(MaybeEncrypted::Unencrypted(w)) => w, _ => panic!("Should have switched to stored and unencrypted beforehand"), } } } #[cfg(feature = "_deflate-any")] fn deflate_compression_level_range() -> std::ops::RangeInclusive<i64> { let min = if cfg!(feature = "deflate-flate2") { Compression::fast().level() as i64 } else { Compression::best().level() as i64 + 1 }; let max = Compression::best().level() as i64 + if cfg!(feature = "deflate-zopfli") { u8::MAX as i64 } else { 0 }; min..=max } #[cfg(feature = "bzip2")] fn bzip2_compression_level_range() -> std::ops::RangeInclusive<i64> { let min = bzip2::Compression::fast().level() as i64; let max = bzip2::Compression::best().level() as i64; min..=max } #[cfg(any(feature = "_deflate-any", feature = "bzip2", feature = "zstd"))] fn clamp_opt<T: 
Ord + Copy, U: Ord + Copy + TryFrom<T>>(
    value: T,
    range: std::ops::RangeInclusive<U>,
) -> Option<T> {
    // `ok()?` turns a failed T -> U conversion into `None` rather than
    // treating it as in-range.
    if range.contains(&value.try_into().ok()?) {
        Some(value)
    } else {
        None
    }
}

/// Rewrites the AES (0x9901) extra-data field of `file` in place, both in
/// the output stream and in the in-memory `extra_field` copy.
///
/// No-op (returns `Ok(())`) when the entry is not AES-encrypted.
///
/// NOTE(review): the `unwrap()`s below assume `extra_data_start` and
/// `extra_field` were populated when the AES header was first written —
/// an invariant of `aes_mode` being `Some`; confirm at the call sites.
fn update_aes_extra_data<W: Write + Seek>(writer: &mut W, file: &mut ZipFileData) -> ZipResult<()> {
    let Some((aes_mode, version, compression_method)) = file.aes_mode else {
        return Ok(());
    };

    let extra_data_start = file.extra_data_start.unwrap();

    // Seek back to where the AES extra field was originally emitted.
    writer.seek(SeekFrom::Start(
        extra_data_start + file.aes_extra_data_start,
    ))?;

    let mut buf = Vec::new();

    /* TODO: implement this using the Block trait! */
    // Extra field header ID.
    buf.write_u16_le(0x9901)?;
    // Data size.
    buf.write_u16_le(7)?;
    // Integer version number.
    buf.write_u16_le(version as u16)?;
    // Vendor ID.
    buf.write_all(b"AE")?;
    // AES encryption strength.
    buf.write_all(&[aes_mode as u8])?;
    // Real compression method.
    buf.write_u16_le(compression_method.serialize_to_u16())?;

    writer.write_all(&buf)?;

    // Keep the in-memory extra-field bytes consistent with what was just
    // written to the stream.
    let aes_extra_data_start = file.aes_extra_data_start as usize;
    let extra_field = Arc::get_mut(file.extra_field.as_mut().unwrap()).unwrap();
    extra_field[aes_extra_data_start..aes_extra_data_start + buf.len()].copy_from_slice(&buf);

    Ok(())
}

/// Seeks back into the already-written local file header and patches in the
/// final CRC-32 and (for non-ZIP64 entries) the 32-bit sizes.
///
/// # Errors
///
/// Returns an I/O `ZipError` if the compressed size exceeds the ZIP64
/// threshold but `large_file` was never enabled, or on any seek/write error.
fn update_local_file_header<T: Write + Seek>(writer: &mut T, file: &ZipFileData) -> ZipResult<()> {
    // Fixed offset of the CRC-32 field within a local file header.
    const CRC32_OFFSET: u64 = 14;
    writer.seek(SeekFrom::Start(file.header_start + CRC32_OFFSET))?;
    writer.write_u32_le(file.crc32)?;
    if file.large_file {
        // Sizes live in the ZIP64 extra field instead of the 32-bit slots.
        update_local_zip64_extra_field(writer, file)?;
    } else {
        // check compressed size as well as it can also be slightly larger than uncompressed size
        if file.compressed_size > spec::ZIP64_BYTES_THR {
            return Err(ZipError::Io(io::Error::new(
                io::ErrorKind::Other,
                "Large file option has not been set",
            )));
        }
        writer.write_u32_le(file.compressed_size as u32)?;
        // uncompressed size is already checked on write to catch it as soon as possible
        writer.write_u32_le(file.uncompressed_size as u32)?;
    }
    Ok(())
}

/// Writes one central-directory entry for `file`: fixed-size header block,
/// then file name, extra field(s), and comment. (Signature continues on the
/// next chunk line.)
fn write_central_directory_header<T: Write>(writer: &mut T, file:
&ZipFileData) -> ZipResult<()> {
    // Fixed-size portion of the central-directory record.
    let block = file.block()?;
    block.write(writer)?;
    // file name
    writer.write_all(&file.file_name_raw)?;
    // extra field
    if let Some(extra_field) = &file.extra_field {
        writer.write_all(extra_field)?;
    }
    if let Some(central_extra_field) = &file.central_extra_field {
        writer.write_all(central_extra_field)?;
    }
    // file comment
    writer.write_all(file.file_comment.as_bytes())?;
    Ok(())
}

/// Seeks back to the entry's ZIP64 extra field (immediately after the local
/// header block and file name) and rewrites it with the final sizes.
///
/// # Errors
///
/// Returns `InvalidArchive` if the entry has no ZIP64 extra-field block to
/// update, or propagates any seek/write error.
fn update_local_zip64_extra_field<T: Write + Seek>(
    writer: &mut T,
    file: &ZipFileData,
) -> ZipResult<()> {
    let block = file.zip64_extra_field_block().ok_or(InvalidArchive(
        "Attempted to update a nonexistent ZIP64 extra field",
    ))?;

    // ZIP64 extra field starts right after the fixed local header and the
    // file name bytes.
    let zip64_extra_field_start = file.header_start
        + size_of::<ZipLocalEntryBlock>() as u64
        + file.file_name_raw.len() as u64;

    writer.seek(SeekFrom::Start(zip64_extra_field_start))?;
    let block = block.serialize();
    writer.write_all(&block)?;
    Ok(())
}

// Extra-field header IDs. NOTE(review): given the cfg gate, these are
// presumably the reserved IDs that user-supplied extra data may not use
// unless the `unreserved` feature is enabled — confirm against the
// extra-data validation code elsewhere in this file.
#[cfg(not(feature = "unreserved"))]
const EXTRA_FIELD_MAPPING: [u16; 43] = [
    0x0007, 0x0008, 0x0009, 0x000a, 0x000c, 0x000d, 0x000e, 0x000f, 0x0014, 0x0015, 0x0016,
    0x0017, 0x0018, 0x0019, 0x0020, 0x0021, 0x0022, 0x0023, 0x0065, 0x0066, 0x4690, 0x07c8,
    0x2605, 0x2705, 0x2805, 0x334d, 0x4341, 0x4453, 0x4704, 0x470f, 0x4b46, 0x4c41, 0x4d49,
    0x4f4c, 0x5356, 0x554e, 0x5855, 0x6542, 0x756e, 0x7855, 0xa220, 0xfd4a, 0x9902,
];

#[cfg(test)]
#[allow(unknown_lints)] // needless_update is new in clippy pre 1.29.0
#[allow(clippy::needless_update)] // So we can use the same FileOptions decls with and without zopfli_buffer_size
#[allow(clippy::octal_escapes)] // many false positives in converted fuzz cases
mod test {
    use super::{ExtendedFileOptions, FileOptions, FullFileOptions, ZipWriter};
    use crate::compression::CompressionMethod;
    use crate::result::ZipResult;
    use crate::types::DateTime;
    use crate::write::EncryptWith::ZipCrypto;
    use crate::write::SimpleFileOptions;
    use crate::zipcrypto::ZipCryptoKeys;
    use crate::CompressionMethod::Stored;
    use crate::ZipArchive;
    use std::io::{Cursor, Read, Write};
    use
std::marker::PhantomData; use std::path::PathBuf; #[test] fn write_empty_zip() { let mut writer = ZipWriter::new(Cursor::new(Vec::new())); writer.set_comment("ZIP"); let result = writer.finish().unwrap(); assert_eq!(result.get_ref().len(), 25); assert_eq!( *result.get_ref(), [80, 75, 5, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 90, 73, 80] ); } #[test] fn unix_permissions_bitmask() { // unix_permissions() throws away upper bits. let options = SimpleFileOptions::default().unix_permissions(0o120777); assert_eq!(options.permissions, Some(0o777)); } #[test] fn write_zip_dir() { let mut writer = ZipWriter::new(Cursor::new(Vec::new())); writer .add_directory( "test", SimpleFileOptions::default().last_modified_time( DateTime::from_date_and_time(2018, 8, 15, 20, 45, 6).unwrap(), ), ) .unwrap(); assert!(writer .write(b"writing to a directory is not allowed, and will not write any data") .is_err()); let result = writer.finish().unwrap(); assert_eq!(result.get_ref().len(), 108); assert_eq!( *result.get_ref(), &[ 80u8, 75, 3, 4, 20, 0, 0, 0, 0, 0, 163, 165, 15, 77, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 116, 101, 115, 116, 47, 80, 75, 1, 2, 20, 3, 20, 0, 0, 0, 0, 0, 163, 165, 15, 77, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 237, 65, 0, 0, 0, 0, 116, 101, 115, 116, 47, 80, 75, 5, 6, 0, 0, 0, 0, 1, 0, 1, 0, 51, 0, 0, 0, 35, 0, 0, 0, 0, 0, ] as &[u8] ); } #[test] fn write_symlink_simple() { let mut writer = ZipWriter::new(Cursor::new(Vec::new())); writer .add_symlink( "name", "target", SimpleFileOptions::default().last_modified_time( DateTime::from_date_and_time(2018, 8, 15, 20, 45, 6).unwrap(), ), ) .unwrap(); assert!(writer .write(b"writing to a symlink is not allowed and will not write any data") .is_err()); let result = writer.finish().unwrap(); assert_eq!(result.get_ref().len(), 112); assert_eq!( *result.get_ref(), &[ 80u8, 75, 3, 4, 10, 0, 0, 0, 0, 0, 163, 165, 15, 77, 252, 47, 111, 70, 6, 0, 0, 0, 6, 0, 0, 0, 4, 0, 0, 
0, 110, 97, 109, 101, 116, 97, 114, 103, 101, 116, 80, 75, 1, 2, 10, 3, 10, 0, 0, 0, 0, 0, 163, 165, 15, 77, 252, 47, 111, 70, 6, 0, 0, 0, 6, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 161, 0, 0, 0, 0, 110, 97, 109, 101, 80, 75, 5, 6, 0, 0, 0, 0, 1, 0, 1, 0, 50, 0, 0, 0, 40, 0, 0, 0, 0, 0 ] as &[u8], ); } #[test] fn test_path_normalization() { let mut path = PathBuf::new(); path.push("foo"); path.push("bar"); path.push(".."); path.push("."); path.push("example.txt"); let mut writer = ZipWriter::new(Cursor::new(Vec::new())); writer .start_file_from_path(path, SimpleFileOptions::default()) .unwrap(); let archive = writer.finish_into_readable().unwrap(); assert_eq!(Some("foo/example.txt"), archive.name_for_index(0)); } #[test] fn write_symlink_wonky_paths() { let mut writer = ZipWriter::new(Cursor::new(Vec::new())); writer .add_symlink( "directory\\link", "/absolute/symlink\\with\\mixed/slashes", SimpleFileOptions::default().last_modified_time( DateTime::from_date_and_time(2018, 8, 15, 20, 45, 6).unwrap(), ), ) .unwrap(); assert!(writer .write(b"writing to a symlink is not allowed and will not write any data") .is_err()); let result = writer.finish().unwrap(); assert_eq!(result.get_ref().len(), 162); assert_eq!( *result.get_ref(), &[ 80u8, 75, 3, 4, 10, 0, 0, 0, 0, 0, 163, 165, 15, 77, 95, 41, 81, 245, 36, 0, 0, 0, 36, 0, 0, 0, 14, 0, 0, 0, 100, 105, 114, 101, 99, 116, 111, 114, 121, 92, 108, 105, 110, 107, 47, 97, 98, 115, 111, 108, 117, 116, 101, 47, 115, 121, 109, 108, 105, 110, 107, 92, 119, 105, 116, 104, 92, 109, 105, 120, 101, 100, 47, 115, 108, 97, 115, 104, 101, 115, 80, 75, 1, 2, 10, 3, 10, 0, 0, 0, 0, 0, 163, 165, 15, 77, 95, 41, 81, 245, 36, 0, 0, 0, 36, 0, 0, 0, 14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 161, 0, 0, 0, 0, 100, 105, 114, 101, 99, 116, 111, 114, 121, 92, 108, 105, 110, 107, 80, 75, 5, 6, 0, 0, 0, 0, 1, 0, 1, 0, 60, 0, 0, 0, 80, 0, 0, 0, 0, 0 ] as &[u8], ); } #[test] fn write_mimetype_zip() { let mut writer = 
ZipWriter::new(Cursor::new(Vec::new())); let options = FileOptions { compression_method: Stored, compression_level: None, last_modified_time: DateTime::default(), permissions: Some(33188), large_file: false, encrypt_with: None, extended_options: (), alignment: 1, #[cfg(feature = "deflate-zopfli")] zopfli_buffer_size: None, }; writer.start_file("mimetype", options).unwrap(); writer .write_all(b"application/vnd.oasis.opendocument.text") .unwrap(); let result = writer.finish().unwrap(); assert_eq!(result.get_ref().len(), 153); let mut v = Vec::new(); v.extend_from_slice(include_bytes!("../tests/data/mimetype.zip")); assert_eq!(result.get_ref(), &v); } const RT_TEST_TEXT: &str = "And I can't stop thinking about the moments that I lost to you\ And I can't stop thinking of things I used to do\ And I can't stop making bad decisions\ And I can't stop eating stuff you make me chew\ I put on a smile like you wanna see\ Another day goes by that I long to be like you"; const RT_TEST_FILENAME: &str = "subfolder/sub-subfolder/can't_stop.txt"; const SECOND_FILENAME: &str = "different_name.xyz"; const THIRD_FILENAME: &str = "third_name.xyz"; #[test] fn write_non_utf8() { let mut writer = ZipWriter::new(Cursor::new(Vec::new())); let options = FileOptions { compression_method: Stored, compression_level: None, last_modified_time: DateTime::default(), permissions: Some(33188), large_file: false, encrypt_with: None, extended_options: (), alignment: 1, #[cfg(feature = "deflate-zopfli")] zopfli_buffer_size: None, }; // GB18030 // "中文" = [214, 208, 206, 196] let filename = unsafe { String::from_utf8_unchecked(vec![214, 208, 206, 196]) }; writer.start_file(filename, options).unwrap(); writer.write_all(b"encoding GB18030").unwrap(); // SHIFT_JIS // "日文" = [147, 250, 149, 182] let filename = unsafe { String::from_utf8_unchecked(vec![147, 250, 149, 182]) }; writer.start_file(filename, options).unwrap(); writer.write_all(b"encoding SHIFT_JIS").unwrap(); let result = writer.finish().unwrap(); 
assert_eq!(result.get_ref().len(), 224); let mut v = Vec::new(); v.extend_from_slice(include_bytes!("../tests/data/non_utf8.zip")); assert_eq!(result.get_ref(), &v); } #[test] fn path_to_string() { let mut path = PathBuf::new(); #[cfg(windows)] path.push(r"C:\"); #[cfg(unix)] path.push("/"); path.push("windows"); path.push(".."); path.push("."); path.push("system32"); let path_str = super::path_to_string(&path); assert_eq!(&*path_str, "system32"); } #[test] fn test_shallow_copy() { let mut writer = ZipWriter::new(Cursor::new(Vec::new())); let options = FileOptions { compression_method: CompressionMethod::default(), compression_level: None, last_modified_time: DateTime::default(), permissions: Some(33188), large_file: false, encrypt_with: None, extended_options: (), alignment: 0, #[cfg(feature = "deflate-zopfli")] zopfli_buffer_size: None, }; writer.start_file(RT_TEST_FILENAME, options).unwrap(); writer.write_all(RT_TEST_TEXT.as_ref()).unwrap(); writer .shallow_copy_file(RT_TEST_FILENAME, SECOND_FILENAME) .unwrap(); writer .shallow_copy_file(RT_TEST_FILENAME, SECOND_FILENAME) .expect_err("Duplicate filename"); let zip = writer.finish().unwrap(); let mut writer = ZipWriter::new_append(zip).unwrap(); writer .shallow_copy_file(SECOND_FILENAME, SECOND_FILENAME) .expect_err("Duplicate filename"); let mut reader = writer.finish_into_readable().unwrap(); let mut file_names: Vec<&str> = reader.file_names().collect(); file_names.sort(); let mut expected_file_names = vec![RT_TEST_FILENAME, SECOND_FILENAME]; expected_file_names.sort(); assert_eq!(file_names, expected_file_names); let mut first_file_content = String::new(); reader .by_name(RT_TEST_FILENAME) .unwrap() .read_to_string(&mut first_file_content) .unwrap(); assert_eq!(first_file_content, RT_TEST_TEXT); let mut second_file_content = String::new(); reader .by_name(SECOND_FILENAME) .unwrap() .read_to_string(&mut second_file_content) .unwrap(); assert_eq!(second_file_content, RT_TEST_TEXT); } #[test] fn test_deep_copy() 
{ let mut writer = ZipWriter::new(Cursor::new(Vec::new())); let options = FileOptions { compression_method: CompressionMethod::default(), compression_level: None, last_modified_time: DateTime::default(), permissions: Some(33188), large_file: false, encrypt_with: None, extended_options: (), alignment: 0, #[cfg(feature = "deflate-zopfli")] zopfli_buffer_size: None, }; writer.start_file(RT_TEST_FILENAME, options).unwrap(); writer.write_all(RT_TEST_TEXT.as_ref()).unwrap(); writer .deep_copy_file(RT_TEST_FILENAME, SECOND_FILENAME) .unwrap(); let zip = writer.finish().unwrap(); let mut writer = ZipWriter::new_append(zip).unwrap(); writer .deep_copy_file(RT_TEST_FILENAME, THIRD_FILENAME) .unwrap(); let zip = writer.finish().unwrap(); let mut reader = ZipArchive::new(zip).unwrap(); let mut file_names: Vec<&str> = reader.file_names().collect(); file_names.sort(); let mut expected_file_names = vec![RT_TEST_FILENAME, SECOND_FILENAME, THIRD_FILENAME]; expected_file_names.sort(); assert_eq!(file_names, expected_file_names); let mut first_file_content = String::new(); reader .by_name(RT_TEST_FILENAME) .unwrap() .read_to_string(&mut first_file_content) .unwrap(); assert_eq!(first_file_content, RT_TEST_TEXT); let mut second_file_content = String::new(); reader .by_name(SECOND_FILENAME) .unwrap() .read_to_string(&mut second_file_content) .unwrap(); assert_eq!(second_file_content, RT_TEST_TEXT); } #[test] fn duplicate_filenames() { let mut writer = ZipWriter::new(Cursor::new(Vec::new())); writer .start_file("foo/bar/test", SimpleFileOptions::default()) .unwrap(); writer .write_all("The quick brown 🦊 jumps over the lazy 🐕".as_bytes()) .unwrap(); writer .start_file("foo/bar/test", SimpleFileOptions::default()) .expect_err("Expected duplicate filename not to be allowed"); } #[test] fn test_filename_looks_like_zip64_locator() { let mut writer = ZipWriter::new(Cursor::new(Vec::new())); writer .start_file( "PK\u{6}\u{7}\0\0\0\u{11}\0\0\0\0\0\0\0\0\0\0\0\0", SimpleFileOptions::default(), ) 
.unwrap(); let zip = writer.finish().unwrap(); let _ = ZipArchive::new(zip).unwrap(); } #[test] fn test_filename_looks_like_zip64_locator_2() { let mut writer = ZipWriter::new(Cursor::new(Vec::new())); writer .start_file( "PK\u{6}\u{6}\0\0\0\0\0\0\0\0\0\0PK\u{6}\u{7}\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0", SimpleFileOptions::default(), ) .unwrap(); let zip = writer.finish().unwrap(); let _ = ZipArchive::new(zip).unwrap(); } #[test] fn test_filename_looks_like_zip64_locator_2a() { let mut writer = ZipWriter::new(Cursor::new(Vec::new())); writer .start_file( "PK\u{6}\u{6}PK\u{6}\u{7}\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0", SimpleFileOptions::default(), ) .unwrap(); let zip = writer.finish().unwrap(); let _ = ZipArchive::new(zip).unwrap(); } #[test] fn test_filename_looks_like_zip64_locator_3() { let mut writer = ZipWriter::new(Cursor::new(Vec::new())); writer .start_file("\0PK\u{6}\u{6}", SimpleFileOptions::default()) .unwrap(); writer .start_file( "\0\u{4}\0\0PK\u{6}\u{7}\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\u{3}", SimpleFileOptions::default(), ) .unwrap(); let zip = writer.finish().unwrap(); let _ = ZipArchive::new(zip).unwrap(); } #[test] fn test_filename_looks_like_zip64_locator_4() { let mut writer = ZipWriter::new(Cursor::new(Vec::new())); writer .start_file("PK\u{6}\u{6}", SimpleFileOptions::default()) .unwrap(); writer .start_file("\0\0\0\0\0\0", SimpleFileOptions::default()) .unwrap(); writer .start_file("\0", SimpleFileOptions::default()) .unwrap(); writer.start_file("", SimpleFileOptions::default()).unwrap(); writer .start_file("\0\0", SimpleFileOptions::default()) .unwrap(); writer .start_file( "\0\0\0PK\u{6}\u{7}\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0", SimpleFileOptions::default(), ) .unwrap(); let zip = writer.finish().unwrap(); let _ = ZipArchive::new(zip).unwrap(); } #[test] fn test_filename_looks_like_zip64_locator_5() -> ZipResult<()> { let mut writer = ZipWriter::new(Cursor::new(Vec::new())); writer .add_directory("", SimpleFileOptions::default().with_alignment(21)) 
.unwrap(); let mut writer = ZipWriter::new_append(writer.finish().unwrap()).unwrap(); writer.shallow_copy_file("/", "").unwrap(); writer.shallow_copy_file("", "\0").unwrap(); writer.shallow_copy_file("\0", "PK\u{6}\u{6}").unwrap(); let mut writer = ZipWriter::new_append(writer.finish().unwrap()).unwrap(); writer .start_file("\0\0\0\0\0\0", SimpleFileOptions::default()) .unwrap(); let mut writer = ZipWriter::new_append(writer.finish().unwrap()).unwrap(); writer .start_file( "#PK\u{6}\u{7}\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0", SimpleFileOptions::default(), ) .unwrap(); let zip = writer.finish().unwrap(); let _ = ZipArchive::new(zip).unwrap(); Ok(()) } #[test] fn remove_shallow_copy_keeps_original() -> ZipResult<()> { let mut writer = ZipWriter::new(Cursor::new(Vec::new())); writer .start_file("original", SimpleFileOptions::default()) .unwrap(); writer.write_all(RT_TEST_TEXT.as_bytes()).unwrap(); writer .shallow_copy_file("original", "shallow_copy") .unwrap(); writer.abort_file().unwrap(); let mut zip = ZipArchive::new(writer.finish().unwrap()).unwrap(); let mut file = zip.by_name("original").unwrap(); let mut contents = Vec::new(); file.read_to_end(&mut contents).unwrap(); assert_eq!(RT_TEST_TEXT.as_bytes(), contents); Ok(()) } #[test] fn remove_encrypted_file() -> ZipResult<()> { let mut writer = ZipWriter::new(Cursor::new(Vec::new())); let first_file_options = SimpleFileOptions::default() .with_alignment(65535) .with_deprecated_encryption(b"Password"); writer.start_file("", first_file_options).unwrap(); writer.abort_file().unwrap(); let zip = writer.finish().unwrap(); let mut writer = ZipWriter::new(zip); writer.start_file("", SimpleFileOptions::default()).unwrap(); Ok(()) } #[test] fn remove_encrypted_aligned_symlink() -> ZipResult<()> { let mut options = SimpleFileOptions::default(); options = options.with_deprecated_encryption(b"Password"); options.alignment = 65535; let mut writer = ZipWriter::new(Cursor::new(Vec::new())); writer.add_symlink("", "s\t\0\0ggggg\0\0", 
options).unwrap(); writer.abort_file().unwrap(); let zip = writer.finish().unwrap(); let mut writer = ZipWriter::new_append(zip).unwrap(); writer.start_file("", SimpleFileOptions::default()).unwrap(); Ok(()) } #[cfg(feature = "deflate-zopfli")] #[test] fn zopfli_empty_write() -> ZipResult<()> { let mut options = SimpleFileOptions::default(); options = options .compression_method(CompressionMethod::default()) .compression_level(Some(264)); let mut writer = ZipWriter::new(Cursor::new(Vec::new())); writer.start_file("", options).unwrap(); writer.write_all(&[]).unwrap(); writer.write_all(&[]).unwrap(); Ok(()) } #[test] fn crash_with_no_features() -> ZipResult<()> { const ORIGINAL_FILE_NAME: &str = "PK\u{6}\u{6}\0\0\0\0\0\0\0\0\0\u{2}g\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\u{1}\0\0\0\0\0\0\0\0\0\0PK\u{6}\u{7}\0\0\0\0\0\0\0\0\0\0\0\0\u{7}\0\t'"; let mut writer = ZipWriter::new(Cursor::new(Vec::new())); let mut options = SimpleFileOptions::default(); options = options.with_alignment(3584).compression_method(Stored); writer.start_file(ORIGINAL_FILE_NAME, options)?; let archive = writer.finish()?; let mut writer = ZipWriter::new_append(archive)?; writer.shallow_copy_file(ORIGINAL_FILE_NAME, "\u{6}\\")?; writer.finish()?; Ok(()) } #[test] fn test_alignment() { let page_size = 4096; let options = SimpleFileOptions::default() .compression_method(Stored) .with_alignment(page_size); let mut zip = ZipWriter::new(Cursor::new(Vec::new())); let contents = b"sleeping"; let () = zip.start_file("sleep", options).unwrap(); let _count = zip.write(&contents[..]).unwrap(); let mut zip = zip.finish_into_readable().unwrap(); let file = zip.by_index(0).unwrap(); assert_eq!(file.name(), "sleep"); assert_eq!(file.data_start(), page_size.into()); } #[test] fn test_alignment_2() { let page_size = 4096; let mut data = Vec::new(); { let options = SimpleFileOptions::default() .compression_method(Stored) .with_alignment(page_size); let mut zip = ZipWriter::new(Cursor::new(&mut data)); let contents = 
b"sleeping"; let () = zip.start_file("sleep", options).unwrap(); let _count = zip.write(&contents[..]).unwrap(); } assert_eq!(data[4096..4104], b"sleeping"[..]); { let mut zip = ZipArchive::new(Cursor::new(&mut data)).unwrap(); let file = zip.by_index(0).unwrap(); assert_eq!(file.name(), "sleep"); assert_eq!(file.data_start(), page_size.into()); } } #[test] fn test_crash_short_read() { let mut writer = ZipWriter::new(Cursor::new(Vec::new())); let comment = vec![ 1, 80, 75, 5, 6, 237, 237, 237, 237, 237, 237, 237, 237, 44, 255, 191, 255, 255, 255, 255, 255, 255, 255, 255, 16, ] .into_boxed_slice(); writer.set_raw_comment(comment); let options = SimpleFileOptions::default() .compression_method(Stored) .with_alignment(11823); writer.start_file("", options).unwrap(); writer.write_all(&[255, 255, 44, 255, 0]).unwrap(); let written = writer.finish().unwrap(); let _ = ZipWriter::new_append(written).unwrap(); } #[cfg(all(feature = "_deflate-any", feature = "aes-crypto"))] #[test] fn test_fuzz_failure_2024_05_08() -> ZipResult<()> { let mut first_writer = ZipWriter::new(Cursor::new(Vec::new())); let mut second_writer = ZipWriter::new(Cursor::new(Vec::new())); let options = SimpleFileOptions::default() .compression_method(Stored) .with_alignment(46036); second_writer.add_symlink("\0", "", options)?; let second_archive = second_writer.finish_into_readable()?.into_inner(); let mut second_writer = ZipWriter::new_append(second_archive)?; let options = SimpleFileOptions::default() .compression_method(CompressionMethod::Deflated) .large_file(true) .with_alignment(46036) .with_aes_encryption(crate::AesMode::Aes128, "\0\0"); second_writer.add_symlink("", "", options)?; let second_archive = second_writer.finish_into_readable()?.into_inner(); let mut second_writer = ZipWriter::new_append(second_archive)?; let options = SimpleFileOptions::default().compression_method(Stored); second_writer.start_file(" ", options)?; let second_archive = second_writer.finish_into_readable()?; 
first_writer.merge_archive(second_archive)?; let _ = ZipArchive::new(first_writer.finish()?)?; Ok(()) } #[cfg(feature = "bzip2")] #[test] fn test_fuzz_failure_2024_06_08() -> ZipResult<()> { use crate::write::ExtendedFileOptions; use CompressionMethod::Bzip2; let mut writer = ZipWriter::new(Cursor::new(Vec::new())); writer.set_flush_on_finish_file(false); const SYMLINK_PATH: &str = "PK\u{6}\u{6}K\u{6}\u{6}\u{6}\0\0\0\0\u{18}\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\u{1}\0\0\0\0\0\0\0\0\u{1}\0\0PK\u{1}\u{2},\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\u{1}\0\0PK\u{1}\u{2},\0\0\0\0\0\0\0\0\0\0l\0\0\0\0\0\0PK\u{6}\u{7}P\0\0\0\0\0\0\0\0\0\0\0\0\u{1}\0\0"; let sub_writer = { let mut writer = ZipWriter::new(Cursor::new(Vec::new())); writer.set_flush_on_finish_file(false); let options = FileOptions { compression_method: Bzip2, compression_level: None, last_modified_time: DateTime::from_date_and_time(1980, 5, 20, 21, 0, 57)?, permissions: None, large_file: false, encrypt_with: None, extended_options: ExtendedFileOptions { extra_data: vec![].into(), central_extra_data: vec![].into(), }, alignment: 2048, ..Default::default() }; writer.add_symlink_from_path(SYMLINK_PATH, "||\0\0\0\0", options)?; writer = ZipWriter::new_append(writer.finish_into_readable()?.into_inner())?; writer.deep_copy_file_from_path(SYMLINK_PATH, "")?; writer = ZipWriter::new_append(writer.finish_into_readable()?.into_inner())?; writer.abort_file()?; writer }; writer.merge_archive(sub_writer.finish_into_readable()?)?; writer = ZipWriter::new_append(writer.finish_into_readable()?.into_inner())?; writer.deep_copy_file_from_path(SYMLINK_PATH, "foo")?; let _ = writer.finish_into_readable()?; Ok(()) } #[test] fn test_short_extra_data() { let mut writer = ZipWriter::new(Cursor::new(Vec::new())); writer.set_flush_on_finish_file(false); let options = FileOptions { extended_options: ExtendedFileOptions { extra_data: vec![].into(), central_extra_data: vec![99, 0, 15, 0, 207].into(), }, ..Default::default() }; 
assert!(writer.start_file_from_path("", options).is_err()); } #[test] #[cfg(not(feature = "unreserved"))] fn test_invalid_extra_data() -> ZipResult<()> { use crate::write::ExtendedFileOptions; let mut writer = ZipWriter::new(Cursor::new(Vec::new())); writer.set_flush_on_finish_file(false); let options = FileOptions { compression_method: Stored, compression_level: None, last_modified_time: DateTime::from_date_and_time(1980, 1, 4, 6, 54, 0)?, permissions: None, large_file: false, encrypt_with: None, extended_options: ExtendedFileOptions { extra_data: vec![].into(), central_extra_data: vec![ 7, 0, 15, 0, 207, 117, 177, 117, 112, 2, 0, 255, 255, 131, 255, 255, 255, 80, 185, ] .into(), }, alignment: 32787, ..Default::default() }; assert!(writer.start_file_from_path("", options).is_err()); Ok(()) } #[test] #[cfg(not(feature = "unreserved"))] fn test_invalid_extra_data_unreserved() { use crate::write::ExtendedFileOptions; let mut writer = ZipWriter::new(Cursor::new(Vec::new())); let options = FileOptions { compression_method: Stored, compression_level: None, last_modified_time: DateTime::from_date_and_time(2021, 8, 8, 1, 0, 29).unwrap(), permissions: None, large_file: true, encrypt_with: None, extended_options: ExtendedFileOptions { extra_data: vec![].into(), central_extra_data: vec![ 1, 41, 4, 0, 1, 255, 245, 117, 117, 112, 5, 0, 80, 255, 149, 255, 247, ] .into(), }, alignment: 4103, ..Default::default() }; assert!(writer.start_file_from_path("", options).is_err()); } #[cfg(feature = "deflate64")] #[test] fn test_fuzz_crash_2024_06_13a() -> ZipResult<()> { use crate::write::ExtendedFileOptions; use CompressionMethod::Deflate64; let mut writer = ZipWriter::new(Cursor::new(Vec::new())); writer.set_flush_on_finish_file(false); let options = FileOptions { compression_method: Deflate64, compression_level: None, last_modified_time: DateTime::from_date_and_time(2039, 4, 17, 6, 18, 19)?, permissions: None, large_file: true, encrypt_with: None, extended_options: 
ExtendedFileOptions { extra_data: vec![].into(), central_extra_data: vec![].into(), }, alignment: 4, ..Default::default() }; writer.add_directory_from_path("", options)?; let _ = writer.finish_into_readable()?; Ok(()) } #[test] fn test_fuzz_crash_2024_06_13b() -> ZipResult<()> { use crate::write::ExtendedFileOptions; let mut writer = ZipWriter::new(Cursor::new(Vec::new())); writer.set_flush_on_finish_file(false); let sub_writer = { let mut writer = ZipWriter::new(Cursor::new(Vec::new())); writer.set_flush_on_finish_file(false); let options = FileOptions { compression_method: Stored, compression_level: None, last_modified_time: DateTime::from_date_and_time(1980, 4, 14, 6, 11, 54)?, permissions: None, large_file: false, encrypt_with: None, extended_options: ExtendedFileOptions { extra_data: vec![].into(), central_extra_data: vec![].into(), }, alignment: 185, ..Default::default() }; writer.add_symlink_from_path("", "", options)?; writer }; writer.merge_archive(sub_writer.finish_into_readable()?)?; writer.deep_copy_file_from_path("", "_copy")?; let _ = writer.finish_into_readable()?; Ok(()) } #[test] fn test_fuzz_crash_2024_06_14() -> ZipResult<()> { let mut writer = ZipWriter::new(Cursor::new(Vec::new())); writer.set_flush_on_finish_file(false); let sub_writer = { let mut writer = ZipWriter::new(Cursor::new(Vec::new())); writer.set_flush_on_finish_file(false); let options = FullFileOptions { compression_method: Stored, large_file: true, alignment: 93, ..Default::default() }; writer.start_file_from_path("\0", options)?; writer = ZipWriter::new_append(writer.finish()?)?; writer.deep_copy_file_from_path("\0", "")?; writer }; writer.merge_archive(sub_writer.finish_into_readable()?)?; writer.deep_copy_file_from_path("", "copy")?; let _ = writer.finish_into_readable()?; Ok(()) } #[test] fn test_fuzz_crash_2024_06_14a() -> ZipResult<()> { let mut writer = ZipWriter::new(Cursor::new(Vec::new())); writer.set_flush_on_finish_file(false); let options = FileOptions { 
compression_method: Stored, compression_level: None, last_modified_time: DateTime::from_date_and_time(2083, 5, 30, 21, 45, 35)?, permissions: None, large_file: false, encrypt_with: None, extended_options: ExtendedFileOptions { extra_data: vec![].into(), central_extra_data: vec![].into(), }, alignment: 2565, ..Default::default() }; writer.add_symlink_from_path("", "", options)?; writer.abort_file()?; let options = FileOptions { compression_method: Stored, compression_level: None, last_modified_time: DateTime::default(), permissions: None, large_file: false, encrypt_with: None, extended_options: ExtendedFileOptions { extra_data: vec![].into(), central_extra_data: vec![].into(), }, alignment: 0, ..Default::default() }; writer.start_file_from_path("", options)?; let _ = writer.finish_into_readable()?; Ok(()) } #[allow(deprecated)] #[test] fn test_fuzz_crash_2024_06_14b() -> ZipResult<()> { let mut writer = ZipWriter::new(Cursor::new(Vec::new())); writer.set_flush_on_finish_file(false); let options = FileOptions { compression_method: Stored, compression_level: None, last_modified_time: DateTime::from_date_and_time(2078, 3, 6, 12, 48, 58)?, permissions: None, large_file: true, encrypt_with: None, extended_options: ExtendedFileOptions { extra_data: vec![].into(), central_extra_data: vec![].into(), }, alignment: 65521, ..Default::default() }; writer.start_file_from_path("\u{4}\0@\n//\u{c}", options)?; writer = ZipWriter::new_append(writer.finish()?)?; writer.abort_file()?; let options = FileOptions { compression_method: CompressionMethod::Unsupported(65535), compression_level: None, last_modified_time: DateTime::from_date_and_time(2055, 10, 2, 11, 48, 49)?, permissions: None, large_file: true, encrypt_with: None, extended_options: ExtendedFileOptions { extra_data: vec![255, 255, 1, 0, 255, 0, 0, 0, 0].into(), central_extra_data: vec![].into(), }, alignment: 65535, ..Default::default() }; writer.add_directory_from_path("", options)?; let _ = writer.finish_into_readable()?; 
Ok(()) } #[test] fn test_fuzz_crash_2024_06_14c() -> ZipResult<()> { let mut writer = ZipWriter::new(Cursor::new(Vec::new())); writer.set_flush_on_finish_file(false); let sub_writer = { let mut writer = ZipWriter::new(Cursor::new(Vec::new())); writer.set_flush_on_finish_file(false); let options = FileOptions { compression_method: Stored, compression_level: None, last_modified_time: DateTime::from_date_and_time(2060, 4, 6, 13, 13, 3)?, permissions: None, large_file: true, encrypt_with: None, extended_options: ExtendedFileOptions { extra_data: vec![].into(), central_extra_data: vec![].into(), }, alignment: 0, ..Default::default() }; writer.start_file_from_path("\0", options)?; writer.write_all(&([]))?; writer = ZipWriter::new_append(writer.finish()?)?; writer.deep_copy_file_from_path("\0", "")?; writer }; writer.merge_archive(sub_writer.finish_into_readable()?)?; writer.deep_copy_file_from_path("", "_copy")?; let _ = writer.finish_into_readable()?; Ok(()) } #[cfg(all(feature = "_deflate-any", feature = "aes-crypto"))] #[test] fn test_fuzz_crash_2024_06_14d() -> ZipResult<()> { use crate::write::EncryptWith::Aes; use crate::AesMode::Aes256; use CompressionMethod::Deflated; let mut writer = ZipWriter::new(Cursor::new(Vec::new())); writer.set_flush_on_finish_file(false); let options = FileOptions { compression_method: Deflated, compression_level: Some(5), last_modified_time: DateTime::from_date_and_time(2107, 4, 8, 15, 54, 19)?, permissions: None, large_file: true, encrypt_with: Some(Aes { mode: Aes256, password: "", }), extended_options: ExtendedFileOptions { extra_data: vec![2, 0, 1, 0, 0].into(), central_extra_data: vec![ 35, 229, 2, 0, 41, 41, 231, 44, 2, 0, 52, 233, 82, 201, 0, 0, 3, 0, 2, 0, 233, 255, 3, 0, 2, 0, 26, 154, 38, 251, 0, 0, ] .into(), }, alignment: 65535, ..Default::default() }; assert!(writer.add_directory_from_path("", options).is_err()); Ok(()) } #[test] fn test_fuzz_crash_2024_06_14e() -> ZipResult<()> { let mut writer = 
ZipWriter::new(Cursor::new(Vec::new())); writer.set_flush_on_finish_file(false); let options = FileOptions { compression_method: Stored, compression_level: None, last_modified_time: DateTime::from_date_and_time(1988, 1, 1, 1, 6, 26)?, permissions: None, large_file: true, encrypt_with: None, extended_options: ExtendedFileOptions { extra_data: vec![76, 0, 1, 0, 0, 2, 0, 0, 0].into(), central_extra_data: vec![ 1, 149, 1, 0, 255, 3, 0, 0, 0, 2, 255, 0, 0, 12, 65, 1, 0, 0, 67, 149, 0, 0, 76, 149, 2, 0, 149, 149, 67, 149, 0, 0, ] .into(), }, alignment: 65535, ..Default::default() }; assert!(writer.add_directory_from_path("", options).is_err()); let _ = writer.finish_into_readable()?; Ok(()) } #[allow(deprecated)] #[test] fn test_fuzz_crash_2024_06_17() -> ZipResult<()> { let mut writer = ZipWriter::new(Cursor::new(Vec::new())); writer.set_flush_on_finish_file(false); let sub_writer = { let mut writer = ZipWriter::new(Cursor::new(Vec::new())); writer.set_flush_on_finish_file(false); let sub_writer = { let mut writer = ZipWriter::new(Cursor::new(Vec::new())); writer.set_flush_on_finish_file(false); let sub_writer = { let mut writer = ZipWriter::new(Cursor::new(Vec::new())); writer.set_flush_on_finish_file(false); let sub_writer = { let mut writer = ZipWriter::new(Cursor::new(Vec::new())); writer.set_flush_on_finish_file(false); let sub_writer = { let mut writer = ZipWriter::new(Cursor::new(Vec::new())); writer.set_flush_on_finish_file(false); let sub_writer = { let mut writer = ZipWriter::new(Cursor::new(Vec::new())); writer.set_flush_on_finish_file(false); let sub_writer = { let mut writer = ZipWriter::new(Cursor::new(Vec::new())); writer.set_flush_on_finish_file(false); let sub_writer = { let mut writer = ZipWriter::new(Cursor::new(Vec::new())); writer.set_flush_on_finish_file(false); let sub_writer = { let mut writer = ZipWriter::new(Cursor::new(Vec::new())); writer.set_flush_on_finish_file(false); let options = FileOptions { compression_method: 
CompressionMethod::Unsupported( 65535, ), compression_level: Some(5), last_modified_time: DateTime::from_date_and_time( 2107, 2, 8, 15, 0, 0, )?, permissions: None, large_file: true, encrypt_with: Some(ZipCrypto( ZipCryptoKeys::of( 0x63ff, 0xc62d3103, 0xfffe00ea, ), PhantomData, )), extended_options: ExtendedFileOptions { extra_data: vec![].into(), central_extra_data: vec![].into(), }, alignment: 255, ..Default::default() }; writer.add_symlink_from_path("1\0PK\u{6}\u{6}\u{b}\u{6}\u{6}\u{6}\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\u{1}\0\0\0\0\0\0\0\0\u{b}\0\0PK\u{1}\u{2},\0\0\0\0\0\0\0\0\0\0\0\u{10}\0\0\0K\u{6}\u{6}\0\0\0\0\0\0\0\0PK\u{2}\u{6}", "", options)?; writer = ZipWriter::new_append( writer.finish_into_readable()?.into_inner(), )?; writer }; writer.merge_archive(sub_writer.finish_into_readable()?)?; writer = ZipWriter::new_append( writer.finish_into_readable()?.into_inner(), )?; let options = FileOptions { compression_method: Stored, compression_level: None, last_modified_time: DateTime::from_date_and_time( 1992, 7, 3, 0, 0, 0, )?, permissions: None, large_file: true, encrypt_with: None, extended_options: ExtendedFileOptions { extra_data: vec![].into(), central_extra_data: vec![].into(), }, alignment: 43, ..Default::default() }; writer.start_file_from_path( "\0\0\0\u{3}\0\u{1a}\u{1a}\u{1a}\u{1a}\u{1a}\u{1a}", options, )?; let options = FileOptions { compression_method: Stored, compression_level: None, last_modified_time: DateTime::from_date_and_time( 2006, 3, 27, 2, 24, 26, )?, permissions: None, large_file: false, encrypt_with: None, extended_options: ExtendedFileOptions { extra_data: vec![].into(), central_extra_data: vec![].into(), }, alignment: 26, ..Default::default() }; writer.start_file_from_path("\0K\u{6}\u{6}\0PK\u{6}\u{7}PK\u{6}\u{6}\0\0\0\0\0\0\0\0PK\u{2}\u{6}", options)?; writer = ZipWriter::new_append( writer.finish_into_readable()?.into_inner(), )?; let options = FileOptions { compression_method: Stored, compression_level: Some(17), 
last_modified_time: DateTime::from_date_and_time( 2103, 4, 10, 23, 15, 18, )?, permissions: Some(3284386755), large_file: true, encrypt_with: Some(ZipCrypto( ZipCryptoKeys::of( 0x8888c5bf, 0x88888888, 0xff888888, ), PhantomData, )), extended_options: ExtendedFileOptions { extra_data: vec![3, 0, 1, 0, 255, 144, 136, 0, 0] .into(), central_extra_data: vec![].into(), }, alignment: 65535, ..Default::default() }; writer.add_symlink_from_path("", "\nu", options)?; writer = ZipWriter::new_append(writer.finish()?)?; writer }; writer.merge_archive(sub_writer.finish_into_readable()?)?; writer = ZipWriter::new_append( writer.finish_into_readable()?.into_inner(), )?; writer }; writer.merge_archive(sub_writer.finish_into_readable()?)?; writer = ZipWriter::new_append(writer.finish()?)?; writer }; writer.merge_archive(sub_writer.finish_into_readable()?)?; writer = ZipWriter::new_append(writer.finish_into_readable()?.into_inner())?; writer.abort_file()?; let options = FileOptions { compression_method: CompressionMethod::Unsupported(49603), compression_level: Some(20), last_modified_time: DateTime::from_date_and_time( 2047, 4, 14, 3, 15, 14, )?, permissions: Some(3284386755), large_file: true, encrypt_with: Some(ZipCrypto( ZipCryptoKeys::of(0xc3, 0x0, 0x0), PhantomData, )), extended_options: ExtendedFileOptions { extra_data: vec![].into(), central_extra_data: vec![].into(), }, alignment: 0, ..Default::default() }; writer.add_directory_from_path("", options)?; writer.deep_copy_file_from_path("/", "")?; writer.shallow_copy_file_from_path("", "copy")?; assert!(writer.shallow_copy_file_from_path("", "copy").is_err()); assert!(writer.shallow_copy_file_from_path("", "copy").is_err()); assert!(writer.shallow_copy_file_from_path("", "copy").is_err()); assert!(writer.shallow_copy_file_from_path("", "copy").is_err()); assert!(writer.shallow_copy_file_from_path("", "copy").is_err()); assert!(writer.shallow_copy_file_from_path("", "copy").is_err()); writer }; 
writer.merge_archive(sub_writer.finish_into_readable()?)?; writer }; writer.merge_archive(sub_writer.finish_into_readable()?)?; writer }; writer.merge_archive(sub_writer.finish_into_readable()?)?; writer }; writer.merge_archive(sub_writer.finish_into_readable()?)?; writer }; writer.merge_archive(sub_writer.finish_into_readable()?)?; let _ = writer.finish_into_readable()?; Ok(()) } #[test] fn test_fuzz_crash_2024_06_17a() -> ZipResult<()> { let mut writer = ZipWriter::new(Cursor::new(Vec::new())); writer.set_flush_on_finish_file(false); const PATH_1: &str = "\0I\01\0P\0\0\u{2}\0\0\u{1a}\u{1a}\u{1a}\u{1a}\u{1b}\u{1a}UT\u{5}\0\0\u{1a}\u{1a}\u{1a}\u{1a}UT\u{5}\0\u{1}\0\u{1a}\u{1a}\u{1a}UT\t\0uc\u{5}\0\0\0\0\u{7f}\u{7f}\u{7f}\u{7f}PK\u{6}"; let sub_writer = { let mut writer = ZipWriter::new(Cursor::new(Vec::new())); writer.set_flush_on_finish_file(false); let sub_writer = { let mut writer = ZipWriter::new(Cursor::new(Vec::new())); writer.set_flush_on_finish_file(false); let options = FileOptions { compression_method: Stored, compression_level: None, last_modified_time: DateTime::from_date_and_time(1981, 1, 1, 0, 24, 21)?, permissions: Some(16908288), large_file: false, encrypt_with: None, extended_options: ExtendedFileOptions { extra_data: vec![].into(), central_extra_data: vec![].into(), }, alignment: 20555, ..Default::default() }; writer.start_file_from_path( "\0\u{7}\u{1}\0\0\0\0\0\0\0\0\u{1}\0\0PK\u{1}\u{2};", options, )?; writer.write_all( &([ 255, 255, 255, 255, 253, 253, 253, 203, 203, 203, 253, 253, 253, 253, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 249, 191, 225, 225, 241, 197, ]), )?; writer.write_all( &([ 197, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 75, 0, ]), )?; writer }; writer.merge_archive(sub_writer.finish_into_readable()?)?; writer = ZipWriter::new_append(writer.finish_into_readable()?.into_inner())?; let options = FileOptions { compression_method: Stored, compression_level: None, 
last_modified_time: DateTime::from_date_and_time(1980, 11, 14, 10, 46, 47)?, permissions: None, large_file: false, encrypt_with: None, extended_options: ExtendedFileOptions { extra_data: vec![].into(), central_extra_data: vec![].into(), }, alignment: 0, ..Default::default() }; writer.start_file_from_path(PATH_1, options)?; writer.deep_copy_file_from_path(PATH_1, "eee\u{6}\0\0\0\0\0\0\0\0\0\0\0$\0\0\0\0\0\0\u{7f}\u{7f}PK\u{6}\u{6}K\u{6}\u{6}\u{6}\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\u{1}\0\0\0\0\0\0\0\0\u{1}\0\0PK\u{1}\u{1e},\0\0\0\0\0\0\0\0\0\0\0\u{8}\0*\0\0\u{1}PK\u{6}\u{7}PK\u{6}\u{6}\0\0\0\0\0\0\0\0}K\u{2}\u{6}")?; writer }; writer.merge_archive(sub_writer.finish_into_readable()?)?; writer = ZipWriter::new_append(writer.finish_into_readable()?.into_inner())?; writer.deep_copy_file_from_path(PATH_1, "")?; writer = ZipWriter::new_append(writer.finish_into_readable()?.into_inner())?; writer.shallow_copy_file_from_path("", "copy")?; writer = ZipWriter::new_append(writer.finish_into_readable()?.into_inner())?; let _ = writer.finish_into_readable()?; Ok(()) } #[test] #[allow(clippy::octal_escapes)] #[cfg(feature = "bzip2")] fn test_fuzz_crash_2024_06_17b() -> ZipResult<()> { let mut writer = ZipWriter::new(Cursor::new(Vec::new())); writer.set_flush_on_finish_file(false); let sub_writer = { let mut writer = ZipWriter::new(Cursor::new(Vec::new())); writer.set_flush_on_finish_file(false); let sub_writer = { let mut writer = ZipWriter::new(Cursor::new(Vec::new())); writer.set_flush_on_finish_file(false); let sub_writer = { let mut writer = ZipWriter::new(Cursor::new(Vec::new())); writer.set_flush_on_finish_file(false); let sub_writer = { let mut writer = ZipWriter::new(Cursor::new(Vec::new())); writer.set_flush_on_finish_file(false); let sub_writer = { let mut writer = ZipWriter::new(Cursor::new(Vec::new())); writer.set_flush_on_finish_file(false); let sub_writer = { let mut writer = ZipWriter::new(Cursor::new(Vec::new())); 
writer.set_flush_on_finish_file(false); let sub_writer = { let mut writer = ZipWriter::new(Cursor::new(Vec::new())); writer.set_flush_on_finish_file(false); let sub_writer = { let mut writer = ZipWriter::new(Cursor::new(Vec::new())); writer.set_flush_on_finish_file(false); let options = FileOptions { compression_method: Stored, compression_level: None, last_modified_time: DateTime::from_date_and_time( 1981, 1, 1, 0, 0, 21, )?, permissions: Some(16908288), large_file: false, encrypt_with: None, extended_options: ExtendedFileOptions { extra_data: vec![].into(), central_extra_data: vec![].into(), }, alignment: 20555, ..Default::default() }; writer.start_file_from_path("\0\u{7}\u{1}\0\0\0\0\0\0\0\0\u{1}\0\0PK\u{1}\u{2};\u{1a}\u{18}\u{1a}UT\t.........................\0u", options)?; writer }; writer.merge_archive(sub_writer.finish_into_readable()?)?; let options = FileOptions { compression_method: CompressionMethod::Bzip2, compression_level: Some(5), last_modified_time: DateTime::from_date_and_time( 2055, 7, 7, 3, 6, 6, )?, permissions: None, large_file: false, encrypt_with: None, extended_options: ExtendedFileOptions { extra_data: vec![].into(), central_extra_data: vec![].into(), }, alignment: 0, ..Default::default() }; writer.start_file_from_path("\0\0\0\0..\0\0\0\0\0\u{7f}\u{7f}PK\u{6}\u{6}K\u{6}\u{6}\u{6}\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\u{1}\0\0\0\0\0\0\0\0\u{1}\0\0PK\u{1}\u{1e},\0\0\0\0\0\0\0\0\0\0\0\u{8}\0*\0\0\u{1}PK\u{6}\u{7}PK\u{6}\u{6}\0\0\0\0\0\0\0\0}K\u{2}\u{6}", options)?; writer = ZipWriter::new_append( writer.finish_into_readable()?.into_inner(), )?; writer }; writer.merge_archive(sub_writer.finish_into_readable()?)?; writer = ZipWriter::new_append( writer.finish_into_readable()?.into_inner(), )?; writer }; writer.merge_archive(sub_writer.finish_into_readable()?)?; writer = ZipWriter::new_append(writer.finish_into_readable()?.into_inner())?; writer }; writer.merge_archive(sub_writer.finish_into_readable()?)?; writer = 
ZipWriter::new_append(writer.finish_into_readable()?.into_inner())?; writer }; writer.merge_archive(sub_writer.finish_into_readable()?)?; writer }; writer.merge_archive(sub_writer.finish_into_readable()?)?; writer }; writer.merge_archive(sub_writer.finish_into_readable()?)?; writer }; writer.merge_archive(sub_writer.finish_into_readable()?)?; let _ = writer.finish_into_readable()?; Ok(()) } #[test] fn test_fuzz_crash_2024_06_18() -> ZipResult<()> { let mut writer = ZipWriter::new(Cursor::new(Vec::new())); writer.set_raw_comment(Box::<[u8]>::from([ 80, 75, 5, 6, 255, 255, 255, 255, 255, 255, 80, 75, 5, 6, 255, 255, 255, 255, 255, 255, 13, 0, 13, 13, 13, 13, 13, 255, 255, 255, 255, 255, 255, 255, 255, ])); let sub_writer = { let mut writer = ZipWriter::new(Cursor::new(Vec::new())); writer.set_flush_on_finish_file(false); writer.set_raw_comment(Box::new([])); writer }; writer.merge_archive(sub_writer.finish_into_readable()?)?; writer = ZipWriter::new_append(writer.finish()?)?; let _ = writer.finish_into_readable()?; Ok(()) } #[test] fn test_fuzz_crash_2024_06_18a() -> ZipResult<()> { let mut writer = ZipWriter::new(Cursor::new(Vec::new())); writer.set_flush_on_finish_file(false); writer.set_raw_comment(Box::<[u8]>::from([])); let sub_writer = { let mut writer = ZipWriter::new(Cursor::new(Vec::new())); writer.set_flush_on_finish_file(false); let sub_writer = { let mut writer = ZipWriter::new(Cursor::new(Vec::new())); writer.set_flush_on_finish_file(false); let sub_writer = { let mut writer = ZipWriter::new(Cursor::new(Vec::new())); writer.set_flush_on_finish_file(false); let options = FullFileOptions { compression_method: Stored, compression_level: None, last_modified_time: DateTime::from_date_and_time(2107, 4, 8, 14, 0, 19)?, permissions: None, large_file: false, encrypt_with: None, extended_options: ExtendedFileOptions { extra_data: vec![ 182, 180, 1, 0, 180, 182, 74, 0, 0, 200, 0, 0, 0, 2, 0, 0, 0, ] .into(), central_extra_data: vec![].into(), }, alignment: 1542, 
..Default::default() }; writer.start_file_from_path("\0\0PK\u{6}\u{6}K\u{6}PK\u{3}\u{4}\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\u{1}\0\0\0\0\0\0\0\0\u{1}\u{1}\0PK\u{1}\u{2},\0\0\0\0\0\0\0\0\0\0\0P\u{7}\u{4}/.\0KP\0\0;\0\0\0\u{1e}\0\0\0\0\0\0\0\0\0\0\0\0\0", options)?; let finished = writer.finish_into_readable()?; assert_eq!(1, finished.file_names().count()); writer = ZipWriter::new_append(finished.into_inner())?; let options = FullFileOptions { compression_method: Stored, compression_level: Some(5), last_modified_time: DateTime::from_date_and_time(2107, 4, 1, 0, 0, 0)?, permissions: None, large_file: false, encrypt_with: Some(ZipCrypto( ZipCryptoKeys::of(0x0, 0x62e4b50, 0x100), PhantomData, )), ..Default::default() }; writer.add_symlink_from_path( "\0K\u{6}\0PK\u{6}\u{7}PK\u{6}\u{6}\0\0\0\0\0\0\0\0PK\u{2}\u{6}", "\u{8}\0\0\0\0/\0", options, )?; let finished = writer.finish_into_readable()?; assert_eq!(2, finished.file_names().count()); writer = ZipWriter::new_append(finished.into_inner())?; assert_eq!(2, writer.files.len()); writer }; let finished = sub_writer.finish_into_readable()?; assert_eq!(2, finished.file_names().count()); writer.merge_archive(finished)?; writer = ZipWriter::new_append(writer.finish_into_readable()?.into_inner())?; writer }; writer.merge_archive(sub_writer.finish_into_readable()?)?; writer }; writer.merge_archive(sub_writer.finish_into_readable()?)?; let _ = writer.finish_into_readable()?; Ok(()) } #[cfg(all(feature = "bzip2", feature = "aes-crypto"))] #[test] fn test_fuzz_crash_2024_06_18b() -> ZipResult<()> { let mut writer = ZipWriter::new(Cursor::new(Vec::new())); writer.set_flush_on_finish_file(true); writer.set_raw_comment([0].into()); writer = ZipWriter::new_append(writer.finish_into_readable()?.into_inner())?; assert_eq!(writer.get_raw_comment()[0], 0); let options = FileOptions { compression_method: CompressionMethod::Bzip2, compression_level: None, last_modified_time: DateTime::from_date_and_time(2009, 6, 3, 13, 37, 39)?, 
permissions: Some(2644352413), large_file: true, encrypt_with: Some(crate::write::EncryptWith::Aes { mode: crate::AesMode::Aes256, password: "", }), extended_options: ExtendedFileOptions { extra_data: vec![].into(), central_extra_data: vec![].into(), }, alignment: 255, ..Default::default() }; writer.add_symlink_from_path("", "", options)?; writer.deep_copy_file_from_path("", "PK\u{5}\u{6}\0\0\0\0\0\0\0\0\0\0\0\0\0\u{4}\0\0\0")?; writer = ZipWriter::new_append(writer.finish_into_readable()?.into_inner())?; assert_eq!(writer.get_raw_comment()[0], 0); writer.deep_copy_file_from_path( "PK\u{5}\u{6}\0\0\0\0\0\0\0\0\0\0\0\0\0\u{4}\0\0\0", "\u{2}yy\u{5}qu\0", )?; let finished = writer.finish()?; let archive = ZipArchive::new(finished.clone())?; assert_eq!(archive.comment(), [0]); writer = ZipWriter::new_append(finished)?; assert_eq!(writer.get_raw_comment()[0], 0); let _ = writer.finish_into_readable()?; Ok(()) } #[test] fn test_fuzz_crash_2024_06_19() -> ZipResult<()> { let mut writer = ZipWriter::new(Cursor::new(Vec::new())); writer.set_flush_on_finish_file(false); let options = FileOptions { compression_method: Stored, compression_level: None, last_modified_time: DateTime::from_date_and_time(1980, 3, 1, 19, 55, 58)?, permissions: None, large_file: false, encrypt_with: None, extended_options: ExtendedFileOptions { extra_data: vec![].into(), central_extra_data: vec![].into(), }, alignment: 256, ..Default::default() }; writer.start_file_from_path( "\0\0\0PK\u{5}\u{6}\0\0\0\0\u{1}\0\u{12}\u{6}\0\0\0\0\0\u{1}\0\0\0\0\0\0\0\0\0", options, )?; writer.set_flush_on_finish_file(false); writer.shallow_copy_file_from_path( "\0\0\0PK\u{5}\u{6}\0\0\0\0\u{1}\0\u{12}\u{6}\0\0\0\0\0\u{1}\0\0\0\0\0\0\0\0\0", "", )?; writer.set_flush_on_finish_file(false); writer.deep_copy_file_from_path("", "copy")?; writer.abort_file()?; writer.set_flush_on_finish_file(false); writer.set_raw_comment([255, 0].into()); writer.abort_file()?; assert_eq!(writer.get_raw_comment(), [255, 0]); writer = 
ZipWriter::new_append(writer.finish_into_readable()?.into_inner())?; assert_eq!(writer.get_raw_comment(), [255, 0]); writer.set_flush_on_finish_file(false); let options = FileOptions { compression_method: Stored, compression_level: None, last_modified_time: DateTime::default(), permissions: None, large_file: false, encrypt_with: None, extended_options: ExtendedFileOptions { extra_data: vec![].into(), central_extra_data: vec![].into(), }, ..Default::default() }; writer.start_file_from_path("", options)?; assert_eq!(writer.get_raw_comment(), [255, 0]); let archive = writer.finish_into_readable()?; assert_eq!(archive.comment(), [255, 0]); Ok(()) } #[test] fn fuzz_crash_2024_06_21() -> ZipResult<()> { let mut writer = ZipWriter::new(Cursor::new(Vec::new())); writer.set_flush_on_finish_file(false); let options = FullFileOptions { compression_method: Stored, compression_level: None, last_modified_time: DateTime::from_date_and_time(1980, 2, 1, 0, 0, 0)?, permissions: None, large_file: false, encrypt_with: None, ..Default::default() }; const LONG_PATH: &str = "\0@PK\u{6}\u{6}\u{7}\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0@/\0\0\00ΝPK\u{5}\u{6}O\0\u{10}\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0@PK\u{6}\u{7}\u{6}\0/@\0\0\0\0\0\0\0\0 \0\0"; writer.start_file_from_path(LONG_PATH, options)?; writer = ZipWriter::new_append(writer.finish()?)?; writer.deep_copy_file_from_path(LONG_PATH, "oo\0\0\0")?; writer.abort_file()?; writer.set_raw_comment([33].into()); let archive = writer.finish_into_readable()?; writer = ZipWriter::new_append(archive.into_inner())?; assert!(writer.get_raw_comment().starts_with(&[33])); let archive = writer.finish_into_readable()?; assert!(archive.comment().starts_with(&[33])); Ok(()) } #[test] #[cfg(feature = "bzip2")] fn fuzz_crash_2024_07_17() -> ZipResult<()> { let mut writer = ZipWriter::new(Cursor::new(Vec::new())); writer.set_flush_on_finish_file(false); let options = FileOptions { compression_method: 
CompressionMethod::Bzip2, compression_level: None, last_modified_time: DateTime::from_date_and_time(2095, 2, 16, 21, 0, 1)?, permissions: Some(84238341), large_file: true, encrypt_with: None, extended_options: ExtendedFileOptions { extra_data: vec![117, 99, 6, 0, 0, 0, 0, 0, 0, 0, 2, 1, 0, 0, 2, 0, 0, 0].into(), central_extra_data: vec![].into(), }, alignment: 65535, ..Default::default() }; writer.start_file_from_path("", options)?; //writer = ZipWriter::new_append(writer.finish_into_readable()?.into_inner())?; writer.deep_copy_file_from_path("", "copy")?; let _ = ZipWriter::new_append(writer.finish_into_readable()?.into_inner())?; Ok(()) } #[test] fn fuzz_crash_2024_07_19() -> ZipResult<()> { let mut writer = ZipWriter::new(Cursor::new(Vec::new())); writer.set_flush_on_finish_file(false); let options = FileOptions { compression_method: Stored, compression_level: None, last_modified_time: DateTime::from_date_and_time(1980, 6, 1, 0, 34, 47)?, permissions: None, large_file: true, encrypt_with: None, extended_options: ExtendedFileOptions { extra_data: vec![].into(), central_extra_data: vec![].into(), }, alignment: 45232, ..Default::default() }; writer.add_directory_from_path("", options)?; writer.deep_copy_file_from_path("/", "")?; writer = ZipWriter::new_append(writer.finish_into_readable()?.into_inner())?; writer.deep_copy_file_from_path("", "copy")?; let _ = ZipWriter::new_append(writer.finish_into_readable()?.into_inner())?; Ok(()) } #[test] #[cfg(feature = "aes-crypto")] fn fuzz_crash_2024_07_19a() -> ZipResult<()> { use crate::write::EncryptWith::Aes; use crate::AesMode::Aes128; let mut writer = ZipWriter::new(Cursor::new(Vec::new())); writer.set_flush_on_finish_file(false); let options = FileOptions { compression_method: Stored, compression_level: None, last_modified_time: DateTime::from_date_and_time(2107, 6, 5, 13, 0, 21)?, permissions: None, large_file: true, encrypt_with: Some(Aes { mode: Aes128, password: "", }), extended_options: ExtendedFileOptions { 
extra_data: vec![3, 0, 4, 0, 209, 53, 53, 8, 2, 61, 0, 0].into(), central_extra_data: vec![].into(), }, alignment: 65535, ..Default::default() }; writer.start_file_from_path("", options)?; let _ = ZipWriter::new_append(writer.finish_into_readable()?.into_inner())?; Ok(()) } #[test] fn fuzz_crash_2024_07_20() -> ZipResult<()> { let mut writer = ZipWriter::new(Cursor::new(Vec::new())); writer.set_flush_on_finish_file(true); let options = FileOptions { compression_method: Stored, compression_level: None, last_modified_time: DateTime::from_date_and_time(2041, 8, 2, 19, 38, 0)?, permissions: None, large_file: false, encrypt_with: None, extended_options: ExtendedFileOptions { extra_data: vec![].into(), central_extra_data: vec![].into(), }, alignment: 0, ..Default::default() }; writer.add_directory_from_path("\0\0\0\0\0\0\07黻", options)?; let sub_writer = { let mut writer = ZipWriter::new(Cursor::new(Vec::new())); writer.set_flush_on_finish_file(false); let options = FileOptions { compression_method: Stored, compression_level: None, last_modified_time: DateTime::default(), permissions: None, large_file: false, encrypt_with: None, extended_options: ExtendedFileOptions { extra_data: vec![].into(), central_extra_data: vec![].into(), }, alignment: 4, ..Default::default() }; writer.add_directory_from_path("\0\0\0黻", options)?; writer = ZipWriter::new_append(writer.finish_into_readable()?.into_inner())?; writer.abort_file()?; let options = FileOptions { compression_method: Stored, compression_level: None, last_modified_time: DateTime::from_date_and_time(1980, 1, 1, 0, 7, 0)?, permissions: Some(2663103419), large_file: false, encrypt_with: None, extended_options: ExtendedFileOptions { extra_data: vec![].into(), central_extra_data: vec![].into(), }, alignment: 32256, ..Default::default() }; writer.add_directory_from_path("\0", options)?; writer = ZipWriter::new_append(writer.finish()?)?; writer }; writer.merge_archive(sub_writer.finish_into_readable()?)?; let _ = 
ZipWriter::new_append(writer.finish_into_readable()?.into_inner())?; Ok(()) } #[test] fn fuzz_crash_2024_07_21() -> ZipResult<()> { let mut writer = ZipWriter::new(Cursor::new(Vec::new())); let sub_writer = { let mut writer = ZipWriter::new(Cursor::new(Vec::new())); writer.add_directory_from_path( "", FileOptions { compression_method: Stored, compression_level: None, last_modified_time: DateTime::from_date_and_time(2105, 8, 1, 15, 0, 0)?, permissions: None, large_file: false, encrypt_with: None, extended_options: ExtendedFileOptions { extra_data: vec![].into(), central_extra_data: vec![].into(), }, alignment: 0, ..Default::default() }, )?; writer.abort_file()?; let mut writer = ZipWriter::new_append(writer.finish()?)?; writer.add_directory_from_path( "", FileOptions { compression_method: Stored, compression_level: None, last_modified_time: DateTime::default(), permissions: None, large_file: false, encrypt_with: None, extended_options: ExtendedFileOptions { extra_data: vec![].into(), central_extra_data: vec![].into(), }, alignment: 16, ..Default::default() }, )?; ZipWriter::new_append(writer.finish()?)? }; writer.merge_archive(sub_writer.finish_into_readable()?)?; let writer = ZipWriter::new_append(writer.finish()?)?; let _ = writer.finish_into_readable()?; Ok(()) } }
//! Implementation of the ZipCrypto algorithm //! //! The following paper was used to implement the ZipCrypto algorithm: //! [https://courses.cs.ut.ee/MTAT.07.022/2015_fall/uploads/Main/dmitri-report-f15-16.pdf](https://courses.cs.ut.ee/MTAT.07.022/2015_fall/uploads/Main/dmitri-report-f15-16.pdf) use std::fmt::{Debug, Formatter}; use std::hash::Hash; use std::num::Wrapping; use crate::result::ZipError; /// A container to hold the current key state #[cfg_attr(fuzzing, derive(arbitrary::Arbitrary))] #[derive(Clone, Copy, Hash, Ord, PartialOrd, Eq, PartialEq)] pub(crate) struct ZipCryptoKeys { key_0: Wrapping<u32>, key_1: Wrapping<u32>, key_2: Wrapping<u32>, } impl Debug for ZipCryptoKeys { #[allow(unreachable_code)] fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { #[cfg(not(any(test, fuzzing)))] { use std::collections::hash_map::DefaultHasher; use std::hash::Hasher; let mut t = DefaultHasher::new(); self.hash(&mut t); f.write_fmt(format_args!("ZipCryptoKeys(hash {})", t.finish())) } #[cfg(any(test, fuzzing))] f.write_fmt(format_args!( "ZipCryptoKeys::of({:#10x},{:#10x},{:#10x})", self.key_0, self.key_1, self.key_2 )) } } impl ZipCryptoKeys { const fn new() -> ZipCryptoKeys { ZipCryptoKeys { key_0: Wrapping(0x12345678), key_1: Wrapping(0x23456789), key_2: Wrapping(0x34567890), } } #[allow(unused)] pub const fn of(key_0: u32, key_1: u32, key_2: u32) -> ZipCryptoKeys { ZipCryptoKeys { key_0: Wrapping(key_0), key_1: Wrapping(key_1), key_2: Wrapping(key_2), } } fn update(&mut self, input: u8) { self.key_0 = ZipCryptoKeys::crc32(self.key_0, input); self.key_1 = (self.key_1 + (self.key_0 & Wrapping(0xff))) * Wrapping(0x08088405) + Wrapping(1); self.key_2 = ZipCryptoKeys::crc32(self.key_2, (self.key_1 >> 24).0 as u8); } fn stream_byte(&mut self) -> u8 { let temp: Wrapping<u16> = Wrapping(self.key_2.0 as u16) | Wrapping(3); ((temp * (temp ^ Wrapping(1))) >> 8).0 as u8 } fn decrypt_byte(&mut self, cipher_byte: u8) -> u8 { let plain_byte: u8 = self.stream_byte() ^ 
cipher_byte; self.update(plain_byte); plain_byte } #[allow(dead_code)] fn encrypt_byte(&mut self, plain_byte: u8) -> u8 { let cipher_byte: u8 = self.stream_byte() ^ plain_byte; self.update(plain_byte); cipher_byte } fn crc32(crc: Wrapping<u32>, input: u8) -> Wrapping<u32> { (crc >> 8) ^ Wrapping(CRCTABLE[((crc & Wrapping(0xff)).0 as u8 ^ input) as usize]) } pub(crate) fn derive(password: &[u8]) -> ZipCryptoKeys { let mut keys = ZipCryptoKeys::new(); for byte in password.iter() { keys.update(*byte); } keys } } /// A ZipCrypto reader with unverified password pub struct ZipCryptoReader<R> { file: R, keys: ZipCryptoKeys, } pub enum ZipCryptoValidator { PkzipCrc32(u32), InfoZipMsdosTime(u16), } impl<R: std::io::Read> ZipCryptoReader<R> { /// Note: The password is `&[u8]` and not `&str` because the /// [zip specification](https://pkware.cachefly.net/webdocs/APPNOTE/APPNOTE-6.3.3.TXT) /// does not specify password encoding (see function `update_keys` in the specification). /// Therefore, if `&str` was used, the password would be UTF-8 and it /// would be impossible to decrypt files that were encrypted with a /// password byte sequence that is unrepresentable in UTF-8. pub fn new(file: R, password: &[u8]) -> ZipCryptoReader<R> { ZipCryptoReader { file, keys: ZipCryptoKeys::derive(password), } } /// Read the ZipCrypto header bytes and validate the password. pub fn validate( mut self, validator: ZipCryptoValidator, ) -> Result<ZipCryptoReaderValid<R>, ZipError> { // ZipCrypto prefixes a file with a 12 byte header let mut header_buf = [0u8; 12]; self.file.read_exact(&mut header_buf)?; for byte in header_buf.iter_mut() { *byte = self.keys.decrypt_byte(*byte); } match validator { ZipCryptoValidator::PkzipCrc32(crc32_plaintext) => { // PKZIP before 2.0 used 2 byte CRC check. // PKZIP 2.0+ used 1 byte CRC check. It's more secure. // We also use 1 byte CRC. 
if (crc32_plaintext >> 24) as u8 != header_buf[11] { return Err(ZipError::InvalidPassword); } } ZipCryptoValidator::InfoZipMsdosTime(last_mod_time) => { // Info-ZIP modification to ZipCrypto format: // If bit 3 of the general purpose bit flag is set // (indicates that the file uses a data-descriptor section), // it uses high byte of 16-bit File Time. // Info-ZIP code probably writes 2 bytes of File Time. // We check only 1 byte. if (last_mod_time >> 8) as u8 != header_buf[11] { return Err(ZipError::InvalidPassword); } } } Ok(ZipCryptoReaderValid { reader: self }) } } #[allow(unused)] pub(crate) struct ZipCryptoWriter<W> { pub(crate) writer: W, pub(crate) buffer: Vec<u8>, pub(crate) keys: ZipCryptoKeys, } impl<W: std::io::Write> ZipCryptoWriter<W> { #[allow(unused)] pub(crate) fn finish(mut self, crc32: u32) -> std::io::Result<W> { self.buffer[11] = (crc32 >> 24) as u8; for byte in self.buffer.iter_mut() { *byte = self.keys.encrypt_byte(*byte); } self.writer.write_all(&self.buffer)?; self.writer.flush()?; Ok(self.writer) } } impl<W: std::io::Write> std::io::Write for ZipCryptoWriter<W> { fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> { self.buffer.extend_from_slice(buf); Ok(buf.len()) } fn flush(&mut self) -> std::io::Result<()> { Ok(()) } } /// A ZipCrypto reader with verified password pub struct ZipCryptoReaderValid<R> { reader: ZipCryptoReader<R>, } impl<R: std::io::Read> std::io::Read for ZipCryptoReaderValid<R> { fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> { // Note: There might be potential for optimization. Inspiration can be found at: // https://github.com/kornelski/7z/blob/master/CPP/7zip/Crypto/ZipCrypto.cpp let n = self.reader.file.read(buf)?; for byte in buf.iter_mut().take(n) { *byte = self.reader.keys.decrypt_byte(*byte); } Ok(n) } } impl<R: std::io::Read> ZipCryptoReaderValid<R> { /// Consumes this decoder, returning the underlying reader. 
pub fn into_inner(self) -> R { self.reader.file } } static CRCTABLE: [u32; 256] = [ 0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, 0x076dc419, 0x706af48f, 0xe963a535, 0x9e6495a3, 0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988, 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91, 0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de, 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7, 0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, 0x14015c4f, 0x63066cd9, 0xfa0f3d63, 0x8d080df5, 0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172, 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b, 0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940, 0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59, 0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116, 0x21b4f4b5, 0x56b3c423, 0xcfba9599, 0xb8bda50f, 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924, 0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d, 0x76dc4190, 0x01db7106, 0x98d220bc, 0xefd5102a, 0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433, 0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818, 0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01, 0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e, 0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457, 0x65b0d9c6, 0x12b7e950, 0x8bbeb8ea, 0xfcb9887c, 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65, 0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2, 0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb, 0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0, 0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9, 0x5005713c, 0x270241aa, 0xbe0b1010, 0xc90c2086, 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f, 0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, 0x59b33d17, 0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad, 0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a, 0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683, 0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8, 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1, 0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe, 0xf762575d, 0x806567cb, 0x196c3671, 0x6e6b06e7, 0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc, 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 
0x60b08ed5, 0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252, 0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b, 0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60, 0xdf60efc3, 0xa867df55, 0x316e8eef, 0x4669be79, 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236, 0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f, 0xc5ba3bbe, 0xb2bd0b28, 0x2bb45a92, 0x5cb36a04, 0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d, 0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a, 0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713, 0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38, 0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21, 0x86d3d2d4, 0xf1d4e242, 0x68ddb3f8, 0x1fda836e, 0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777, 0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c, 0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45, 0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2, 0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db, 0xaed16a4a, 0xd9d65adc, 0x40df0b66, 0x37d83bf0, 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9, 0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, 0xbad03605, 0xcdd70693, 0x54de5729, 0x23d967bf, 0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94, 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d, ];
/// Build script: on targets that require it, force-enable the `std` feature
/// by emitting the corresponding `rustc-cfg` directive to Cargo.
fn main() {
    let arch = std::env::var("CARGO_CFG_TARGET_ARCH");
    let os = std::env::var("CARGO_CFG_TARGET_OS");

    // wasm32 and Hermit builds always need `std`; a missing env var never matches.
    let needs_std = matches!(arch.as_deref(), Ok("wasm32"))
        || matches!(os.as_deref(), Ok("hermit"));

    if needs_std {
        println!("cargo:rustc-cfg=feature=\"std\"");
    }
}
// This file has been generated by ./update_consts.sh pub const BLOCKSIZELOG_MAX: u32 = zstd_sys::ZSTD_BLOCKSIZELOG_MAX; pub const BLOCKSIZE_MAX: u32 = zstd_sys::ZSTD_BLOCKSIZE_MAX; pub const CLEVEL_DEFAULT: CompressionLevel = zstd_sys::ZSTD_CLEVEL_DEFAULT as CompressionLevel; pub const CONTENTSIZE_ERROR: u64 = zstd_sys::ZSTD_CONTENTSIZE_ERROR as u64; pub const CONTENTSIZE_UNKNOWN: u64 = zstd_sys::ZSTD_CONTENTSIZE_UNKNOWN as u64; pub const MAGIC_DICTIONARY: u32 = zstd_sys::ZSTD_MAGIC_DICTIONARY; pub const MAGICNUMBER: u32 = zstd_sys::ZSTD_MAGICNUMBER; pub const MAGIC_SKIPPABLE_MASK: u32 = zstd_sys::ZSTD_MAGIC_SKIPPABLE_MASK; pub const MAGIC_SKIPPABLE_START: u32 = zstd_sys::ZSTD_MAGIC_SKIPPABLE_START; pub const VERSION_MAJOR: u32 = zstd_sys::ZSTD_VERSION_MAJOR; pub const VERSION_MINOR: u32 = zstd_sys::ZSTD_VERSION_MINOR; pub const VERSION_NUMBER: u32 = zstd_sys::ZSTD_VERSION_NUMBER; pub const VERSION_RELEASE: u32 = zstd_sys::ZSTD_VERSION_RELEASE;
// This file has been generated by ./update_consts.sh pub const BLOCKSIZE_MAX_MIN: u32 = zstd_sys::ZSTD_BLOCKSIZE_MAX_MIN; pub const BLOCKSPLITTER_LEVEL_MAX: u32 = zstd_sys::ZSTD_BLOCKSPLITTER_LEVEL_MAX; pub const CHAINLOG_MAX_32: u32 = zstd_sys::ZSTD_CHAINLOG_MAX_32; pub const CHAINLOG_MAX_64: u32 = zstd_sys::ZSTD_CHAINLOG_MAX_64; pub const CHAINLOG_MIN: u32 = zstd_sys::ZSTD_CHAINLOG_MIN; pub const FRAMEHEADERSIZE_MAX: u32 = zstd_sys::ZSTD_FRAMEHEADERSIZE_MAX; pub const HASHLOG_MIN: u32 = zstd_sys::ZSTD_HASHLOG_MIN; pub const LDM_BUCKETSIZELOG_MAX: u32 = zstd_sys::ZSTD_LDM_BUCKETSIZELOG_MAX; pub const LDM_BUCKETSIZELOG_MIN: u32 = zstd_sys::ZSTD_LDM_BUCKETSIZELOG_MIN; pub const LDM_HASHLOG_MIN: u32 = zstd_sys::ZSTD_LDM_HASHLOG_MIN; pub const LDM_HASHRATELOG_MIN: u32 = zstd_sys::ZSTD_LDM_HASHRATELOG_MIN; pub const LDM_MINMATCH_MAX: u32 = zstd_sys::ZSTD_LDM_MINMATCH_MAX; pub const LDM_MINMATCH_MIN: u32 = zstd_sys::ZSTD_LDM_MINMATCH_MIN; pub const MINMATCH_MAX: u32 = zstd_sys::ZSTD_MINMATCH_MAX; pub const MINMATCH_MIN: u32 = zstd_sys::ZSTD_MINMATCH_MIN; pub const OVERLAPLOG_MAX: u32 = zstd_sys::ZSTD_OVERLAPLOG_MAX; pub const OVERLAPLOG_MIN: u32 = zstd_sys::ZSTD_OVERLAPLOG_MIN; pub const SEARCHLOG_MIN: u32 = zstd_sys::ZSTD_SEARCHLOG_MIN; pub const SKIPPABLEHEADERSIZE: u32 = zstd_sys::ZSTD_SKIPPABLEHEADERSIZE; pub const SRCSIZEHINT_MIN: u32 = zstd_sys::ZSTD_SRCSIZEHINT_MIN; pub const TARGETCBLOCKSIZE_MAX: u32 = zstd_sys::ZSTD_TARGETCBLOCKSIZE_MAX; pub const TARGETCBLOCKSIZE_MIN: u32 = zstd_sys::ZSTD_TARGETCBLOCKSIZE_MIN; pub const TARGETLENGTH_MAX: u32 = zstd_sys::ZSTD_TARGETLENGTH_MAX; pub const TARGETLENGTH_MIN: u32 = zstd_sys::ZSTD_TARGETLENGTH_MIN; pub const WINDOWLOG_LIMIT_DEFAULT: u32 = zstd_sys::ZSTD_WINDOWLOG_LIMIT_DEFAULT; pub const WINDOWLOG_MAX_32: u32 = zstd_sys::ZSTD_WINDOWLOG_MAX_32; pub const WINDOWLOG_MAX_64: u32 = zstd_sys::ZSTD_WINDOWLOG_MAX_64; pub const WINDOWLOG_MIN: u32 = zstd_sys::ZSTD_WINDOWLOG_MIN;
// This file has been generated by ./update_consts.sh pub const SEEKABLE_FRAMEINDEX_TOOLARGE: u64 = zstd_sys::ZSTD_SEEKABLE_FRAMEINDEX_TOOLARGE as u64; pub const SEEKABLE_MAGICNUMBER: u32 = zstd_sys::ZSTD_SEEKABLE_MAGICNUMBER; pub const SEEKABLE_MAX_FRAME_DECOMPRESSED_SIZE: u32 = zstd_sys::ZSTD_SEEKABLE_MAX_FRAME_DECOMPRESSED_SIZE; pub const SEEKABLE_MAXFRAMES: u32 = zstd_sys::ZSTD_SEEKABLE_MAXFRAMES; pub const seekTableFooterSize: u32 = zstd_sys::ZSTD_seekTableFooterSize;
#![no_std] //! Minimal safe wrapper around zstd-sys. //! //! This crates provides a minimal translation of the [zstd-sys] methods. //! For a more comfortable high-level library, see the [zstd] crate. //! //! [zstd-sys]: https://crates.io/crates/zstd-sys //! [zstd]: https://crates.io/crates/zstd //! //! Most of the functions here map 1-for-1 to a function from //! [the C zstd library][zstd-c] mentioned in their descriptions. //! Check the [source documentation][doc] for more information on their //! behaviour. //! //! [doc]: https://facebook.github.io/zstd/zstd_manual.html //! [zstd-c]: https://facebook.github.io/zstd/ //! //! Features denoted as experimental in the C library are hidden behind an //! `experimental` feature. #![cfg_attr(feature = "doc-cfg", feature(doc_cfg))] // TODO: Use alloc feature instead to implement stuff for Vec // TODO: What about Cursor? #[cfg(feature = "std")] extern crate std; #[cfg(test)] mod tests; #[cfg(feature = "seekable")] pub mod seekable; // Re-export zstd-sys pub use zstd_sys; /// How to compress data. pub use zstd_sys::ZSTD_strategy as Strategy; /// Reset directive. // pub use zstd_sys::ZSTD_ResetDirective as ResetDirective; use core::ffi::{c_char, c_int, c_ulonglong, c_void}; use core::marker::PhantomData; use core::num::{NonZeroU32, NonZeroU64}; use core::ops::{Deref, DerefMut}; use core::ptr::NonNull; use core::str; include!("constants.rs"); #[cfg(feature = "experimental")] include!("constants_experimental.rs"); #[cfg(feature = "seekable")] include!("constants_seekable.rs"); /// Represents the compression level used by zstd. pub type CompressionLevel = i32; /// Represents a possible error from the zstd library. pub type ErrorCode = usize; /// Wrapper result around most zstd functions. /// /// Either a success code (usually number of bytes written), or an error code. pub type SafeResult = Result<usize, ErrorCode>; /// Indicates an error happened when parsing the frame content size. 
///
/// The stream may be corrupted, or the given frame prefix was too small.
#[derive(Debug)]
pub struct ContentSizeError;

impl core::fmt::Display for ContentSizeError {
    fn fmt(&self, fmt: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        fmt.write_str("Could not get content size")
    }
}

/// Returns true if code represents error.
fn is_error(code: usize) -> bool {
    // Safety: Just FFI
    let raw = unsafe { zstd_sys::ZSTD_isError(code) };
    raw != 0
}

/// Parse the result code
///
/// Returns the number of bytes written if the code represents success,
/// or the error message code otherwise.
fn parse_code(code: usize) -> SafeResult {
    // A zstd return value is either a byte count or an error code;
    // `ZSTD_isError` tells the two apart.
    match is_error(code) {
        false => Ok(code),
        true => Err(code),
    }
}

/// Parse a content size value.
///
/// zstd uses 2 special content size values to indicate either unknown size or parsing error.
fn parse_content_size(
    content_size: u64,
) -> Result<Option<u64>, ContentSizeError> {
    if content_size == CONTENTSIZE_ERROR {
        Err(ContentSizeError)
    } else if content_size == CONTENTSIZE_UNKNOWN {
        Ok(None)
    } else {
        Ok(Some(content_size))
    }
}

/// Views a byte slice as a C `const void*`.
fn ptr_void(src: &[u8]) -> *const c_void {
    src.as_ptr().cast()
}

/// Views a writable buffer as a C `void*`.
fn ptr_mut_void(dst: &mut (impl WriteBuf + ?Sized)) -> *mut c_void {
    dst.as_mut_ptr().cast()
}

/// Returns the ZSTD version.
///
/// Returns `major * 10_000 + minor * 100 + patch`.
/// So 1.5.3 would be returned as `10_503`.
pub fn version_number() -> u32 {
    // Safety: Just FFI
    let version = unsafe { zstd_sys::ZSTD_versionNumber() };
    version as u32
}

/// Returns a string representation of the ZSTD version.
///
/// For example "1.5.3".
pub fn version_string() -> &'static str {
    // Safety: Assumes `ZSTD_versionString` returns a valid utf8 string.
    unsafe {
        let raw = zstd_sys::ZSTD_versionString();
        c_char_to_str(raw)
    }
}

/// Returns the minimum (fastest) compression level supported.
///
/// This is likely going to be a _very_ large negative number.
pub fn min_c_level() -> CompressionLevel {
    // Safety: Just FFI
    unsafe { zstd_sys::ZSTD_minCLevel() as CompressionLevel }
}

/// Returns the maximum (slowest) compression level supported.
pub fn max_c_level() -> CompressionLevel {
    // Safety: Just FFI
    unsafe { zstd_sys::ZSTD_maxCLevel() as CompressionLevel }
}

/// Wraps the `ZSTD_compress` function.
///
/// This will try to compress `src` entirely and write the result to `dst`, returning the number of
/// bytes written. If `dst` is too small to hold the compressed content, an error will be returned.
///
/// For streaming operations that don't require to store the entire input/output in memory, see
/// `compress_stream`.
pub fn compress<C: WriteBuf + ?Sized>(
    dst: &mut C,
    src: &[u8],
    compression_level: CompressionLevel,
) -> SafeResult {
    // Safety: ZSTD_compress indeed returns how many bytes have been written.
    unsafe {
        dst.write_from(|buffer, capacity| {
            parse_code(zstd_sys::ZSTD_compress(
                buffer,
                capacity,
                ptr_void(src),
                src.len(),
                compression_level,
            ))
        })
    }
}

/// Wraps the `ZSTD_decompress` function.
///
/// This is a one-step decompression (not streaming).
///
/// You will need to make sure `dst` is large enough to store all the decompressed content, or an
/// error will be returned.
///
/// If decompression was a success, the number of bytes written will be returned.
pub fn decompress<C: WriteBuf + ?Sized>(
    dst: &mut C,
    src: &[u8],
) -> SafeResult {
    // Safety: ZSTD_decompress indeed returns how many bytes have been written.
    unsafe {
        dst.write_from(|buffer, capacity| {
            parse_code(zstd_sys::ZSTD_decompress(
                buffer,
                capacity,
                ptr_void(src),
                src.len(),
            ))
        })
    }
}

/// Wraps the `ZSTD_getDecompressedSize` function.
///
/// Returns `None` if the size could not be found, or if the content is actually empty.
#[deprecated(note = "Use ZSTD_getFrameContentSize instead")]
pub fn get_decompressed_size(src: &[u8]) -> Option<NonZeroU64> {
    // Safety: Just FFI
    // NonZeroU64::new maps a 0 result (unknown/empty) to None.
    NonZeroU64::new(unsafe {
        zstd_sys::ZSTD_getDecompressedSize(ptr_void(src), src.len()) as u64
    })
}

/// Maximum compressed size in worst case single-pass scenario
pub fn compress_bound(src_size: usize) -> usize {
    // Safety: Just FFI
    unsafe { zstd_sys::ZSTD_compressBound(src_size) }
}

/// Compression context
///
/// It is recommended to allocate a single context per thread and re-use it
/// for many compression operations.
// The lifetime tracks any dictionary/prefix/thread-pool borrowed by this context.
pub struct CCtx<'a>(NonNull<zstd_sys::ZSTD_CCtx>, PhantomData<&'a ()>);

impl Default for CCtx<'_> {
    fn default() -> Self {
        CCtx::create()
    }
}

impl<'a> CCtx<'a> {
    /// Tries to create a new context.
    ///
    /// Returns `None` if zstd returns a NULL pointer - may happen if allocation fails.
    pub fn try_create() -> Option<Self> {
        // Safety: Just FFI
        Some(CCtx(
            NonNull::new(unsafe { zstd_sys::ZSTD_createCCtx() })?,
            PhantomData,
        ))
    }

    /// Wrap `ZSTD_createCCtx`
    ///
    /// # Panics
    ///
    /// If zstd returns a NULL pointer.
    pub fn create() -> Self {
        Self::try_create()
            .expect("zstd returned null pointer when creating new context")
    }

    /// Wraps the `ZSTD_compressCCtx()` function
    pub fn compress<C: WriteBuf + ?Sized>(
        &mut self,
        dst: &mut C,
        src: &[u8],
        compression_level: CompressionLevel,
    ) -> SafeResult {
        // Safety: ZSTD_compressCCtx returns how many bytes were written.
        unsafe {
            dst.write_from(|buffer, capacity| {
                parse_code(zstd_sys::ZSTD_compressCCtx(
                    self.0.as_ptr(),
                    buffer,
                    capacity,
                    ptr_void(src),
                    src.len(),
                    compression_level,
                ))
            })
        }
    }

    /// Wraps the `ZSTD_compress2()` function.
    pub fn compress2<C: WriteBuf + ?Sized>(
        &mut self,
        dst: &mut C,
        src: &[u8],
    ) -> SafeResult {
        // Safety: ZSTD_compress2 returns how many bytes were written.
        unsafe {
            dst.write_from(|buffer, capacity| {
                parse_code(zstd_sys::ZSTD_compress2(
                    self.0.as_ptr(),
                    buffer,
                    capacity,
                    ptr_void(src),
                    src.len(),
                ))
            })
        }
    }

    /// Wraps the `ZSTD_compress_usingDict()` function.
    pub fn compress_using_dict<C: WriteBuf + ?Sized>(
        &mut self,
        dst: &mut C,
        src: &[u8],
        dict: &[u8],
        compression_level: CompressionLevel,
    ) -> SafeResult {
        // Safety: ZSTD_compress_usingDict returns how many bytes were written.
        unsafe {
            dst.write_from(|buffer, capacity| {
                parse_code(zstd_sys::ZSTD_compress_usingDict(
                    self.0.as_ptr(),
                    buffer,
                    capacity,
                    ptr_void(src),
                    src.len(),
                    ptr_void(dict),
                    dict.len(),
                    compression_level,
                ))
            })
        }
    }

    /// Wraps the `ZSTD_compress_usingCDict()` function.
    pub fn compress_using_cdict<C: WriteBuf + ?Sized>(
        &mut self,
        dst: &mut C,
        src: &[u8],
        cdict: &CDict<'_>,
    ) -> SafeResult {
        // Safety: ZSTD_compress_usingCDict returns how many bytes were written.
        unsafe {
            dst.write_from(|buffer, capacity| {
                parse_code(zstd_sys::ZSTD_compress_usingCDict(
                    self.0.as_ptr(),
                    buffer,
                    capacity,
                    ptr_void(src),
                    src.len(),
                    cdict.0.as_ptr(),
                ))
            })
        }
    }

    /// Initializes the context with the given compression level.
    ///
    /// This is equivalent to running:
    /// * `reset()`
    /// * `set_parameter(CompressionLevel, compression_level)`
    pub fn init(&mut self, compression_level: CompressionLevel) -> SafeResult {
        // Safety: Just FFI
        let code = unsafe {
            zstd_sys::ZSTD_initCStream(self.0.as_ptr(), compression_level)
        };
        parse_code(code)
    }

    /// Wraps the `ZSTD_initCStream_srcSize()` function.
    #[cfg(feature = "experimental")]
    #[cfg_attr(feature = "doc-cfg", doc(cfg(feature = "experimental")))]
    #[deprecated]
    pub fn init_src_size(
        &mut self,
        compression_level: CompressionLevel,
        pledged_src_size: u64,
    ) -> SafeResult {
        // Safety: Just FFI
        let code = unsafe {
            zstd_sys::ZSTD_initCStream_srcSize(
                self.0.as_ptr(),
                compression_level as c_int,
                pledged_src_size as c_ulonglong,
            )
        };
        parse_code(code)
    }

    /// Wraps the `ZSTD_initCStream_usingDict()` function.
    #[cfg(feature = "experimental")]
    #[cfg_attr(feature = "doc-cfg", doc(cfg(feature = "experimental")))]
    #[deprecated]
    pub fn init_using_dict(
        &mut self,
        dict: &[u8],
        compression_level: CompressionLevel,
    ) -> SafeResult {
        // Safety: Just FFI
        let code = unsafe {
            zstd_sys::ZSTD_initCStream_usingDict(
                self.0.as_ptr(),
                ptr_void(dict),
                dict.len(),
                compression_level,
            )
        };
        parse_code(code)
    }

    /// Wraps the `ZSTD_initCStream_usingCDict()` function.
    #[cfg(feature = "experimental")]
    #[cfg_attr(feature = "doc-cfg", doc(cfg(feature = "experimental")))]
    #[deprecated]
    pub fn init_using_cdict<'b>(&mut self, cdict: &CDict<'b>) -> SafeResult
    where
        'b: 'a, // Dictionary outlives the stream.
    {
        // Safety: Just FFI
        let code = unsafe {
            zstd_sys::ZSTD_initCStream_usingCDict(
                self.0.as_ptr(),
                cdict.0.as_ptr(),
            )
        };
        parse_code(code)
    }

    /// Tries to load a dictionary.
    ///
    /// The dictionary content will be copied internally and does not need to be kept alive after
    /// calling this function.
    ///
    /// If you need to use the same dictionary for multiple contexts, it may be more efficient to
    /// create a `CDict` first, then loads that.
    ///
    /// The dictionary will apply to all compressed frames, until a new dictionary is set.
    pub fn load_dictionary(&mut self, dict: &[u8]) -> SafeResult {
        // Safety: Just FFI
        parse_code(unsafe {
            zstd_sys::ZSTD_CCtx_loadDictionary(
                self.0.as_ptr(),
                ptr_void(dict),
                dict.len(),
            )
        })
    }

    /// Wraps the `ZSTD_CCtx_refCDict()` function.
    ///
    /// Dictionary must outlive the context.
    pub fn ref_cdict<'b>(&mut self, cdict: &CDict<'b>) -> SafeResult
    where
        'b: 'a,
    {
        // Safety: Just FFI
        parse_code(unsafe {
            zstd_sys::ZSTD_CCtx_refCDict(self.0.as_ptr(), cdict.0.as_ptr())
        })
    }

    /// Return to "no-dictionary" mode.
    ///
    /// This will disable any dictionary/prefix previously registered for future frames.
    pub fn disable_dictionary(&mut self) -> SafeResult {
        // Safety: Just FFI
        // A NULL/0 dictionary clears any previously-set dictionary.
        parse_code(unsafe {
            zstd_sys::ZSTD_CCtx_loadDictionary(
                self.0.as_ptr(),
                core::ptr::null(),
                0,
            )
        })
    }

    /// Use some prefix as single-use dictionary for the next compressed frame.
    ///
    /// Just like a dictionary, decompression will need to be given the same prefix.
    ///
    /// This is best used if the "prefix" looks like the data to be compressed.
    pub fn ref_prefix<'b>(&mut self, prefix: &'b [u8]) -> SafeResult
    where
        'b: 'a,
    {
        // Safety: Just FFI
        parse_code(unsafe {
            zstd_sys::ZSTD_CCtx_refPrefix(
                self.0.as_ptr(),
                ptr_void(prefix),
                prefix.len(),
            )
        })
    }

    /// Performs a step of a streaming compression operation.
    ///
    /// This will read some data from `input` and/or write some data to `output`.
    ///
    /// # Returns
    ///
    /// A hint for the "ideal" amount of input data to provide in the next call.
    ///
    /// This hint is only for performance purposes.
    ///
    /// Wraps the `ZSTD_compressStream()` function.
    pub fn compress_stream<C: WriteBuf + ?Sized>(
        &mut self,
        output: &mut OutBuffer<'_, C>,
        input: &mut InBuffer<'_>,
    ) -> SafeResult {
        let mut output = output.wrap();
        let mut input = input.wrap();
        // Safety: Just FFI
        let code = unsafe {
            zstd_sys::ZSTD_compressStream(
                self.0.as_ptr(),
                ptr_mut(&mut output),
                ptr_mut(&mut input),
            )
        };
        parse_code(code)
    }

    /// Performs a step of a streaming compression operation.
    ///
    /// This will read some data from `input` and/or write some data to `output`.
    ///
    /// The `end_op` directive can be used to specify what to do after: nothing special, flush
    /// internal buffers, or end the frame.
    ///
    /// # Returns
    ///
    /// A lower bound for the amount of data that still needs to be flushed out.
    ///
    /// This is useful when flushing or ending the frame: you need to keep calling this function
    /// until it returns 0.
    ///
    /// Wraps the `ZSTD_compressStream2()` function.
    pub fn compress_stream2<C: WriteBuf + ?Sized>(
        &mut self,
        output: &mut OutBuffer<'_, C>,
        input: &mut InBuffer<'_>,
        end_op: zstd_sys::ZSTD_EndDirective,
    ) -> SafeResult {
        let mut output = output.wrap();
        let mut input = input.wrap();
        // Safety: Just FFI
        parse_code(unsafe {
            zstd_sys::ZSTD_compressStream2(
                self.0.as_ptr(),
                ptr_mut(&mut output),
                ptr_mut(&mut input),
                end_op,
            )
        })
    }

    /// Flush any intermediate buffer.
    ///
    /// To fully flush, you should keep calling this function until it returns `Ok(0)`.
    ///
    /// Wraps the `ZSTD_flushStream()` function.
    pub fn flush_stream<C: WriteBuf + ?Sized>(
        &mut self,
        output: &mut OutBuffer<'_, C>,
    ) -> SafeResult {
        let mut output = output.wrap();
        // Safety: Just FFI
        let code = unsafe {
            zstd_sys::ZSTD_flushStream(self.0.as_ptr(), ptr_mut(&mut output))
        };
        parse_code(code)
    }

    /// Ends the stream.
    ///
    /// You should keep calling this function until it returns `Ok(0)`.
    ///
    /// Wraps the `ZSTD_endStream()` function.
    pub fn end_stream<C: WriteBuf + ?Sized>(
        &mut self,
        output: &mut OutBuffer<'_, C>,
    ) -> SafeResult {
        let mut output = output.wrap();
        // Safety: Just FFI
        let code = unsafe {
            zstd_sys::ZSTD_endStream(self.0.as_ptr(), ptr_mut(&mut output))
        };
        parse_code(code)
    }

    /// Returns the size currently used by this context.
    ///
    /// This may change over time.
    pub fn sizeof(&self) -> usize {
        // Safety: Just FFI
        unsafe { zstd_sys::ZSTD_sizeof_CCtx(self.0.as_ptr()) }
    }

    /// Resets the state of the context.
    ///
    /// Depending on the reset mode, it can reset the session, the parameters, or both.
    ///
    /// Wraps the `ZSTD_CCtx_reset()` function.
    pub fn reset(&mut self, reset: ResetDirective) -> SafeResult {
        // Safety: Just FFI
        parse_code(unsafe {
            zstd_sys::ZSTD_CCtx_reset(self.0.as_ptr(), reset.as_sys())
        })
    }

    /// Sets a compression parameter.
    ///
    /// Some of these parameters need to be set during de-compression as well.
    pub fn set_parameter(&mut self, param: CParameter) -> SafeResult {
        // TODO: Until bindgen properly generates a binding for this, we'll need to do it here.
        // Experimental parameters are only exposed as numbered
        // `ZSTD_c_experimentalParamN` values; re-alias them to readable names.
        #[cfg(feature = "experimental")]
        use zstd_sys::ZSTD_cParameter::{
            ZSTD_c_experimentalParam1 as ZSTD_c_rsyncable,
            ZSTD_c_experimentalParam10 as ZSTD_c_stableOutBuffer,
            ZSTD_c_experimentalParam11 as ZSTD_c_blockDelimiters,
            ZSTD_c_experimentalParam12 as ZSTD_c_validateSequences,
            ZSTD_c_experimentalParam13 as ZSTD_c_useBlockSplitter,
            ZSTD_c_experimentalParam14 as ZSTD_c_useRowMatchFinder,
            ZSTD_c_experimentalParam15 as ZSTD_c_deterministicRefPrefix,
            ZSTD_c_experimentalParam16 as ZSTD_c_prefetchCDictTables,
            ZSTD_c_experimentalParam17 as ZSTD_c_enableSeqProducerFallback,
            ZSTD_c_experimentalParam18 as ZSTD_c_maxBlockSize,
            ZSTD_c_experimentalParam19 as ZSTD_c_searchForExternalRepcodes,
            ZSTD_c_experimentalParam2 as ZSTD_c_format,
            ZSTD_c_experimentalParam3 as ZSTD_c_forceMaxWindow,
            ZSTD_c_experimentalParam4 as ZSTD_c_forceAttachDict,
            ZSTD_c_experimentalParam5 as ZSTD_c_literalCompressionMode,
            ZSTD_c_experimentalParam7 as ZSTD_c_srcSizeHint,
            ZSTD_c_experimentalParam8 as ZSTD_c_enableDedicatedDictSearch,
            ZSTD_c_experimentalParam9 as ZSTD_c_stableInBuffer,
        };

        use zstd_sys::ZSTD_cParameter::*;
        use CParameter::*;

        // Translate each high-level variant to the (raw enum, c_int) pair
        // expected by ZSTD_CCtx_setParameter.
        let (param, value) = match param {
            #[cfg(feature = "experimental")]
            RSyncable(rsyncable) => (ZSTD_c_rsyncable, rsyncable as c_int),
            #[cfg(feature = "experimental")]
            Format(format) => (ZSTD_c_format, format as c_int),
            #[cfg(feature = "experimental")]
            ForceMaxWindow(force) => (ZSTD_c_forceMaxWindow, force as c_int),
            #[cfg(feature = "experimental")]
            ForceAttachDict(force) => (ZSTD_c_forceAttachDict, force as c_int),
            #[cfg(feature = "experimental")]
            LiteralCompressionMode(mode) => {
                (ZSTD_c_literalCompressionMode, mode as c_int)
            }
            #[cfg(feature = "experimental")]
            SrcSizeHint(value) => (ZSTD_c_srcSizeHint, value as c_int),
            #[cfg(feature = "experimental")]
            EnableDedicatedDictSearch(enable) => {
                (ZSTD_c_enableDedicatedDictSearch, enable as c_int)
            }
            #[cfg(feature = "experimental")]
            StableInBuffer(stable) => (ZSTD_c_stableInBuffer, stable as c_int),
            #[cfg(feature = "experimental")]
            StableOutBuffer(stable) => {
                (ZSTD_c_stableOutBuffer, stable as c_int)
            }
            #[cfg(feature = "experimental")]
            BlockDelimiters(value) => (ZSTD_c_blockDelimiters, value as c_int),
            #[cfg(feature = "experimental")]
            ValidateSequences(validate) => {
                (ZSTD_c_validateSequences, validate as c_int)
            }
            #[cfg(feature = "experimental")]
            UseBlockSplitter(split) => {
                (ZSTD_c_useBlockSplitter, split as c_int)
            }
            #[cfg(feature = "experimental")]
            UseRowMatchFinder(mode) => {
                (ZSTD_c_useRowMatchFinder, mode as c_int)
            }
            #[cfg(feature = "experimental")]
            DeterministicRefPrefix(deterministic) => {
                (ZSTD_c_deterministicRefPrefix, deterministic as c_int)
            }
            #[cfg(feature = "experimental")]
            PrefetchCDictTables(prefetch) => {
                (ZSTD_c_prefetchCDictTables, prefetch as c_int)
            }
            #[cfg(feature = "experimental")]
            EnableSeqProducerFallback(enable) => {
                (ZSTD_c_enableSeqProducerFallback, enable as c_int)
            }
            #[cfg(feature = "experimental")]
            MaxBlockSize(value) => (ZSTD_c_maxBlockSize, value as c_int),
            #[cfg(feature = "experimental")]
            SearchForExternalRepcodes(value) => {
                (ZSTD_c_searchForExternalRepcodes, value as c_int)
            }
            TargetCBlockSize(value) => {
                (ZSTD_c_targetCBlockSize, value as c_int)
            }
            CompressionLevel(level) => (ZSTD_c_compressionLevel, level),
            WindowLog(value) => (ZSTD_c_windowLog, value as c_int),
            HashLog(value) => (ZSTD_c_hashLog, value as c_int),
            ChainLog(value) => (ZSTD_c_chainLog, value as c_int),
            SearchLog(value) => (ZSTD_c_searchLog, value as c_int),
            MinMatch(value) => (ZSTD_c_minMatch, value as c_int),
            TargetLength(value) => (ZSTD_c_targetLength, value as c_int),
            Strategy(strategy) => (ZSTD_c_strategy, strategy as c_int),
            EnableLongDistanceMatching(flag) => {
                (ZSTD_c_enableLongDistanceMatching, flag as c_int)
            }
            LdmHashLog(value) => (ZSTD_c_ldmHashLog, value as c_int),
            LdmMinMatch(value) => (ZSTD_c_ldmMinMatch, value as c_int),
            LdmBucketSizeLog(value) => {
                (ZSTD_c_ldmBucketSizeLog, value as c_int)
            }
            LdmHashRateLog(value) => (ZSTD_c_ldmHashRateLog, value as c_int),
            ContentSizeFlag(flag) => (ZSTD_c_contentSizeFlag, flag as c_int),
            ChecksumFlag(flag) => (ZSTD_c_checksumFlag, flag as c_int),
            DictIdFlag(flag) => (ZSTD_c_dictIDFlag, flag as c_int),
            NbWorkers(value) => (ZSTD_c_nbWorkers, value as c_int),
            JobSize(value) => (ZSTD_c_jobSize, value as c_int),
            // NOTE(review): variant is named OverlapSizeLog but maps to
            // ZSTD_c_overlapLog — presumably kept for API compatibility; confirm.
            OverlapSizeLog(value) => (ZSTD_c_overlapLog, value as c_int),
        };

        // Safety: Just FFI
        parse_code(unsafe {
            zstd_sys::ZSTD_CCtx_setParameter(self.0.as_ptr(), param, value)
        })
    }

    /// Guarantee that the input size will be this value.
    ///
    /// If given `None`, assumes the size is unknown.
    ///
    /// Unless explicitly disabled, this will cause the size to be written in the compressed frame
    /// header.
    ///
    /// If the actual data given to compress has a different size, an error will be returned.
    pub fn set_pledged_src_size(
        &mut self,
        pledged_src_size: Option<u64>,
    ) -> SafeResult {
        // Safety: Just FFI
        parse_code(unsafe {
            zstd_sys::ZSTD_CCtx_setPledgedSrcSize(
                self.0.as_ptr(),
                pledged_src_size.unwrap_or(CONTENTSIZE_UNKNOWN) as c_ulonglong,
            )
        })
    }

    /// Creates a copy of this context.
    ///
    /// This only works before any data has been compressed. An error will be
    /// returned otherwise.
    #[cfg(feature = "experimental")]
    #[cfg_attr(feature = "doc-cfg", doc(cfg(feature = "experimental")))]
    pub fn try_clone(
        &self,
        pledged_src_size: Option<u64>,
    ) -> Result<Self, ErrorCode> {
        // Safety: Just FFI
        let context = NonNull::new(unsafe { zstd_sys::ZSTD_createCCtx() })
            .ok_or(0usize)?;

        // Safety: Just FFI
        parse_code(unsafe {
            zstd_sys::ZSTD_copyCCtx(
                context.as_ptr(),
                self.0.as_ptr(),
                pledged_src_size.unwrap_or(CONTENTSIZE_UNKNOWN),
            )
        })?;

        Ok(CCtx(context, self.1))
    }

    /// Wraps the `ZSTD_getBlockSize()` function.
    #[cfg(feature = "experimental")]
    #[cfg_attr(feature = "doc-cfg", doc(cfg(feature = "experimental")))]
    pub fn get_block_size(&self) -> usize {
        // Safety: Just FFI
        unsafe { zstd_sys::ZSTD_getBlockSize(self.0.as_ptr()) }
    }

    /// Wraps the `ZSTD_compressBlock()` function.
    #[cfg(feature = "experimental")]
    #[cfg_attr(feature = "doc-cfg", doc(cfg(feature = "experimental")))]
    pub fn compress_block<C: WriteBuf + ?Sized>(
        &mut self,
        dst: &mut C,
        src: &[u8],
    ) -> SafeResult {
        // Safety: ZSTD_compressBlock returns the number of bytes written.
        unsafe {
            dst.write_from(|buffer, capacity| {
                parse_code(zstd_sys::ZSTD_compressBlock(
                    self.0.as_ptr(),
                    buffer,
                    capacity,
                    ptr_void(src),
                    src.len(),
                ))
            })
        }
    }

    /// Returns the recommended input buffer size.
    ///
    /// Using this size may result in minor performance boost.
    pub fn in_size() -> usize {
        // Safety: Just FFI
        unsafe { zstd_sys::ZSTD_CStreamInSize() }
    }

    /// Returns the recommended output buffer size.
    ///
    /// Using this may result in minor performance boost.
    pub fn out_size() -> usize {
        // Safety: Just FFI
        unsafe { zstd_sys::ZSTD_CStreamOutSize() }
    }

    /// Use a shared thread pool for this context.
    ///
    /// Thread pool must outlive the context.
    #[cfg(all(feature = "experimental", feature = "zstdmt"))]
    #[cfg_attr(
        feature = "doc-cfg",
        doc(cfg(all(feature = "experimental", feature = "zstdmt")))
    )]
    pub fn ref_thread_pool<'b>(&mut self, pool: &'b ThreadPool) -> SafeResult
    where
        'b: 'a,
    {
        parse_code(unsafe {
            zstd_sys::ZSTD_CCtx_refThreadPool(self.0.as_ptr(), pool.0.as_ptr())
        })
    }

    /// Return to using a private thread pool for this context.
    #[cfg(all(feature = "experimental", feature = "zstdmt"))]
    #[cfg_attr(
        feature = "doc-cfg",
        doc(cfg(all(feature = "experimental", feature = "zstdmt")))
    )]
    pub fn disable_thread_pool(&mut self) -> SafeResult {
        // Passing NULL detaches any shared pool previously referenced.
        parse_code(unsafe {
            zstd_sys::ZSTD_CCtx_refThreadPool(
                self.0.as_ptr(),
                core::ptr::null_mut(),
            )
        })
    }
}

impl<'a> Drop for CCtx<'a> {
    fn drop(&mut self) {
        // Safety: Just FFI
        unsafe {
            zstd_sys::ZSTD_freeCCtx(self.0.as_ptr());
        }
    }
}

unsafe impl Send for CCtx<'_> {}
// Non thread-safe methods already take `&mut self`, so it's fine to implement Sync here.
unsafe impl Sync for CCtx<'_> {}

// Safety contract (caller): `text` must be a valid NUL-terminated UTF-8 C string
// with 'static lifetime.
unsafe fn c_char_to_str(text: *const c_char) -> &'static str {
    core::ffi::CStr::from_ptr(text)
        .to_str()
        .expect("bad error message from zstd")
}

/// Returns the error string associated with an error code.
pub fn get_error_name(code: usize) -> &'static str {
    unsafe {
        // Safety: assumes ZSTD returns a well-formed utf8 string.
        let name = zstd_sys::ZSTD_getErrorName(code);
        c_char_to_str(name)
    }
}

/// A Decompression Context.
///
/// The lifetime references the potential dictionary used for this context.
///
/// If no dictionary was used, it will most likely be `'static`.
///
/// Same as `DStream`.
pub struct DCtx<'a>(NonNull<zstd_sys::ZSTD_DCtx>, PhantomData<&'a ()>);

impl Default for DCtx<'_> {
    fn default() -> Self {
        DCtx::create()
    }
}

impl<'a> DCtx<'a> {
    /// Try to create a new decompression context.
    ///
    /// Returns `None` if the operation failed (for example, not enough memory).
    pub fn try_create() -> Option<Self> {
        Some(DCtx(
            NonNull::new(unsafe { zstd_sys::ZSTD_createDCtx() })?,
            PhantomData,
        ))
    }

    /// Creates a new decoding context.
    ///
    /// # Panics
    ///
    /// If the context creation fails.
    pub fn create() -> Self {
        Self::try_create()
            .expect("zstd returned null pointer when creating new context")
    }

    /// Fully decompress the given frame.
    ///
    /// This decompresses an entire frame in-memory.
    /// If you have enough memory to store both the
    /// input and output buffer, then it may be faster than streaming decompression.
    ///
    /// Wraps the `ZSTD_decompressDCtx()` function.
    pub fn decompress<C: WriteBuf + ?Sized>(
        &mut self,
        dst: &mut C,
        src: &[u8],
    ) -> SafeResult {
        unsafe {
            dst.write_from(|buffer, capacity| {
                parse_code(zstd_sys::ZSTD_decompressDCtx(
                    self.0.as_ptr(),
                    buffer,
                    capacity,
                    ptr_void(src),
                    src.len(),
                ))
            })
        }
    }

    /// Fully decompress the given frame using a dictionary.
    ///
    /// Dictionary must be identical to the one used during compression.
    ///
    /// If you plan on using the same dictionary multiple times, it is faster to create a `DDict`
    /// first and use `decompress_using_ddict`.
    ///
    /// Wraps `ZSTD_decompress_usingDict`
    pub fn decompress_using_dict<C: WriteBuf + ?Sized>(
        &mut self,
        dst: &mut C,
        src: &[u8],
        dict: &[u8],
    ) -> SafeResult {
        unsafe {
            dst.write_from(|buffer, capacity| {
                parse_code(zstd_sys::ZSTD_decompress_usingDict(
                    self.0.as_ptr(),
                    buffer,
                    capacity,
                    ptr_void(src),
                    src.len(),
                    ptr_void(dict),
                    dict.len(),
                ))
            })
        }
    }

    /// Fully decompress the given frame using a dictionary.
    ///
    /// Dictionary must be identical to the one used during compression.
    ///
    /// Wraps the `ZSTD_decompress_usingDDict()` function.
    pub fn decompress_using_ddict<C: WriteBuf + ?Sized>(
        &mut self,
        dst: &mut C,
        src: &[u8],
        ddict: &DDict<'_>,
    ) -> SafeResult {
        unsafe {
            dst.write_from(|buffer, capacity| {
                parse_code(zstd_sys::ZSTD_decompress_usingDDict(
                    self.0.as_ptr(),
                    buffer,
                    capacity,
                    ptr_void(src),
                    src.len(),
                    ddict.0.as_ptr(),
                ))
            })
        }
    }

    /// Initializes an existing `DStream` for decompression.
    ///
    /// This is equivalent to calling:
    /// * `reset(SessionOnly)`
    /// * `disable_dictionary()`
    ///
    /// Wraps the `ZSTD_initDStream()` function.
    pub fn init(&mut self) -> SafeResult {
        let code = unsafe { zstd_sys::ZSTD_initDStream(self.0.as_ptr()) };
        parse_code(code)
    }

    /// Wraps the `ZSTD_initDStream_usingDict()` function.
    #[cfg(feature = "experimental")]
    #[cfg_attr(feature = "doc-cfg", doc(cfg(feature = "experimental")))]
    #[deprecated]
    pub fn init_using_dict(&mut self, dict: &[u8]) -> SafeResult {
        let code = unsafe {
            zstd_sys::ZSTD_initDStream_usingDict(
                self.0.as_ptr(),
                ptr_void(dict),
                dict.len(),
            )
        };
        parse_code(code)
    }

    /// Wraps the `ZSTD_initDStream_usingDDict()` function.
    #[cfg(feature = "experimental")]
    #[cfg_attr(feature = "doc-cfg", doc(cfg(feature = "experimental")))]
    #[deprecated]
    pub fn init_using_ddict<'b>(&mut self, ddict: &DDict<'b>) -> SafeResult
    where
        'b: 'a,
    {
        let code = unsafe {
            zstd_sys::ZSTD_initDStream_usingDDict(
                self.0.as_ptr(),
                ddict.0.as_ptr(),
            )
        };
        parse_code(code)
    }

    /// Resets the state of the context.
    ///
    /// Depending on the reset mode, it can reset the session, the parameters, or both.
    ///
    /// Wraps the `ZSTD_DCtx_reset()` function.
    pub fn reset(&mut self, reset: ResetDirective) -> SafeResult {
        parse_code(unsafe {
            zstd_sys::ZSTD_DCtx_reset(self.0.as_ptr(), reset.as_sys())
        })
    }

    /// Loads a dictionary.
    ///
    /// This will let this context decompress frames that were compressed using this dictionary.
    ///
    /// The dictionary content will be copied internally and does not need to be kept alive after
    /// calling this function.
    ///
    /// If you need to use the same dictionary for multiple contexts, it may be more efficient to
    /// create a `DDict` first, then loads that.
    ///
    /// The dictionary will apply to all future frames, until a new dictionary is set.
    pub fn load_dictionary(&mut self, dict: &[u8]) -> SafeResult {
        parse_code(unsafe {
            zstd_sys::ZSTD_DCtx_loadDictionary(
                self.0.as_ptr(),
                ptr_void(dict),
                dict.len(),
            )
        })
    }

    /// Return to "no-dictionary" mode.
    ///
    /// This will disable any dictionary/prefix previously registered for future frames.
    pub fn disable_dictionary(&mut self) -> SafeResult {
        // A NULL/0 dictionary clears any previously-set dictionary.
        parse_code(unsafe {
            zstd_sys::ZSTD_DCtx_loadDictionary(
                self.0.as_ptr(),
                core::ptr::null(),
                0,
            )
        })
    }

    /// References a dictionary.
    ///
    /// This will let this context decompress frames compressed with the same dictionary.
    ///
    /// It will apply to all frames decompressed by this context (until a new dictionary is set).
    ///
    /// Wraps the `ZSTD_DCtx_refDDict()` function.
    pub fn ref_ddict<'b>(&mut self, ddict: &DDict<'b>) -> SafeResult
    where
        'b: 'a,
    {
        parse_code(unsafe {
            zstd_sys::ZSTD_DCtx_refDDict(self.0.as_ptr(), ddict.0.as_ptr())
        })
    }

    /// Use some prefix as single-use dictionary for the next frame.
    ///
    /// Just like a dictionary, this only works if compression was done with the same prefix.
    ///
    /// But unlike a dictionary, this only applies to the next frame.
    ///
    /// Wraps the `ZSTD_DCtx_refPrefix()` function.
    pub fn ref_prefix<'b>(&mut self, prefix: &'b [u8]) -> SafeResult
    where
        'b: 'a,
    {
        parse_code(unsafe {
            zstd_sys::ZSTD_DCtx_refPrefix(
                self.0.as_ptr(),
                ptr_void(prefix),
                prefix.len(),
            )
        })
    }

    /// Sets a decompression parameter.
    pub fn set_parameter(&mut self, param: DParameter) -> SafeResult {
        // Experimental parameters are only exposed as numbered
        // `ZSTD_d_experimentalParamN` values; re-alias them to readable names.
        #[cfg(feature = "experimental")]
        use zstd_sys::ZSTD_dParameter::{
            ZSTD_d_experimentalParam1 as ZSTD_d_format,
            ZSTD_d_experimentalParam2 as ZSTD_d_stableOutBuffer,
            ZSTD_d_experimentalParam3 as ZSTD_d_forceIgnoreChecksum,
            ZSTD_d_experimentalParam4 as ZSTD_d_refMultipleDDicts,
        };

        use zstd_sys::ZSTD_dParameter::*;
        use DParameter::*;

        let (param, value) = match param {
            #[cfg(feature = "experimental")]
            Format(format) => (ZSTD_d_format, format as c_int),
            #[cfg(feature = "experimental")]
            StableOutBuffer(stable) => {
                (ZSTD_d_stableOutBuffer, stable as c_int)
            }
            #[cfg(feature = "experimental")]
            ForceIgnoreChecksum(force) => {
                (ZSTD_d_forceIgnoreChecksum, force as c_int)
            }
            #[cfg(feature = "experimental")]
            RefMultipleDDicts(value) => {
                (ZSTD_d_refMultipleDDicts, value as c_int)
            }

            WindowLogMax(value) => (ZSTD_d_windowLogMax, value as c_int),
        };

        parse_code(unsafe {
            zstd_sys::ZSTD_DCtx_setParameter(self.0.as_ptr(), param, value)
        })
    }

    /// Performs a step of a streaming decompression operation.
    ///
    /// This will read some data from `input` and/or write some data to `output`.
    ///
    /// # Returns
    ///
    /// * `Ok(0)` if the current frame just finished decompressing successfully.
    /// * `Ok(hint)` with a hint for the "ideal" amount of input data to provide in the next call.
    ///   Can be safely ignored.
    ///
    /// Wraps the `ZSTD_decompressStream()` function.
    pub fn decompress_stream<C: WriteBuf + ?Sized>(
        &mut self,
        output: &mut OutBuffer<'_, C>,
        input: &mut InBuffer<'_>,
    ) -> SafeResult {
        let mut output = output.wrap();
        let mut input = input.wrap();
        let code = unsafe {
            zstd_sys::ZSTD_decompressStream(
                self.0.as_ptr(),
                ptr_mut(&mut output),
                ptr_mut(&mut input),
            )
        };
        parse_code(code)
    }

    /// Wraps the `ZSTD_DStreamInSize()` function.
    ///
    /// Returns a hint for the recommended size of the input buffer for decompression.
    pub fn in_size() -> usize {
        unsafe { zstd_sys::ZSTD_DStreamInSize() }
    }

    /// Wraps the `ZSTD_DStreamOutSize()` function.
    ///
    /// Returns a hint for the recommended size of the output buffer for decompression.
    pub fn out_size() -> usize {
        unsafe { zstd_sys::ZSTD_DStreamOutSize() }
    }

    /// Wraps the `ZSTD_sizeof_DCtx()` function.
    pub fn sizeof(&self) -> usize {
        unsafe { zstd_sys::ZSTD_sizeof_DCtx(self.0.as_ptr()) }
    }

    /// Wraps the `ZSTD_decompressBlock()` function.
    #[cfg(feature = "experimental")]
    #[cfg_attr(feature = "doc-cfg", doc(cfg(feature = "experimental")))]
    pub fn decompress_block<C: WriteBuf + ?Sized>(
        &mut self,
        dst: &mut C,
        src: &[u8],
    ) -> SafeResult {
        unsafe {
            dst.write_from(|buffer, capacity| {
                parse_code(zstd_sys::ZSTD_decompressBlock(
                    self.0.as_ptr(),
                    buffer,
                    capacity,
                    ptr_void(src),
                    src.len(),
                ))
            })
        }
    }

    /// Wraps the `ZSTD_insertBlock()` function.
    #[cfg(feature = "experimental")]
    #[cfg_attr(feature = "doc-cfg", doc(cfg(feature = "experimental")))]
    pub fn insert_block(&mut self, block: &[u8]) -> usize {
        unsafe {
            zstd_sys::ZSTD_insertBlock(
                self.0.as_ptr(),
                ptr_void(block),
                block.len(),
            )
        }
    }

    /// Creates a copy of this context.
    ///
    /// This only works before any data has been decompressed. An error will be
    /// returned otherwise.
    #[cfg(feature = "experimental")]
    #[cfg_attr(feature = "doc-cfg", doc(cfg(feature = "experimental")))]
    pub fn try_clone(&self) -> Result<Self, ErrorCode> {
        let context = NonNull::new(unsafe { zstd_sys::ZSTD_createDCtx() })
            .ok_or(0usize)?;

        unsafe { zstd_sys::ZSTD_copyDCtx(context.as_ptr(), self.0.as_ptr()) };

        Ok(DCtx(context, self.1))
    }
}

impl Drop for DCtx<'_> {
    fn drop(&mut self) {
        unsafe {
            zstd_sys::ZSTD_freeDCtx(self.0.as_ptr());
        }
    }
}

unsafe impl Send for DCtx<'_> {}
// Non thread-safe methods already take `&mut self`, so it's fine to implement Sync here.
unsafe impl Sync for DCtx<'_> {}

/// Compression dictionary.
pub struct CDict<'a>(NonNull<zstd_sys::ZSTD_CDict>, PhantomData<&'a ()>);

impl CDict<'static> {
    /// Prepare a dictionary to compress data.
    ///
    /// This will make it easier for compression contexts to load this dictionary.
    ///
    /// The dictionary content will be copied internally, and does not need to be kept around.
    ///
    /// # Panics
    ///
    /// If loading this dictionary failed.
    pub fn create(
        dict_buffer: &[u8],
        compression_level: CompressionLevel,
    ) -> Self {
        Self::try_create(dict_buffer, compression_level)
            .expect("zstd returned null pointer when creating dict")
    }

    /// Prepare a dictionary to compress data.
    ///
    /// This will make it easier for compression contexts to load this dictionary.
    ///
    /// The dictionary content will be copied internally, and does not need to be kept around.
pub fn try_create( dict_buffer: &[u8], compression_level: CompressionLevel, ) -> Option<Self> { Some(CDict( NonNull::new(unsafe { zstd_sys::ZSTD_createCDict( ptr_void(dict_buffer), dict_buffer.len(), compression_level, ) })?, PhantomData, )) } } impl<'a> CDict<'a> { #[cfg(feature = "experimental")] #[cfg_attr(feature = "doc-cfg", doc(cfg(feature = "experimental")))] pub fn create_by_reference( dict_buffer: &'a [u8], compression_level: CompressionLevel, ) -> Self { CDict( NonNull::new(unsafe { zstd_sys::ZSTD_createCDict_byReference( ptr_void(dict_buffer), dict_buffer.len(), compression_level, ) }) .expect("zstd returned null pointer"), PhantomData, ) } /// Returns the _current_ memory usage of this dictionary. /// /// Note that this may change over time. pub fn sizeof(&self) -> usize { unsafe { zstd_sys::ZSTD_sizeof_CDict(self.0.as_ptr()) } } /// Returns the dictionary ID for this dict. /// /// Returns `None` if this dictionary is empty or invalid. pub fn get_dict_id(&self) -> Option<NonZeroU32> { NonZeroU32::new(unsafe { zstd_sys::ZSTD_getDictID_fromCDict(self.0.as_ptr()) as u32 }) } } /// Wraps the `ZSTD_createCDict()` function. pub fn create_cdict( dict_buffer: &[u8], compression_level: CompressionLevel, ) -> CDict<'static> { CDict::create(dict_buffer, compression_level) } impl<'a> Drop for CDict<'a> { fn drop(&mut self) { unsafe { zstd_sys::ZSTD_freeCDict(self.0.as_ptr()); } } } unsafe impl<'a> Send for CDict<'a> {} unsafe impl<'a> Sync for CDict<'a> {} /// Wraps the `ZSTD_compress_usingCDict()` function. pub fn compress_using_cdict( cctx: &mut CCtx<'_>, dst: &mut [u8], src: &[u8], cdict: &CDict<'_>, ) -> SafeResult { cctx.compress_using_cdict(dst, src, cdict) } /// A digested decompression dictionary. 
pub struct DDict<'a>(NonNull<zstd_sys::ZSTD_DDict>, PhantomData<&'a ()>);

impl DDict<'static> {
    /// Prepare a dictionary to decompress data.
    ///
    /// The dictionary content is copied internally, so the result is `'static`.
    ///
    /// # Panics
    ///
    /// If zstd returns a NULL pointer.
    pub fn create(dict_buffer: &[u8]) -> Self {
        Self::try_create(dict_buffer)
            .expect("zstd returned null pointer when creating dict")
    }

    /// Prepare a dictionary to decompress data.
    ///
    /// Returns `None` if zstd returns a NULL pointer (e.g. allocation failure).
    pub fn try_create(dict_buffer: &[u8]) -> Option<Self> {
        // Safety: Just FFI; zstd copies the buffer, hence the 'static result.
        Some(DDict(
            NonNull::new(unsafe {
                zstd_sys::ZSTD_createDDict(
                    ptr_void(dict_buffer),
                    dict_buffer.len(),
                )
            })?,
            PhantomData,
        ))
    }
}

impl<'a> DDict<'a> {
    /// Wraps the `ZSTD_sizeof_DDict()` function: current memory usage of this dictionary.
    pub fn sizeof(&self) -> usize {
        // Safety: Just FFI.
        unsafe { zstd_sys::ZSTD_sizeof_DDict(self.0.as_ptr()) }
    }

    /// Wraps the `ZSTD_createDDict_byReference()` function.
    ///
    /// The dictionary will keep referencing `dict_buffer`.
    #[cfg(feature = "experimental")]
    #[cfg_attr(feature = "doc-cfg", doc(cfg(feature = "experimental")))]
    pub fn create_by_reference(dict_buffer: &'a [u8]) -> Self {
        // Safety: Just FFI; the `'a` lifetime keeps `dict_buffer` alive.
        DDict(
            NonNull::new(unsafe {
                zstd_sys::ZSTD_createDDict_byReference(
                    ptr_void(dict_buffer),
                    dict_buffer.len(),
                )
            })
            .expect("zstd returned null pointer"),
            PhantomData,
        )
    }

    /// Returns the dictionary ID for this dict.
    ///
    /// Returns `None` if this dictionary is empty or invalid.
    pub fn get_dict_id(&self) -> Option<NonZeroU32> {
        // A zero ID means "no dictionary ID"; NonZeroU32 maps that to None.
        NonZeroU32::new(unsafe {
            zstd_sys::ZSTD_getDictID_fromDDict(self.0.as_ptr()) as u32
        })
    }
}

/// Wraps the `ZSTD_createDDict()` function.
///
/// It copies the dictionary internally, so the resulting `DDict` is `'static`.
pub fn create_ddict(dict_buffer: &[u8]) -> DDict<'static> {
    DDict::create(dict_buffer)
}

impl<'a> Drop for DDict<'a> {
    fn drop(&mut self) {
        // Safety: the pointer is owned by this struct and freed exactly once.
        unsafe {
            zstd_sys::ZSTD_freeDDict(self.0.as_ptr());
        }
    }
}

unsafe impl<'a> Send for DDict<'a> {}
unsafe impl<'a> Sync for DDict<'a> {}

/// A shared thread pool for one or more compression contexts
#[cfg(all(feature = "experimental", feature = "zstdmt"))]
#[cfg_attr(
    feature = "doc-cfg",
    doc(cfg(all(feature = "experimental", feature = "zstdmt")))
)]
pub struct ThreadPool(NonNull<zstd_sys::ZSTD_threadPool>);

#[cfg(all(feature = "experimental", feature = "zstdmt"))]
#[cfg_attr(
    feature = "doc-cfg",
    doc(cfg(all(feature = "experimental", feature = "zstdmt")))
)]
impl ThreadPool {
    /// Create a thread pool with the specified number of threads.
    ///
    /// # Panics
    ///
    /// If creating the thread pool failed.
    pub fn new(num_threads: usize) -> Self {
        Self::try_new(num_threads)
            .expect("zstd returned null pointer when creating thread pool")
    }

    /// Create a thread pool with the specified number of threads.
    pub fn try_new(num_threads: usize) -> Option<Self> {
        // Safety: Just FFI; a NULL return maps to None.
        Some(Self(NonNull::new(unsafe {
            zstd_sys::ZSTD_createThreadPool(num_threads)
        })?))
    }
}

#[cfg(all(feature = "experimental", feature = "zstdmt"))]
#[cfg_attr(
    feature = "doc-cfg",
    doc(cfg(all(feature = "experimental", feature = "zstdmt")))
)]
impl Drop for ThreadPool {
    fn drop(&mut self) {
        // Safety: the pointer is owned by this struct and freed exactly once.
        unsafe {
            zstd_sys::ZSTD_freeThreadPool(self.0.as_ptr());
        }
    }
}

#[cfg(all(feature = "experimental", feature = "zstdmt"))]
#[cfg_attr(
    feature = "doc-cfg",
    doc(cfg(all(feature = "experimental", feature = "zstdmt")))
)]
unsafe impl Send for ThreadPool {}

#[cfg(all(feature = "experimental", feature = "zstdmt"))]
#[cfg_attr(
    feature = "doc-cfg",
    doc(cfg(all(feature = "experimental", feature = "zstdmt")))
)]
unsafe impl Sync for ThreadPool {}

/// Wraps the `ZSTD_decompress_usingDDict()` function.
pub fn decompress_using_ddict(
    dctx: &mut DCtx<'_>,
    dst: &mut [u8],
    src: &[u8],
    ddict: &DDict<'_>,
) -> SafeResult {
    dctx.decompress_using_ddict(dst, src, ddict)
}

/// Compression stream.
///
/// Same as `CCtx`.
pub type CStream<'a> = CCtx<'a>;

// CStream can't be shared across threads, so it does not implement Sync.

/// Allocates a new `CStream`.
pub fn create_cstream<'a>() -> CStream<'a> {
    CCtx::create()
}

/// Prepares an existing `CStream` for compression at the given level.
pub fn init_cstream(
    zcs: &mut CStream<'_>,
    compression_level: CompressionLevel,
) -> SafeResult {
    zcs.init(compression_level)
}

#[derive(Debug)]
/// Wrapper around an input buffer.
///
/// Bytes will be read starting at `src[pos]`.
///
/// `pos` will be updated after reading.
pub struct InBuffer<'a> {
    pub src: &'a [u8],
    pub pos: usize,
}

/// Describe a bytes container, like `Vec<u8>`.
///
/// Represents a contiguous segment of allocated memory, a prefix of which is initialized.
///
/// It allows starting from an uninitialized chunk of memory and writing to it, progressively
/// initializing it. No re-allocation typically occurs after the initial creation.
///
/// The main implementors are:
/// * `Vec<u8>` and similar structures. These hold both a length (initialized data) and a capacity
/// (allocated memory).
///
/// Use `Vec::with_capacity` to create an empty `Vec` with non-zero capacity, and the length
/// field will be updated to cover the data written to it (as long as it fits in the given
/// capacity).
/// * `[u8]` and `[u8; N]`. These must start already-initialized, and will not be resized. It will
/// be up to the caller to only use the part that was written (as returned by the various writing
/// operations).
/// * `std::io::Cursor<T: WriteBuf>`. This will ignore data before the cursor's position, and
/// append data after that.
///
/// # Safety
///
/// Implementors must uphold the documented contracts of each method (in particular,
/// `as_slice()` must only cover initialized bytes, and `capacity()` must not exceed
/// the actual allocation).
pub unsafe trait WriteBuf {
    /// Returns the valid data part of this container. Should only cover initialized data.
    fn as_slice(&self) -> &[u8];

    /// Returns the full capacity of this container. May include uninitialized data.
    fn capacity(&self) -> usize;

    /// Returns a pointer to the start of the data.
    fn as_mut_ptr(&mut self) -> *mut u8;

    /// Indicates that the first `n` bytes of the container have been written.
    ///
    /// Safety: this should only be called if the `n` first bytes of this buffer have actually been
    /// initialized.
    unsafe fn filled_until(&mut self, n: usize);

    /// Call the given closure using the pointer and capacity from `self`.
    ///
    /// Assumes the given function returns a parseable code, which if valid, represents how many
    /// bytes were written to `self`.
    ///
    /// The given closure must treat its first argument as pointing to potentially uninitialized
    /// memory, and should not read from it.
    ///
    /// In addition, it must have written at least `n` bytes contiguously from this pointer, where
    /// `n` is the returned value.
    unsafe fn write_from<F>(&mut self, f: F) -> SafeResult
    where
        F: FnOnce(*mut c_void, usize) -> SafeResult,
    {
        let res = f(ptr_mut_void(self), self.capacity());
        // On success, the returned value is the number of bytes written;
        // mark that prefix as initialized.
        if let Ok(n) = res {
            self.filled_until(n);
        }
        res
    }
}

#[cfg(feature = "std")]
#[cfg_attr(feature = "doc-cfg", doc(cfg(feature = "std")))]
unsafe impl<T> WriteBuf for std::io::Cursor<T>
where
    T: WriteBuf,
{
    fn as_slice(&self) -> &[u8] {
        // Only the data at or after the cursor position is "ours".
        &self.get_ref().as_slice()[self.position() as usize..]
    }
    fn capacity(&self) -> usize {
        self.get_ref()
            .capacity()
            .saturating_sub(self.position() as usize)
    }
    fn as_mut_ptr(&mut self) -> *mut u8 {
        let start = self.position() as usize;
        assert!(start <= self.get_ref().capacity());
        // Safety: start is still in the same memory allocation
        unsafe { self.get_mut().as_mut_ptr().add(start) }
    }
    unsafe fn filled_until(&mut self, n: usize) {
        // Early exit: `n = 0` does not indicate anything.
        if n == 0 {
            return;
        }

        // Here we assume data _before_ self.position() was already initialized.
        // Egh it's not actually guaranteed by Cursor? So let's guarantee it ourselves.
        // Since the cursor wraps another `WriteBuf`, we know how much data is initialized there.
        let position = self.position() as usize;
        let initialized = self.get_ref().as_slice().len();
        if let Some(uninitialized) = position.checked_sub(initialized) {
            // Here, the cursor is further than the known-initialized part.
            // Cursor's solution is to pad with zeroes, so let's do the same.
            // We'll zero bytes from the end of valid data (as_slice().len()) to the cursor position.
            // Safety:
            // * We know `n > 0`
            // * This means `self.capacity() > 0` (promise by the caller)
            // * This means `self.get_ref().capacity() > self.position`
            // * This means that `position` is within the nested pointer's allocation.
            // * Finally, `initialized + uninitialized = position`, so the entire byte
            // range here is within the allocation
            unsafe {
                self.get_mut()
                    .as_mut_ptr()
                    .add(initialized)
                    .write_bytes(0u8, uninitialized)
            };
        }

        let start = self.position() as usize;
        assert!(start + n <= self.get_ref().capacity());

        // Forward to the wrapped buffer using absolute offsets.
        self.get_mut().filled_until(start + n);
    }
}

#[cfg(feature = "std")]
#[cfg_attr(feature = "doc-cfg", doc(cfg(feature = "std")))]
unsafe impl<'a> WriteBuf for &'a mut std::vec::Vec<u8> {
    // Delegate everything to the inner Vec (fully-qualified calls avoid
    // recursing into this impl through auto-deref).
    fn as_slice(&self) -> &[u8] {
        std::vec::Vec::as_slice(self)
    }
    fn capacity(&self) -> usize {
        std::vec::Vec::capacity(self)
    }
    fn as_mut_ptr(&mut self) -> *mut u8 {
        std::vec::Vec::as_mut_ptr(self)
    }
    unsafe fn filled_until(&mut self, n: usize) {
        std::vec::Vec::set_len(self, n)
    }
}

#[cfg(feature = "std")]
#[cfg_attr(feature = "doc-cfg", doc(cfg(feature = "std")))]
unsafe impl WriteBuf for std::vec::Vec<u8> {
    fn as_slice(&self) -> &[u8] {
        &self[..]
    }
    fn capacity(&self) -> usize {
        self.capacity()
    }
    fn as_mut_ptr(&mut self) -> *mut u8 {
        self.as_mut_ptr()
    }
    unsafe fn filled_until(&mut self, n: usize) {
        // Caller guarantees the first `n` bytes are initialized.
        self.set_len(n);
    }
}

#[cfg(feature = "arrays")]
#[cfg_attr(feature = "doc-cfg", doc(cfg(feature = "arrays")))]
unsafe impl<const N: usize> WriteBuf for [u8; N] {
    fn as_slice(&self) -> &[u8] {
        self
    }
    fn capacity(&self) -> usize {
        self.len()
    }
    fn as_mut_ptr(&mut self) -> *mut u8 {
        (&mut self[..]).as_mut_ptr()
    }
    unsafe fn filled_until(&mut self, _n: usize) {
        // Assume the slice is already initialized
    }
}

unsafe impl WriteBuf for [u8] {
    fn as_slice(&self) -> &[u8] {
        self
    }
    fn capacity(&self) -> usize {
        self.len()
    }
    fn as_mut_ptr(&mut self) -> *mut u8 {
        self.as_mut_ptr()
    }
    unsafe fn filled_until(&mut self, _n: usize) {
        // Assume the slice is already initialized
    }
}

/*
// This is possible, but... why?
unsafe impl<'a> WriteBuf for OutBuffer<'a, [u8]> {
    fn as_slice(&self) -> &[u8] {
        self.dst
    }
    fn capacity(&self) -> usize {
        self.dst.len()
    }
    fn as_mut_ptr(&mut self) -> *mut u8 {
        self.dst.as_mut_ptr()
    }
    unsafe fn filled_until(&mut self, n: usize) {
        self.pos = n;
    }
}
*/

#[derive(Debug)]
/// Wrapper around an output buffer.
///
/// `C` is usually either `[u8]` or `Vec<u8>`.
///
/// Bytes will be written starting at `dst[pos]`.
///
/// `pos` will be updated after writing.
///
/// # Invariant
///
/// `pos <= dst.capacity()`
pub struct OutBuffer<'a, C: WriteBuf + ?Sized> {
    dst: &'a mut C,
    pos: usize,
}

/// Convenience method to get a mut pointer from a mut ref.
fn ptr_mut<B>(ptr_void: &mut B) -> *mut B {
    ptr_void as *mut B
}

/// Interface between a C-level ZSTD_outBuffer and a rust-level `OutBuffer`.
///
/// Will update the parent buffer from the C buffer on drop.
struct OutBufferWrapper<'a, 'b, C: WriteBuf + ?Sized> {
    // Raw C-side buffer handed to zstd; `buf.pos` is advanced by zstd.
    buf: zstd_sys::ZSTD_outBuffer,
    // Rust-side buffer to sync `pos` back into on drop.
    parent: &'a mut OutBuffer<'b, C>,
}

// Deref/DerefMut let callers treat the wrapper directly as the C struct.
impl<'a, 'b: 'a, C: WriteBuf + ?Sized> Deref for OutBufferWrapper<'a, 'b, C> {
    type Target = zstd_sys::ZSTD_outBuffer;

    fn deref(&self) -> &Self::Target {
        &self.buf
    }
}

impl<'a, 'b: 'a, C: WriteBuf + ?Sized> DerefMut
    for OutBufferWrapper<'a, 'b, C>
{
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.buf
    }
}

impl<'a, C: WriteBuf + ?Sized> OutBuffer<'a, C> {
    /// Returns a new `OutBuffer` around the given slice.
    ///
    /// Starts with `pos = 0`.
    pub fn around(dst: &'a mut C) -> Self {
        OutBuffer { dst, pos: 0 }
    }

    /// Returns a new `OutBuffer` around the given slice, starting at the given position.
    ///
    /// # Panics
    ///
    /// If `pos > dst.capacity()`.
    pub fn around_pos(dst: &'a mut C, pos: usize) -> Self {
        if pos > dst.capacity() {
            panic!("Given position outside of the buffer bounds.");
        }

        OutBuffer { dst, pos }
    }

    /// Returns the current cursor position.
    ///
    /// Guaranteed to be <= self.capacity()
    pub fn pos(&self) -> usize {
        assert!(self.pos <= self.dst.capacity());
        self.pos
    }

    /// Returns the capacity of the underlying buffer.
    pub fn capacity(&self) -> usize {
        self.dst.capacity()
    }

    /// Sets the new cursor position.
    ///
    /// # Panics
    ///
    /// If `pos > self.dst.capacity()`.
    ///
    /// # Safety
    ///
    /// Data up to `pos` must have actually been written to.
    pub unsafe fn set_pos(&mut self, pos: usize) {
        if pos > self.dst.capacity() {
            panic!("Given position outside of the buffer bounds.");
        }
        // Propagate the initialized prefix to the underlying container.
        self.dst.filled_until(pos);
        self.pos = pos;
    }

    // Builds the C-side view of this buffer; the wrapper's Drop syncs
    // `pos` back into `self`.
    fn wrap<'b>(&'b mut self) -> OutBufferWrapper<'b, 'a, C> {
        OutBufferWrapper {
            buf: zstd_sys::ZSTD_outBuffer {
                dst: ptr_mut_void(self.dst),
                size: self.dst.capacity(),
                pos: self.pos,
            },
            parent: self,
        }
    }

    /// Returns the part of this buffer that was written to.
    pub fn as_slice<'b>(&'b self) -> &'a [u8]
    where
        'b: 'a,
    {
        let pos = self.pos;
        &self.dst.as_slice()[..pos]
    }

    /// Returns a pointer to the start of this buffer.
    pub fn as_mut_ptr(&mut self) -> *mut u8 {
        self.dst.as_mut_ptr()
    }
}

impl<'a, 'b, C: WriteBuf + ?Sized> Drop for OutBufferWrapper<'a, 'b, C> {
    fn drop(&mut self) {
        // Safe because we guarantee that data until `self.buf.pos` has been written.
        unsafe { self.parent.set_pos(self.buf.pos) };
    }
}

struct InBufferWrapper<'a, 'b> {
    // Raw C-side buffer handed to zstd; `buf.pos` is advanced by zstd.
    buf: zstd_sys::ZSTD_inBuffer,
    // Rust-side buffer to sync `pos` back into on drop.
    parent: &'a mut InBuffer<'b>,
}

impl<'a, 'b: 'a> Deref for InBufferWrapper<'a, 'b> {
    type Target = zstd_sys::ZSTD_inBuffer;

    fn deref(&self) -> &Self::Target {
        &self.buf
    }
}

impl<'a, 'b: 'a> DerefMut for InBufferWrapper<'a, 'b> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.buf
    }
}

impl<'a> InBuffer<'a> {
    /// Returns a new `InBuffer` around the given slice.
    ///
    /// Starts with `pos = 0`.
    pub fn around(src: &'a [u8]) -> Self {
        InBuffer { src, pos: 0 }
    }

    /// Returns the current cursor position.
    pub fn pos(&self) -> usize {
        self.pos
    }

    /// Sets the new cursor position.
    ///
    /// # Panics
    ///
    /// If `pos > self.src.len()`.
    pub fn set_pos(&mut self, pos: usize) {
        if pos > self.src.len() {
            panic!("Given position outside of the buffer bounds.");
        }
        self.pos = pos;
    }

    // Builds the C-side view of this buffer; the wrapper's Drop syncs
    // `pos` back into `self`.
    fn wrap<'b>(&'b mut self) -> InBufferWrapper<'b, 'a> {
        InBufferWrapper {
            buf: zstd_sys::ZSTD_inBuffer {
                src: ptr_void(self.src),
                size: self.src.len(),
                pos: self.pos,
            },
            parent: self,
        }
    }
}

impl<'a, 'b> Drop for InBufferWrapper<'a, 'b> {
    fn drop(&mut self) {
        self.parent.set_pos(self.buf.pos);
    }
}

/// A Decompression stream.
///
/// Same as `DCtx`.
pub type DStream<'a> = DCtx<'a>;

// Some functions work on a "frame prefix".
// TODO: Define `struct FramePrefix(&[u8]);` and move these functions to it?
//
// Some other functions work on a dictionary (not CDict or DDict).
// Same thing?

/// Wraps the `ZSTD_findFrameCompressedSize()` function.
///
/// `src` should contain at least an entire frame.
pub fn find_frame_compressed_size(src: &[u8]) -> SafeResult {
    // Safety: Just FFI.
    let code = unsafe {
        zstd_sys::ZSTD_findFrameCompressedSize(ptr_void(src), src.len())
    };
    parse_code(code)
}

/// Wraps the `ZSTD_getFrameContentSize()` function.
///
/// Args:
/// * `src`: A prefix of the compressed frame. It should at least include the frame header.
///
/// Returns:
/// * `Err(ContentSizeError)` if `src` is too small of a prefix, or if it appears corrupted.
/// * `Ok(None)` if the frame does not include a content size.
/// * `Ok(Some(content_size_in_bytes))` otherwise.
pub fn get_frame_content_size(
    src: &[u8],
) -> Result<Option<u64>, ContentSizeError> {
    // Safety: Just FFI.
    parse_content_size(unsafe {
        zstd_sys::ZSTD_getFrameContentSize(ptr_void(src), src.len())
    })
}

/// Wraps the `ZSTD_findDecompressedSize()` function.
///
/// `src` should be exactly a sequence of ZSTD frames.
#[cfg(feature = "experimental")]
#[cfg_attr(feature = "doc-cfg", doc(cfg(feature = "experimental")))]
pub fn find_decompressed_size(
    src: &[u8],
) -> Result<Option<u64>, ContentSizeError> {
    // Safety: Just FFI.
    parse_content_size(unsafe {
        zstd_sys::ZSTD_findDecompressedSize(ptr_void(src), src.len())
    })
}

/// Wraps the `ZSTD_isFrame()` function.
#[cfg(feature = "experimental")]
#[cfg_attr(feature = "doc-cfg", doc(cfg(feature = "experimental")))]
pub fn is_frame(buffer: &[u8]) -> bool {
    // Safety: Just FFI.
    unsafe { zstd_sys::ZSTD_isFrame(ptr_void(buffer), buffer.len()) > 0 }
}

/// Wraps the `ZSTD_getDictID_fromDict()` function.
///
/// Returns `None` if the dictionary is not a valid zstd dictionary.
pub fn get_dict_id_from_dict(dict: &[u8]) -> Option<NonZeroU32> {
    // A zero ID means "no/invalid dictionary"; NonZeroU32 maps that to None.
    NonZeroU32::new(unsafe {
        zstd_sys::ZSTD_getDictID_fromDict(ptr_void(dict), dict.len()) as u32
    })
}

/// Wraps the `ZSTD_getDictID_fromFrame()` function.
///
/// Returns `None` if the dictionary ID could not be decoded. This may happen if:
/// * The frame was not encoded with a dictionary.
/// * The frame intentionally did not include dictionary ID.
/// * The dictionary was non-conformant.
/// * `src` is too small and does not include the frame header.
/// * `src` is not a valid zstd frame prefix.
pub fn get_dict_id_from_frame(src: &[u8]) -> Option<NonZeroU32> {
    // A zero ID means "no dictionary ID available"; NonZeroU32 maps that to None.
    NonZeroU32::new(unsafe {
        zstd_sys::ZSTD_getDictID_fromFrame(ptr_void(src), src.len()) as u32
    })
}

/// What kind of context reset should be applied.
pub enum ResetDirective {
    /// Only the session will be reset.
    ///
    /// All parameters will be preserved (including the dictionary).
    /// But any frame being processed will be dropped.
    ///
    /// It can be useful to start re-using a context after an error or when an
    /// ongoing compression is no longer needed.
    SessionOnly,

    /// Only reset parameters (including dictionary or referenced prefix).
    ///
    /// All parameters will be reset to default values.
    ///
    /// This can only be done between sessions - no compression or decompression must be ongoing.
    Parameters,

    /// Reset both the session and parameters.
    ///
    /// The result is similar to a newly created context.
    SessionAndParameters,
}

impl ResetDirective {
    // Maps this high-level enum to the corresponding C enum value.
    fn as_sys(self) -> zstd_sys::ZSTD_ResetDirective {
        match self {
            ResetDirective::SessionOnly => zstd_sys::ZSTD_ResetDirective::ZSTD_reset_session_only,
            ResetDirective::Parameters => zstd_sys::ZSTD_ResetDirective::ZSTD_reset_parameters,
            ResetDirective::SessionAndParameters => zstd_sys::ZSTD_ResetDirective::ZSTD_reset_session_and_parameters,
        }
    }
}

#[cfg(feature = "experimental")]
#[cfg_attr(feature = "doc-cfg", doc(cfg(feature = "experimental")))]
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
#[repr(u32)]
pub enum FrameFormat {
    /// Regular zstd format.
    One = zstd_sys::ZSTD_format_e::ZSTD_f_zstd1 as u32,

    /// Skip the 4 bytes identifying the content as zstd-compressed data.
    Magicless = zstd_sys::ZSTD_format_e::ZSTD_f_zstd1_magicless as u32,
}

/// How a dictionary should be attached to a compression context.
#[cfg(feature = "experimental")]
#[cfg_attr(feature = "doc-cfg", doc(cfg(feature = "experimental")))]
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
#[repr(u32)]
pub enum DictAttachPref {
    DefaultAttach =
        zstd_sys::ZSTD_dictAttachPref_e::ZSTD_dictDefaultAttach as u32,
    ForceAttach = zstd_sys::ZSTD_dictAttachPref_e::ZSTD_dictForceAttach as u32,
    ForceCopy = zstd_sys::ZSTD_dictAttachPref_e::ZSTD_dictForceCopy as u32,
    ForceLoad = zstd_sys::ZSTD_dictAttachPref_e::ZSTD_dictForceLoad as u32,
}

/// Tri-state switch: let the library decide, force-enable, or force-disable.
#[cfg(feature = "experimental")]
#[cfg_attr(feature = "doc-cfg", doc(cfg(feature = "experimental")))]
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
#[repr(u32)]
pub enum ParamSwitch {
    Auto = zstd_sys::ZSTD_ParamSwitch_e::ZSTD_ps_auto as u32,
    Enable = zstd_sys::ZSTD_ParamSwitch_e::ZSTD_ps_enable as u32,
    Disable = zstd_sys::ZSTD_ParamSwitch_e::ZSTD_ps_disable as u32,
}

/// A compression parameter.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
#[non_exhaustive]
pub enum CParameter {
    #[cfg(feature = "experimental")]
    #[cfg_attr(feature = "doc-cfg", doc(cfg(feature = "experimental")))]
    RSyncable(bool),

    #[cfg(feature = "experimental")]
    #[cfg_attr(feature = "doc-cfg", doc(cfg(feature = "experimental")))]
    Format(FrameFormat),

    #[cfg(feature = "experimental")]
    #[cfg_attr(feature = "doc-cfg", doc(cfg(feature = "experimental")))]
    ForceMaxWindow(bool),

    #[cfg(feature = "experimental")]
    #[cfg_attr(feature = "doc-cfg", doc(cfg(feature = "experimental")))]
    ForceAttachDict(DictAttachPref),

    #[cfg(feature = "experimental")]
    #[cfg_attr(feature = "doc-cfg", doc(cfg(feature = "experimental")))]
    LiteralCompressionMode(ParamSwitch),

    #[cfg(feature = "experimental")]
    #[cfg_attr(feature = "doc-cfg", doc(cfg(feature = "experimental")))]
    SrcSizeHint(u32),

    #[cfg(feature = "experimental")]
    #[cfg_attr(feature = "doc-cfg", doc(cfg(feature = "experimental")))]
    EnableDedicatedDictSearch(bool),

    #[cfg(feature = "experimental")]
    #[cfg_attr(feature = "doc-cfg", doc(cfg(feature = "experimental")))]
    StableInBuffer(bool),

    #[cfg(feature = "experimental")]
    #[cfg_attr(feature = "doc-cfg", doc(cfg(feature = "experimental")))]
    StableOutBuffer(bool),

    #[cfg(feature = "experimental")]
    #[cfg_attr(feature = "doc-cfg", doc(cfg(feature = "experimental")))]
    BlockDelimiters(bool),

    #[cfg(feature = "experimental")]
    #[cfg_attr(feature = "doc-cfg", doc(cfg(feature = "experimental")))]
    ValidateSequences(bool),

    #[cfg(feature = "experimental")]
    #[cfg_attr(feature = "doc-cfg", doc(cfg(feature = "experimental")))]
    UseBlockSplitter(ParamSwitch),

    #[cfg(feature = "experimental")]
    #[cfg_attr(feature = "doc-cfg", doc(cfg(feature = "experimental")))]
    UseRowMatchFinder(ParamSwitch),

    #[cfg(feature = "experimental")]
    #[cfg_attr(feature = "doc-cfg", doc(cfg(feature = "experimental")))]
    DeterministicRefPrefix(bool),

    #[cfg(feature = "experimental")]
    #[cfg_attr(feature = "doc-cfg", doc(cfg(feature = "experimental")))]
    PrefetchCDictTables(ParamSwitch),

    #[cfg(feature = "experimental")]
    #[cfg_attr(feature = "doc-cfg", doc(cfg(feature = "experimental")))]
    EnableSeqProducerFallback(bool),

    #[cfg(feature = "experimental")]
    #[cfg_attr(feature = "doc-cfg", doc(cfg(feature = "experimental")))]
    MaxBlockSize(u32),

    #[cfg(feature = "experimental")]
    #[cfg_attr(feature = "doc-cfg", doc(cfg(feature = "experimental")))]
    SearchForExternalRepcodes(ParamSwitch),

    /// Target CBlock size.
    ///
    /// Tries to make compressed blocks fit in this size (not a guarantee, just a target).
    /// Useful to reduce end-to-end latency in low-bandwidth environments.
    ///
    /// No target when the value is 0.
    TargetCBlockSize(u32),

    /// Compression level to use.
    ///
    /// Compression levels are global presets for the other compression parameters.
    CompressionLevel(CompressionLevel),

    /// Maximum allowed back-reference distance.
    ///
    /// The actual distance is 2 power "this value".
    WindowLog(u32),

    HashLog(u32),

    ChainLog(u32),

    SearchLog(u32),

    MinMatch(u32),

    TargetLength(u32),

    Strategy(Strategy),

    EnableLongDistanceMatching(bool),

    LdmHashLog(u32),

    LdmMinMatch(u32),

    LdmBucketSizeLog(u32),

    LdmHashRateLog(u32),

    ContentSizeFlag(bool),

    ChecksumFlag(bool),

    DictIdFlag(bool),

    /// How many threads will be spawned.
    ///
    /// With a default value of `0`, `compress_stream*` functions block until they complete.
    ///
    /// With any other value (including 1, a single compressing thread), these methods directly
    /// return, and the actual compression is done in the background (until a flush is requested).
    ///
    /// Note: this will only work if the `zstdmt` feature is activated.
    NbWorkers(u32),

    /// Size in bytes of a compression job.
    ///
    /// Does not have any effect when `NbWorkers` is set to 0.
    ///
    /// The default value of 0 finds the best job size based on the compression parameters.
    ///
    /// Note: this will only work if the `zstdmt` feature is activated.
    JobSize(u32),

    /// Specifies how much overlap must be given to each worker.
    ///
    /// Possible values:
    ///
    /// * `0` (default value): automatic overlap based on compression strategy.
    /// * `1`: No overlap
    /// * `1 < n < 9`: Overlap a fraction of the window size, defined as `1/(2 ^ 9-n)`.
    /// * `9`: Full overlap (as long as the window)
    /// * `9 < m`: Will return an error.
    ///
    /// Note: this will only work if the `zstdmt` feature is activated.
    OverlapSizeLog(u32),
}

/// A decompression parameter.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
#[non_exhaustive]
pub enum DParameter {
    WindowLogMax(u32),

    #[cfg(feature = "experimental")]
    #[cfg_attr(feature = "doc-cfg", doc(cfg(feature = "experimental")))]
    /// See `FrameFormat`.
    Format(FrameFormat),

    #[cfg(feature = "experimental")]
    #[cfg_attr(feature = "doc-cfg", doc(cfg(feature = "experimental")))]
    StableOutBuffer(bool),

    #[cfg(feature = "experimental")]
    #[cfg_attr(feature = "doc-cfg", doc(cfg(feature = "experimental")))]
    ForceIgnoreChecksum(bool),

    #[cfg(feature = "experimental")]
    #[cfg_attr(feature = "doc-cfg", doc(cfg(feature = "experimental")))]
    RefMultipleDDicts(bool),
}

/// Wraps the `ZDICT_trainFromBuffer()` function.
#[cfg(feature = "zdict_builder")]
#[cfg_attr(feature = "doc-cfg", doc(cfg(feature = "zdict_builder")))]
pub fn train_from_buffer<C: WriteBuf + ?Sized>(
    dict_buffer: &mut C,
    samples_buffer: &[u8],
    samples_sizes: &[usize],
) -> SafeResult {
    // The samples buffer must be exactly the concatenation of all samples.
    assert_eq!(samples_buffer.len(), samples_sizes.iter().sum());
    // Safety: `write_from` provides a valid (buffer, capacity) pair and marks
    // the returned number of bytes as initialized on success.
    unsafe {
        dict_buffer.write_from(|buffer, capacity| {
            parse_code(zstd_sys::ZDICT_trainFromBuffer(
                buffer,
                capacity,
                ptr_void(samples_buffer),
                samples_sizes.as_ptr(),
                samples_sizes.len() as u32,
            ))
        })
    }
}

/// Wraps the `ZDICT_getDictID()` function.
#[cfg(feature = "zdict_builder")]
#[cfg_attr(feature = "doc-cfg", doc(cfg(feature = "zdict_builder")))]
pub fn get_dict_id(dict_buffer: &[u8]) -> Option<NonZeroU32> {
    // A zero ID means "no/invalid dictionary"; NonZeroU32 maps that to None.
    NonZeroU32::new(unsafe {
        zstd_sys::ZDICT_getDictID(ptr_void(dict_buffer), dict_buffer.len())
    })
}

/// Wraps the `ZSTD_getBlockSize()` function.
#[cfg(feature = "experimental")]
#[cfg_attr(feature = "doc-cfg", doc(cfg(feature = "experimental")))]
pub fn get_block_size(cctx: &CCtx) -> usize {
    // Safety: Just FFI.
    unsafe { zstd_sys::ZSTD_getBlockSize(cctx.0.as_ptr()) }
}

/// Wraps the `ZSTD_decompressBound` function
#[cfg(feature = "experimental")]
#[cfg_attr(feature = "doc-cfg", doc(cfg(feature = "experimental")))]
pub fn decompress_bound(data: &[u8]) -> Result<u64, ErrorCode> {
    // Safety: Just FFI.
    let bound =
        unsafe { zstd_sys::ZSTD_decompressBound(ptr_void(data), data.len()) };
    // The C API signals errors in-band through the returned value.
    if is_error(bound as usize) {
        Err(bound as usize)
    } else {
        Ok(bound)
    }
}

/// Given a buffer of size `src_size`, returns the maximum number of sequences that can ge
/// generated.
#[cfg(feature = "experimental")]
#[cfg_attr(feature = "doc-cfg", doc(cfg(feature = "experimental")))]
pub fn sequence_bound(src_size: usize) -> usize {
    // Safety: Just FFI.
    unsafe { zstd_sys::ZSTD_sequenceBound(src_size) }
}

/// Returns the minimum extra space when output and input buffer overlap.
///
/// When using in-place decompression, the output buffer must be at least this much bigger (in
/// bytes) than the input buffer. The extra space must be at the front of the output buffer (the
/// input buffer must be at the end of the output buffer).
#[cfg(feature = "experimental")]
#[cfg_attr(feature = "doc-cfg", doc(cfg(feature = "experimental")))]
pub fn decompression_margin(
    compressed_data: &[u8],
) -> Result<usize, ErrorCode> {
    // Safety: Just FFI.
    parse_code(unsafe {
        zstd_sys::ZSTD_decompressionMargin(
            ptr_void(compressed_data),
            compressed_data.len(),
        )
    })
}
//! The seekable format splits the compressed data into a series of "frames",
//! each compressed individually so that decompression of a section in the
//! middle of an archive only requires zstd to decompress at most a frame's
//! worth of extra data, instead of the entire archive.

use core::{marker::PhantomData, ptr::NonNull};

use crate::{
    parse_code, ptr_mut, ptr_void, CompressionLevel, InBuffer, OutBuffer,
    SafeResult, WriteBuf, SEEKABLE_FRAMEINDEX_TOOLARGE,
};

/// Indicates that the passed frame index is too large.
///
/// This happens when `frame_index > num_frames()`.
#[derive(Debug, PartialEq)]
pub struct FrameIndexTooLargeError;

impl core::fmt::Display for FrameIndexTooLargeError {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        f.write_str("Frame index too large")
    }
}

/// Required to track a streaming compression operation.
///
/// Streaming objects are reusable to avoid allocation and deallocation,
/// to start a new compression operation call `init()`.
pub struct SeekableCStream(NonNull<zstd_sys::ZSTD_seekable_CStream>);

// NOTE(review): assumed sound because the raw stream object is only reachable
// through this exclusive owner — confirm against zstd's threading guarantees.
unsafe impl Send for SeekableCStream {}
unsafe impl Sync for SeekableCStream {}

impl Default for SeekableCStream {
    fn default() -> Self {
        SeekableCStream::create()
    }
}

impl SeekableCStream {
    /// Tries to create a new `SeekableCStream`.
    ///
    /// Returns `None` if zstd returns a NULL pointer - may happen if allocation fails.
    pub fn try_create() -> Option<Self> {
        // Safety: Just FFI
        Some(SeekableCStream(NonNull::new(unsafe {
            zstd_sys::ZSTD_seekable_createCStream()
        })?))
    }

    /// Creates a new `SeekableCStream`.
    ///
    /// # Panics
    ///
    /// If zstd returns a NULL pointer.
    pub fn create() -> Self {
        Self::try_create()
            .expect("zstd returned null pointer when creating new seekable compression stream")
    }

    /// Wraps the `ZSTD_seekable_initCStream()` function.
    ///
    /// Call this to initialize a `SeekableCStream` object for a new compression operation.
    /// - `max_frame_size` indicates the size at which to automatically start a new seekable
    ///   frame. `max_frame_size == 0` implies the default maximum size. Smaller frame sizes allow
    ///   faster decompression of small segments, since retrieving a single byte requires
    ///   decompression of the full frame where the byte belongs. In general, size the frames
    ///   to roughly correspond to the access granularity (when it's known). But small sizes
    ///   also reduce compression ratio. Avoid really tiny frame sizes (< 1 KB), that would
    ///   hurt compression ratio considerably.
    /// - `checksum_flag` indicates whether or not the seek table should include frame
    ///   checksums on the uncompressed data for verification.
    ///
    /// Returns a size hint for input to provide for compression, or an error code.
    pub fn init(
        &mut self,
        compression_level: CompressionLevel,
        checksum_flag: bool,
        max_frame_size: u32,
    ) -> SafeResult {
        // Safety: Just FFI
        let code = unsafe {
            zstd_sys::ZSTD_seekable_initCStream(
                self.0.as_ptr(),
                compression_level,
                // C API expects an int flag, not a bool.
                checksum_flag as i32,
                max_frame_size,
            )
        };
        parse_code(code)
    }

    /// Wraps the `ZSTD_seekable_compressStream()` function.
    ///
    /// Call this repetitively to consume input stream. The function will automatically
    /// update both `pos` fields. Note that it may not consume the entire input, in which
    /// case `pos < size`, and it's up to the caller to present again remaining data.
    ///
    /// Returns a size hint, preferred number of bytes to use as input for the next call
    /// or an error code. Note that it's just a hint, to help latency a little, any other
    /// value will work fine.
    pub fn compress_stream<C: WriteBuf + ?Sized>(
        &mut self,
        output: &mut OutBuffer<'_, C>,
        input: &mut InBuffer<'_>,
    ) -> SafeResult {
        let mut output = output.wrap();
        let mut input = input.wrap();
        // Safety: Just FFI
        let code = unsafe {
            zstd_sys::ZSTD_seekable_compressStream(
                self.0.as_ptr(),
                ptr_mut(&mut output),
                ptr_mut(&mut input),
            )
        };
        parse_code(code)
    }

    /// Wraps the `ZSTD_seekable_endFrame()` function.
    ///
    /// Call this any time to end the current frame and start a new one.
    pub fn end_frame<C: WriteBuf + ?Sized>(
        &mut self,
        output: &mut OutBuffer<'_, C>,
    ) -> SafeResult {
        let mut output = output.wrap();
        // Safety: Just FFI
        let code = unsafe {
            zstd_sys::ZSTD_seekable_endFrame(
                self.0.as_ptr(),
                ptr_mut(&mut output),
            )
        };
        parse_code(code)
    }

    /// Wraps the `ZSTD_seekable_endStream()` function.
    ///
    /// This will end the current frame, and then write the seek table so that
    /// decompressors can efficiently find compressed frames.
    ///
    /// Returns a number > 0 if it was unable to flush all the necessary data to `output`.
    /// In this case, it should be called again until all remaining data is flushed out and
    /// 0 is returned.
    pub fn end_stream<C: WriteBuf + ?Sized>(
        &mut self,
        output: &mut OutBuffer<'_, C>,
    ) -> SafeResult {
        let mut output = output.wrap();
        // Safety: Just FFI
        let code = unsafe {
            zstd_sys::ZSTD_seekable_endStream(
                self.0.as_ptr(),
                ptr_mut(&mut output),
            )
        };
        parse_code(code)
    }
}

impl Drop for SeekableCStream {
    fn drop(&mut self) {
        // Safety: Just FFI
        unsafe {
            zstd_sys::ZSTD_seekable_freeCStream(self.0.as_ptr());
        }
    }
}

/// Allows for the seek table to be constructed directly.
///
/// This table can then be appended to a file of concatenated frames. This allows the
/// frames to be compressed independently, even in parallel, and compiled together
/// afterward into a seekable archive.
pub struct FrameLog(NonNull<zstd_sys::ZSTD_frameLog>);

// NOTE(review): assumed sound because the raw frame log is only reachable
// through this exclusive owner — confirm against zstd's threading guarantees.
unsafe impl Send for FrameLog {}
unsafe impl Sync for FrameLog {}

impl FrameLog {
    /// Tries to create a new `FrameLog`.
    ///
    /// Returns `None` if zstd returns a NULL pointer - may happen if allocation fails.
    pub fn try_create(checksum_flag: bool) -> Option<Self> {
        Some(FrameLog(
            // Safety: Just FFI
            NonNull::new(unsafe {
                zstd_sys::ZSTD_seekable_createFrameLog(checksum_flag as i32)
            })?,
        ))
    }

    /// Creates a new `FrameLog`.
    ///
    /// # Panics
    ///
    /// If zstd returns a NULL pointer.
    pub fn create(checksum_flag: bool) -> Self {
        Self::try_create(checksum_flag)
            .expect("Zstd returned null pointer when creating new frame log")
    }

    /// Needs to be called once for each frame in the archive.
    ///
    /// If the `FrameLog` was created with `checksum_flag == false`, the `checksum` may be none
    /// and any value assigned to it will be ignored. If the `FrameLog` was created with
    /// `checksum_flag == true`, it should be the least significant 32 bits of the XXH64
    /// hash of the uncompressed data.
    pub fn log_frame(
        &mut self,
        compressed_size: u32,
        decompressed_size: u32,
        checksum: Option<u32>,
    ) -> SafeResult {
        // Safety: Just FFI
        let code = unsafe {
            zstd_sys::ZSTD_seekable_logFrame(
                self.0.as_ptr(),
                compressed_size,
                decompressed_size,
                // 0 when no checksum is given; ignored by zstd unless the
                // frame log was created with checksumming enabled.
                checksum.unwrap_or_default(),
            )
        };
        parse_code(code)
    }

    /// Writes the seek table to `output`.
    ///
    /// Returns 0 if the entire table was written. Otherwise, it will be equal to the number
    /// of bytes left to write.
    pub fn write_seek_table<C: WriteBuf + ?Sized>(
        &mut self,
        output: &mut OutBuffer<'_, C>,
    ) -> SafeResult {
        let mut output = output.wrap();
        // Safety: Just FFI
        let code = unsafe {
            zstd_sys::ZSTD_seekable_writeSeekTable(
                self.0.as_ptr(),
                ptr_mut(&mut output),
            )
        };
        parse_code(code)
    }
}

impl Drop for FrameLog {
    fn drop(&mut self) {
        // Safety: Just FFI
        unsafe {
            zstd_sys::ZSTD_seekable_freeFrameLog(self.0.as_ptr());
        }
    }
}

/// A seekable decompression object.
///
/// The lifetime references the potential buffer that holds the data of this seekable.
pub struct Seekable<'a>(NonNull<zstd_sys::ZSTD_seekable>, PhantomData<&'a ()>);

// NOTE(review): assumed sound because the raw seekable object is only
// reachable through this exclusive owner — confirm against zstd's threading
// guarantees.
unsafe impl Send for Seekable<'_> {}
unsafe impl Sync for Seekable<'_> {}

impl Default for Seekable<'_> {
    fn default() -> Self {
        Seekable::create()
    }
}

impl<'a> Seekable<'a> {
    /// Tries to create a new `Seekable`.
    ///
    /// Returns `None` if zstd returns a NULL pointer - may happen if allocation fails.
    pub fn try_create() -> Option<Self> {
        // Safety: Just FFI
        Some(Seekable(
            NonNull::new(unsafe { zstd_sys::ZSTD_seekable_create() })?,
            PhantomData,
        ))
    }

    /// Creates a new `Seekable`.
    ///
    /// # Panics
    ///
    /// If zstd returns a NULL pointer.
    pub fn create() -> Self {
        Self::try_create()
            .expect("Zstd returned null pointer when creating new seekable")
    }

    /// Initializes this `Seekable` with the seek table provided in `src`.
    ///
    /// The data contained in `src` should be the entire seekable file, including the seek table.
    /// Consider using `init_advanced()`, if it not feasible to have the entire seekable file in
    /// memory.
    pub fn init_buff(&mut self, src: &'a [u8]) -> SafeResult {
        // Safety: Just FFI
        let code = unsafe {
            zstd_sys::ZSTD_seekable_initBuff(
                self.0.as_ptr(),
                ptr_void(src),
                src.len(),
            )
        };
        parse_code(code)
    }

    /// Decompresses the length of `dst` at decompressed offset `offset`.
    ///
    /// May have to decompress the entire prefix of the frame before the desired data if it has
    /// not already processed this section. If this is called multiple times for a consecutive
    /// range of data, it will efficiently retain the decompressor object and avoid
    /// redecompressing frame prefixes.
    ///
    /// Returns the number of bytes decompressed, or an error code.
    pub fn decompress<C: WriteBuf + ?Sized>(
        &mut self,
        dst: &mut C,
        offset: u64,
    ) -> SafeResult {
        unsafe {
            dst.write_from(|buffer, capacity| {
                parse_code(zstd_sys::ZSTD_seekable_decompress(
                    self.0.as_ptr(),
                    buffer,
                    capacity,
                    offset,
                ))
            })
        }
    }

    /// Decompresses the frame with index `frame_index` into `dst`.
    ///
    /// Returns an error if `frame_index` is larger than the value returned by `num_frames()`.
    pub fn decompress_frame<C: WriteBuf + ?Sized>(
        &mut self,
        dst: &mut C,
        frame_index: u32,
    ) -> SafeResult {
        unsafe {
            dst.write_from(|buffer, capacity| {
                parse_code(zstd_sys::ZSTD_seekable_decompressFrame(
                    self.0.as_ptr(),
                    buffer,
                    capacity,
                    frame_index,
                ))
            })
        }
    }

    /// Get the number of frames of this seekable object.
    ///
    /// Returns `0` if the seekable is not initialized.
    pub fn num_frames(&self) -> u32 {
        // Safety: Just FFI
        unsafe { zstd_sys::ZSTD_seekable_getNumFrames(self.0.as_ptr()) }
    }

    /// Get the offset of the compressed frame.
    ///
    /// Returns an error if `frame_index` is out of range.
    pub fn frame_compressed_offset(
        &self,
        frame_index: u32,
    ) -> Result<u64, FrameIndexTooLargeError> {
        // Safety: Just FFI
        let offset = unsafe {
            zstd_sys::ZSTD_seekable_getFrameCompressedOffset(
                self.0.as_ptr(),
                frame_index,
            )
        };
        // zstd signals an out-of-range index via this sentinel value.
        if offset == SEEKABLE_FRAMEINDEX_TOOLARGE {
            return Err(FrameIndexTooLargeError);
        }
        Ok(offset)
    }

    /// Get the offset of the decompressed frame.
    ///
    /// Returns an error if `frame_index` is out of range.
    pub fn frame_decompressed_offset(
        &self,
        frame_index: u32,
    ) -> Result<u64, FrameIndexTooLargeError> {
        // Safety: Just FFI
        let offset = unsafe {
            zstd_sys::ZSTD_seekable_getFrameDecompressedOffset(
                self.0.as_ptr(),
                frame_index,
            )
        };
        if offset == SEEKABLE_FRAMEINDEX_TOOLARGE {
            return Err(FrameIndexTooLargeError);
        }
        Ok(offset)
    }

    /// Get the size of the compressed frame.
    ///
    /// Returns an error if `frame_index` is out of range.
    pub fn frame_compressed_size(&self, frame_index: u32) -> SafeResult {
        // Safety: Just FFI
        let code = unsafe {
            zstd_sys::ZSTD_seekable_getFrameCompressedSize(
                self.0.as_ptr(),
                frame_index,
            )
        };
        parse_code(code)
    }

    /// Get the size of the decompressed frame.
    ///
    /// Returns an error if `frame_index` is out of range.
pub fn frame_decompressed_size(&self, frame_index: u32) -> SafeResult { let code = unsafe { zstd_sys::ZSTD_seekable_getFrameDecompressedSize( self.0.as_ptr(), frame_index, ) }; parse_code(code) } /// Get the frame at the given offset. pub fn offset_to_frame_index(&self, offset: u64) -> u32 { unsafe { zstd_sys::ZSTD_seekable_offsetToFrameIndex(self.0.as_ptr(), offset) } } } impl<'a> Drop for Seekable<'a> { fn drop(&mut self) { // Safety: Just FFI unsafe { zstd_sys::ZSTD_seekable_free(self.0.as_ptr()); } } } #[cfg(feature = "std")] pub struct AdvancedSeekable<'a, F> { inner: Seekable<'a>, // We can't use Box<F> since it'd break rust aliasing rules when calling // advanced_read/advanced_seek through the C code. src: *mut F, } unsafe impl<F> Send for AdvancedSeekable<'_, F> where F: Send {} unsafe impl<F> Sync for AdvancedSeekable<'_, F> where F: Sync {} #[cfg(feature = "std")] impl<'a, F> core::ops::Deref for AdvancedSeekable<'a, F> { type Target = Seekable<'a>; fn deref(&self) -> &Self::Target { &self.inner } } #[cfg(feature = "std")] impl<'a, F> core::ops::DerefMut for AdvancedSeekable<'a, F> { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.inner } } #[cfg(feature = "std")] impl<'a, F> Drop for AdvancedSeekable<'a, F> { fn drop(&mut self) { use std::boxed::Box; // this drops the box let _: Box<F> = unsafe { Box::from_raw(self.src) }; } } impl<'a> Seekable<'a> { /// A general API allowing the client to provide its own read and seek implementations. /// /// Initializes this seekable without having the complete compressed data in memory, /// but seeks and reads `src` as required. Use this function if you are looking for /// an alternative to the `ZSTD_seekable_initFile()` function. 
#[cfg(feature = "std")] #[cfg_attr(feature = "doc-cfg", doc(cfg(feature = "std")))] pub fn init_advanced<F>( self, src: std::boxed::Box<F>, ) -> Result<AdvancedSeekable<'a, F>, crate::ErrorCode> where F: std::io::Read + std::io::Seek, { let opaque = std::boxed::Box::into_raw(src) as *mut F; let custom_file = zstd_sys::ZSTD_seekable_customFile { opaque: opaque as *mut core::ffi::c_void, read: Some(advanced_read::<F>), seek: Some(advanced_seek::<F>), }; // Safety: Just FFI let code = unsafe { zstd_sys::ZSTD_seekable_initAdvanced(self.0.as_ptr(), custom_file) }; if crate::is_error(code) { return Err(code); } Ok(AdvancedSeekable { inner: self, src: opaque, }) } } /// Seeks the read head to `offset` from `origin`, where origin is either `SEEK_SET` /// (beginning of file), `SEEK_CUR` (current position) or `SEEK_END` (end of file), /// as defined in `stdio.h`. /// /// Returns a non-negative value in case of success, and a negative value in case of failure. #[cfg(feature = "std")] unsafe extern "C" fn advanced_seek<S: std::io::Seek>( opaque: *mut core::ffi::c_void, offset: ::core::ffi::c_longlong, origin: ::core::ffi::c_int, ) -> ::core::ffi::c_int { use core::convert::TryFrom; use std::io::SeekFrom; // as defined in stdio.h const SEEK_SET: i32 = 0; const SEEK_CUR: i32 = 1; const SEEK_END: i32 = 2; // Safety: The trait boundaries in `init_advanced()` ensure that `opaque` points to an S let seeker: &mut S = std::mem::transmute(opaque); let pos = match origin { SEEK_SET => { let Ok(offset) = u64::try_from(offset) else { return -1; }; SeekFrom::Start(offset) } SEEK_CUR => SeekFrom::Current(offset), SEEK_END => SeekFrom::End(offset), // not possible _ => return -1, }; if seeker.seek(pos).is_err() { return -1; } 0 } /// Reads exactly `n` bytes into `buffer`. /// /// Returns a non-negative value in case of success, and a negative value in case of failure. 
#[cfg(feature = "std")]
unsafe extern "C" fn advanced_read<R: std::io::Read>(
    opaque: *mut core::ffi::c_void,
    buffer: *mut core::ffi::c_void,
    n: usize,
) -> ::core::ffi::c_int {
    // Safety: The trait boundaries in `init_advanced()` ensure that `opaque` points to a R.
    // (Explicit pointer cast instead of `transmute` — same result, clearer intent,
    // and not flagged by clippy's `transmute_ptr_to_ref`.)
    let reader: &mut R = &mut *opaque.cast::<R>();
    // Safety: zstd ensures the buffer is allocated and safe to use
    let buf = std::slice::from_raw_parts_mut(buffer as *mut u8, n);
    if reader.read_exact(buf).is_err() {
        return -1;
    }
    0
}

/// Indicates that the seek table could not be created.
#[derive(Debug, PartialEq)]
pub struct SeekTableCreateError;

impl core::fmt::Display for SeekTableCreateError {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        f.write_str("Zstd returned null pointer when creating new seektable from seekable")
    }
}

/// A standalone seek table, extracted from a [`Seekable`].
pub struct SeekTable(NonNull<zstd_sys::ZSTD_seekTable>);

// NOTE(review): assumed sound because the raw seek table is only reachable
// through this exclusive owner — confirm against zstd's threading guarantees.
unsafe impl Send for SeekTable {}
unsafe impl Sync for SeekTable {}

impl SeekTable {
    // `ZSTD_seekTable_create_fromSeekable` causes a segmentation fault when called with an
    // uninitialized Seekable. This function is safe once the issue is resolved and released
    // upstream.
    // See https://github.com/facebook/zstd/issues/4200 and https://github.com/facebook/zstd/pull/4201
    /// Try to create a `SeekTable` from a `Seekable`.
    ///
    /// Memory constrained use cases that manage multiple archives benefit from retaining
    /// multiple archive seek tables without retaining a `Seekable` instance for each.
    ///
    /// # Safety
    ///
    /// May cause a segmentation fault when called with an uninitialized `Seekable`.
    pub unsafe fn try_from_seekable<'a>(
        value: &Seekable<'a>,
    ) -> Result<Self, SeekTableCreateError> {
        // Safety: Just FFI
        let ptr = unsafe {
            zstd_sys::ZSTD_seekTable_create_fromSeekable(value.0.as_ptr())
        };
        let ptr = NonNull::new(ptr).ok_or(SeekTableCreateError)?;
        Ok(Self(ptr))
    }

    /// Get the number of frames of the underlying seekable object.
pub fn num_frames(&self) -> u32 { // Safety: Just FFI unsafe { zstd_sys::ZSTD_seekTable_getNumFrames(self.0.as_ptr()) } } /// Get the offset of the compressed frame. /// /// Returns an error if `frame_index` is out of range. pub fn frame_compressed_offset( &self, frame_index: u32, ) -> Result<u64, FrameIndexTooLargeError> { // Safety: Just FFI let offset = unsafe { zstd_sys::ZSTD_seekTable_getFrameCompressedOffset( self.0.as_ptr(), frame_index, ) }; if offset == SEEKABLE_FRAMEINDEX_TOOLARGE { return Err(FrameIndexTooLargeError); } Ok(offset) } /// Get the offset of the decompressed frame. /// /// Returns an error if `frame_index` is out of range. pub fn frame_decompressed_offset( &self, frame_index: u32, ) -> Result<u64, FrameIndexTooLargeError> { // Safety: Just FFI let offset = unsafe { zstd_sys::ZSTD_seekTable_getFrameDecompressedOffset( self.0.as_ptr(), frame_index, ) }; if offset == SEEKABLE_FRAMEINDEX_TOOLARGE { return Err(FrameIndexTooLargeError); } Ok(offset) } /// Get the size of the compressed frame. /// /// Returns an error if `frame_index` is out of range. pub fn frame_compressed_size(&self, frame_index: u32) -> SafeResult { // Safety: Just FFI let code = unsafe { zstd_sys::ZSTD_seekTable_getFrameCompressedSize( self.0.as_ptr(), frame_index, ) }; parse_code(code) } /// Get the size of the decompressed frame. /// /// Returns an error if `frame_index` is out of range. pub fn frame_decompressed_size(&self, frame_index: u32) -> SafeResult { // Safety: Just FFI let code = unsafe { zstd_sys::ZSTD_seekTable_getFrameDecompressedSize( self.0.as_ptr(), frame_index, ) }; parse_code(code) } /// Get the frame at the given offset. pub fn offset_to_frame_index(&self, offset: u64) -> u32 { // Safety: Just FFI unsafe { zstd_sys::ZSTD_seekTable_offsetToFrameIndex( self.0.as_ptr(), offset, ) } } } impl Drop for SeekTable { fn drop(&mut self) { // Safety: Just FFI unsafe { zstd_sys::ZSTD_seekTable_free(self.0.as_ptr()); } } }
extern crate std;

use crate as zstd_safe;

use self::std::vec::Vec;

// Small, compressible sample input used by most round-trip tests.
const INPUT: &[u8] = b"Rust is a multi-paradigm system programming language focused on safety, especially safe concurrency. Rust is syntactically similar to C++, but is designed to provide better memory safety while maintaining high performance.";

// Larger corpus (this crate's own source) used for dictionary training.
const LONG_CONTENT: &str = include_str!("lib.rs");

#[cfg(feature = "std")]
#[test]
fn test_writebuf() {
    use zstd_safe::WriteBuf;

    let mut data = Vec::with_capacity(10);
    unsafe {
        data.write_from(|ptr, n| {
            assert!(n >= 4);
            let ptr = ptr as *mut u8;
            ptr.write(0);
            ptr.add(1).write(1);
            ptr.add(2).write(2);
            ptr.add(3).write(3);
            Ok(4)
        })
    }
    .unwrap();
    assert_eq!(data.as_slice(), &[0, 1, 2, 3]);

    let mut cursor = std::io::Cursor::new(&mut data);
    // Here we use a position larger than the actual data.
    // So expect the data to be zero-filled.
    cursor.set_position(6);
    unsafe {
        cursor.write_from(|ptr, n| {
            assert!(n >= 4);
            let ptr = ptr as *mut u8;
            ptr.write(4);
            ptr.add(1).write(5);
            ptr.add(2).write(6);
            ptr.add(3).write(7);
            Ok(4)
        })
    }
    .unwrap();
    assert_eq!(data.as_slice(), &[0, 1, 2, 3, 0, 0, 4, 5, 6, 7]);
}

#[cfg(feature = "std")]
#[test]
fn test_simple_cycle() {
    // Compress then decompress with the one-shot convenience functions.
    let mut buffer = std::vec![0u8; 256];
    let written = zstd_safe::compress(&mut buffer, INPUT, 3).unwrap();
    let compressed = &buffer[..written];

    let mut buffer = std::vec![0u8; 256];
    let written = zstd_safe::decompress(&mut buffer, compressed).unwrap();
    let decompressed = &buffer[..written];

    assert_eq!(INPUT, decompressed);
}

#[test]
fn test_cctx_cycle() {
    // Same round-trip, but through explicit (de)compression contexts.
    let mut buffer = std::vec![0u8; 256];
    let mut cctx = zstd_safe::CCtx::default();
    let written = cctx.compress(&mut buffer[..], INPUT, 1).unwrap();
    let compressed = &buffer[..written];

    let mut dctx = zstd_safe::DCtx::default();
    let mut buffer = std::vec![0u8; 256];
    let written = dctx.decompress(&mut buffer[..], compressed).unwrap();
    let decompressed = &buffer[..written];

    assert_eq!(INPUT, decompressed);
}

#[test]
fn test_dictionary() {
    // Prepare some content to train the dictionary.
    let bytes = LONG_CONTENT.as_bytes();
    let line_sizes: Vec<usize> =
        LONG_CONTENT.lines().map(|line| line.len() + 1).collect();

    // Train the dictionary
    let mut dict_buffer = std::vec![0u8; 100_000];
    let written =
        zstd_safe::train_from_buffer(&mut dict_buffer[..], bytes, &line_sizes)
            .unwrap();
    let dict_buffer = &dict_buffer[..written];

    // Create pre-hashed dictionaries for (de)compression
    let cdict = zstd_safe::create_cdict(dict_buffer, 3);
    let ddict = zstd_safe::create_ddict(dict_buffer);

    // Compress data
    let mut cctx = zstd_safe::CCtx::default();
    cctx.ref_cdict(&cdict).unwrap();
    let mut buffer = std::vec![0u8; 1024 * 1024];
    // First, try to compress without a dict
    let big_written = zstd_safe::compress(&mut buffer[..], bytes, 3).unwrap();
    let written = cctx
        .compress2(&mut buffer[..], bytes)
        .map_err(zstd_safe::get_error_name)
        .unwrap();
    // The dictionary should improve compression on this corpus.
    assert!(big_written > written);
    let compressed = &buffer[..written];

    // Decompress data
    let mut dctx = zstd_safe::DCtx::default();
    dctx.ref_ddict(&ddict).unwrap();
    let mut buffer = std::vec![0u8; 1024 * 1024];
    let written = dctx
        .decompress(&mut buffer[..], compressed)
        .map_err(zstd_safe::get_error_name)
        .unwrap();
    let decompressed = &buffer[..written];

    // Profit!
    assert_eq!(bytes, decompressed);
}

#[test]
fn test_checksum() {
    let mut buffer = std::vec![0u8; 256];
    let mut cctx = zstd_safe::CCtx::default();
    cctx.set_parameter(zstd_safe::CParameter::ChecksumFlag(true))
        .unwrap();
    let written = cctx.compress2(&mut buffer[..], INPUT).unwrap();
    let compressed = &mut buffer[..written];

    let mut dctx = zstd_safe::DCtx::default();
    let mut buffer = std::vec![0u8; 1024 * 1024];
    let written = dctx
        .decompress(&mut buffer[..], compressed)
        .map_err(zstd_safe::get_error_name)
        .unwrap();
    let decompressed = &buffer[..written];

    assert_eq!(INPUT, decompressed);

    // Now try again with some corruption
    // TODO: Find a mutation that _wouldn't_ be detected without checksums.
    // (Most naive changes already trigger a "corrupt block" error.)
    if let Some(last) = compressed.last_mut() {
        *last = last.saturating_sub(1);
    }
    let err = dctx
        .decompress(&mut buffer[..], compressed)
        .map_err(zstd_safe::get_error_name)
        .err()
        .unwrap();
    // The error message will complain about the checksum.
    assert!(err.contains("checksum"));
}

#[cfg(all(feature = "experimental", feature = "std"))]
#[test]
fn test_upper_bound() {
    let mut buffer = std::vec![0u8; 256];
    // On arbitrary (non-zstd) data, no bound can be computed.
    assert!(zstd_safe::decompress_bound(&buffer).is_err());

    let written = zstd_safe::compress(&mut buffer, INPUT, 3).unwrap();
    let compressed = &buffer[..written];
    assert_eq!(
        zstd_safe::decompress_bound(&compressed),
        Ok(INPUT.len() as u64)
    );
}

#[cfg(feature = "seekable")]
#[test]
fn test_seekable_cycle() {
    let seekable_archive = new_seekable_archive(INPUT);

    let mut seekable = crate::seekable::Seekable::create();
    seekable
        .init_buff(&seekable_archive)
        .map_err(zstd_safe::get_error_name)
        .unwrap();

    decompress_seekable(&mut seekable);

    // Check that the archive can also be decompressed by a regular function
    let mut buffer = std::vec![0u8; 256];
    let written = zstd_safe::decompress(&mut buffer[..], &seekable_archive)
        .map_err(zstd_safe::get_error_name)
        .unwrap();
    let decompressed = &buffer[..written];
    assert_eq!(INPUT, decompressed);

    // Trigger FrameIndexTooLargeError
    let frame_index = seekable.num_frames() + 1;
    assert_eq!(
        seekable.frame_compressed_offset(frame_index).unwrap_err(),
        crate::seekable::FrameIndexTooLargeError
    );
}

#[cfg(feature = "seekable")]
#[test]
fn test_seekable_seek_table() {
    use crate::seekable::{FrameIndexTooLargeError, SeekTable, Seekable};

    let seekable_archive = new_seekable_archive(INPUT);

    let mut seekable = Seekable::create();
    seekable
        .init_buff(&seekable_archive)
        .map_err(zstd_safe::get_error_name)
        .unwrap();

    // Try to create a seek table from the seekable
    let seek_table = unsafe { SeekTable::try_from_seekable(&seekable).unwrap() };

    // Seekable and seek table should return the same results
    assert_eq!(seekable.num_frames(), seek_table.num_frames());
    assert_eq!(
        seekable.frame_compressed_offset(2).unwrap(),
        seek_table.frame_compressed_offset(2).unwrap()
    );
    assert_eq!(
        seekable.frame_decompressed_offset(2).unwrap(),
        seek_table.frame_decompressed_offset(2).unwrap()
    );
    assert_eq!(
        seekable.frame_compressed_size(2).unwrap(),
        seek_table.frame_compressed_size(2).unwrap()
    );
    assert_eq!(
        seekable.frame_decompressed_size(2).unwrap(),
        seek_table.frame_decompressed_size(2).unwrap()
    );

    // Trigger FrameIndexTooLargeError
    let frame_index = seekable.num_frames() + 1;
    assert_eq!(
        seek_table.frame_compressed_offset(frame_index).unwrap_err(),
        FrameIndexTooLargeError
    );
}

#[cfg(all(feature = "std", feature = "seekable"))]
#[test]
fn test_seekable_advanced_cycle() {
    use crate::seekable::Seekable;
    use std::{boxed::Box, io::Cursor};

    // Wrap the archive in a cursor that implements Read and Seek,
    // a file would also work
    let seekable_archive = Cursor::new(new_seekable_archive(INPUT));

    let mut seekable = Seekable::create()
        .init_advanced(Box::new(seekable_archive))
        .map_err(zstd_safe::get_error_name)
        .unwrap();

    decompress_seekable(&mut seekable);
}

// Compresses `input` into a seekable archive (including the seek table).
#[cfg(feature = "seekable")]
fn new_seekable_archive(input: &[u8]) -> Vec<u8> {
    use crate::{seekable::SeekableCStream, InBuffer, OutBuffer};

    // Make sure the buffer is big enough.
    // The buffer needs to be bigger than the uncompressed data here, as the
    // seekable archive has more metadata than actual compressed data: the
    // input is really small and we use a max_frame_size of 64, which is way
    // too small for real-world usages.
    let mut buffer = std::vec![0u8; 512];
    let mut cstream = SeekableCStream::create();
    cstream
        .init(3, true, 64)
        .map_err(zstd_safe::get_error_name)
        .unwrap();

    let mut in_buffer = InBuffer::around(input);
    let mut out_buffer = OutBuffer::around(&mut buffer[..]);
    // This could get stuck if the buffer is too small
    while in_buffer.pos() < in_buffer.src.len() {
        cstream
            .compress_stream(&mut out_buffer, &mut in_buffer)
            .map_err(zstd_safe::get_error_name)
            .unwrap();
    }
    // Make sure everything is flushed to out_buffer
    loop {
        if cstream
            .end_stream(&mut out_buffer)
            .map_err(zstd_safe::get_error_name)
            .unwrap()
            == 0
        {
            break;
        }
    }

    Vec::from(out_buffer.as_slice())
}

// Decompresses the test INPUT from `seekable`, first one frame, then fully.
#[cfg(feature = "seekable")]
fn decompress_seekable(seekable: &mut crate::seekable::Seekable<'_>) {
    // Make the buffer as big as max_frame_size so it can hold a complete frame
    let mut buffer = std::vec![0u8; 64];
    // Decompress only the first frame
    let written = seekable
        .decompress(&mut buffer[..], 0)
        .map_err(zstd_safe::get_error_name)
        .unwrap();
    let decompressed = &buffer[..written];
    assert!(INPUT.starts_with(decompressed));
    assert_eq!(decompressed.len(), 64);

    // Make the buffer big enough to hold the complete input
    let mut buffer = std::vec![0u8; 256];
    // Decompress everything
    let written = seekable
        .decompress(&mut buffer[..], 0)
        .map_err(zstd_safe::get_error_name)
        .unwrap();
    let decompressed = &buffer[..written];
    assert_eq!(INPUT, decompressed);
}
use std::ffi::OsStr;
use std::path::{Path, PathBuf};
use std::{env, fmt, fs};

// Generates Rust FFI bindings for the zstd headers via bindgen.
// `defs` are extra preprocessor defines; `headerpaths` are include dirs.
#[cfg(feature = "bindgen")]
fn generate_bindings(defs: Vec<&str>, headerpaths: Vec<PathBuf>) {
    let bindings = bindgen::Builder::default().header("zstd.h");
    #[cfg(feature = "zdict_builder")]
    let bindings = bindings.header("zdict.h");
    #[cfg(feature = "seekable")]
    let bindings = bindings.header("zstd_seekable.h");

    let bindings = bindings
        .blocklist_type("max_align_t")
        .size_t_is_usize(true)
        .use_core()
        .rustified_enum(".*")
        .clang_args(
            headerpaths
                .into_iter()
                .map(|path| format!("-I{}", path.display())),
        )
        .clang_args(defs.into_iter().map(|def| format!("-D{}", def)));

    #[cfg(feature = "experimental")]
    let bindings = bindings
        .clang_arg("-DZSTD_STATIC_LINKING_ONLY")
        .clang_arg("-DZDICT_STATIC_LINKING_ONLY")
        .clang_arg("-DZSTD_RUST_BINDINGS_EXPERIMENTAL");

    #[cfg(not(feature = "std"))]
    let bindings = bindings.ctypes_prefix("libc");

    #[cfg(feature = "seekable")]
    let bindings = bindings
        .blocklist_function("ZSTD_seekable_initFile")
        .blocklist_var("ZSTD_seekTableFooterSize");

    let bindings = bindings.generate().expect("Unable to generate bindings");

    let out_path = PathBuf::from(env::var_os("OUT_DIR").unwrap());
    bindings
        .write_to_file(out_path.join("bindings.rs"))
        .expect("Could not write bindings");
}

// No-op stub when pre-generated bindings are used instead of bindgen.
#[cfg(not(feature = "bindgen"))]
fn generate_bindings(_: Vec<&str>, _: Vec<PathBuf>) {}

// Locates a system libzstd through pkg-config; returns the defines and
// include paths the bindings generator needs.
fn pkg_config() -> (Vec<&'static str>, Vec<PathBuf>) {
    let library = pkg_config::Config::new()
        .statik(true)
        .cargo_metadata(!cfg!(feature = "non-cargo"))
        .probe("libzstd")
        .expect("Can't probe for zstd in pkg-config");
    (vec!["PKG_CONFIG"], library.include_paths)
}

#[cfg(not(feature = "legacy"))]
fn set_legacy(_config: &mut cc::Build) {}

#[cfg(feature = "legacy")]
fn set_legacy(config: &mut cc::Build) {
    config.define("ZSTD_LEGACY_SUPPORT", Some("1"));
    config.include("zstd/lib/legacy");
}

#[cfg(feature = "zstdmt")]
fn set_pthread(config: &mut cc::Build) {
    config.flag("-pthread");
}

#[cfg(not(feature = "zstdmt"))]
fn set_pthread(_config: &mut cc::Build) {}

#[cfg(feature = "zstdmt")]
fn enable_threading(config: &mut cc::Build) {
    config.define("ZSTD_MULTITHREAD", Some(""));
}

#[cfg(not(feature = "zstdmt"))]
fn enable_threading(_config: &mut cc::Build) {}

/// This function would find the first flag in `flags` that is supported
/// and add that to `config`.
#[allow(dead_code)]
fn flag_if_supported_with_fallbacks(config: &mut cc::Build, flags: &[&str]) {
    let option = flags
        .iter()
        .find(|flag| config.is_flag_supported(flag).unwrap_or_default());

    if let Some(flag) = option {
        config.flag(flag);
    }
}

/// Compiles the vendored zstd sources into `libzstd.a` and copies the public
/// headers into `$OUT_DIR/include`.
fn compile_zstd() {
    let mut config = cc::Build::new();

    // Search the following directories for C files to add to the compilation.
    for dir in &[
        "zstd/lib/common",
        "zstd/lib/compress",
        "zstd/lib/decompress",
        #[cfg(feature = "seekable")]
        "zstd/contrib/seekable_format",
        #[cfg(feature = "zdict_builder")]
        "zstd/lib/dictBuilder",
        #[cfg(feature = "legacy")]
        "zstd/lib/legacy",
    ] {
        let mut entries: Vec<_> = fs::read_dir(dir)
            .unwrap()
            .map(Result::unwrap)
            .filter_map(|entry| {
                let filename = entry.file_name();
                if Path::new(&filename).extension() == Some(OsStr::new("c"))
                    // Skip xxhash*.c files: since we are using the "PRIVATE API"
                    // mode, it will be inlined in the headers.
                    && !filename.to_string_lossy().contains("xxhash")
                {
                    Some(entry.path())
                } else {
                    None
                }
            })
            .collect();
        // Sort for a deterministic build.
        entries.sort();
        config.files(entries);
    }

    // Either include ASM files, or disable ASM entirely.
    // Also disable it on windows, apparently it doesn't do well with these .S files at the moment.
    if cfg!(feature = "no_asm") || std::env::var("CARGO_CFG_WINDOWS").is_ok() {
        config.define("ZSTD_DISABLE_ASM", Some(""));
    } else {
        config.file("zstd/lib/decompress/huf_decompress_amd64.S");
    }

    // List out the WASM targets that need wasm-shim.
    // Note that Emscripten already provides its own C standard library so
    // wasm32-unknown-emscripten should not be included here.
    // See: https://github.com/gyscos/zstd-rs/pull/209
    let need_wasm_shim = !cfg!(feature = "no_wasm_shim")
        && env::var("TARGET").map_or(false, |target| {
            target == "wasm32-unknown-unknown"
                || target.starts_with("wasm32-wasi")
        });

    if need_wasm_shim {
        cargo_print(&"rerun-if-changed=wasm-shim/stdlib.h");
        cargo_print(&"rerun-if-changed=wasm-shim/string.h");

        config.include("wasm-shim/");
    }

    // Some extra parameters
    config.include("zstd/lib/");
    config.include("zstd/lib/common");
    config.warnings(false);

    config.define("ZSTD_LIB_DEPRECATED", Some("0"));

    config
        .flag_if_supported("-ffunction-sections")
        .flag_if_supported("-fdata-sections")
        .flag_if_supported("-fmerge-all-constants");

    if cfg!(feature = "fat-lto") {
        config.flag_if_supported("-flto");
    } else if cfg!(feature = "thin-lto") {
        flag_if_supported_with_fallbacks(
            &mut config,
            &["-flto=thin", "-flto"],
        );
    }

    #[cfg(feature = "thin")]
    {
        // Here we try to build a lib as thin/small as possible.
        // We cannot use ZSTD_LIB_MINIFY since it is only
        // used in Makefile to define other options.

        config
            .define("HUF_FORCE_DECOMPRESS_X1", Some("1"))
            .define("ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT", Some("1"))
            .define("ZSTD_NO_INLINE", Some("1"))
            // removes the error messages that are
            // otherwise returned by ZSTD_getErrorName
            .define("ZSTD_STRIP_ERROR_STRINGS", Some("1"));

        // Disable use of BMI2 instructions since it involves runtime checking
        // of the feature and fallback if no BMI2 instruction is detected.
        config.define("DYNAMIC_BMI2", Some("0"));

        // Disable support for all legacy formats
        #[cfg(not(feature = "legacy"))]
        config.define("ZSTD_LEGACY_SUPPORT", Some("0"));

        config.opt_level_str("z");
    }

    // Hide symbols from resulting library,
    // so we can be used with another zstd-linking lib.
    // See https://github.com/gyscos/zstd-rs/issues/58
    config.flag("-fvisibility=hidden");
    config.define("XXH_PRIVATE_API", Some(""));
    config.define("ZSTDLIB_VISIBILITY", Some(""));
    #[cfg(feature = "zdict_builder")]
    config.define("ZDICTLIB_VISIBILITY", Some(""));
    config.define("ZSTDERRORLIB_VISIBILITY", Some(""));

    // https://github.com/facebook/zstd/blob/d69d08ed6c83563b57d98132e1e3f2487880781e/lib/common/debug.h#L60
    /* recommended values for DEBUGLEVEL :
     * 0 : release mode, no debug, all run-time checks disabled
     * 1 : enables assert() only, no display
     * 2 : reserved, for currently active debug path
     * 3 : events once per object lifetime (CCtx, CDict, etc.)
     * 4 : events once per frame
     * 5 : events once per block
     * 6 : events once per sequence (verbose)
     * 7+: events at every position (*very* verbose)
     */
    #[cfg(feature = "debug")]
    {
        // Fix: the previous code referenced an undefined `is_wasm` binding,
        // which failed to compile with the `debug` feature enabled. Compute
        // the wasm check here from the target architecture instead.
        let is_wasm = env::var("CARGO_CFG_TARGET_ARCH")
            .map_or(false, |arch| arch == "wasm32");
        if !is_wasm {
            config.define("DEBUGLEVEL", Some("5"));
        }
    }

    set_pthread(&mut config);
    set_legacy(&mut config);
    enable_threading(&mut config);

    // Compile!
    config.compile("libzstd.a");

    let src = env::current_dir().unwrap().join("zstd").join("lib");
    let dst = PathBuf::from(env::var_os("OUT_DIR").unwrap());
    let include = dst.join("include");
    fs::create_dir_all(&include).unwrap();
    fs::copy(src.join("zstd.h"), include.join("zstd.h")).unwrap();
    fs::copy(src.join("zstd_errors.h"), include.join("zstd_errors.h"))
        .unwrap();
    #[cfg(feature = "zdict_builder")]
    fs::copy(src.join("zdict.h"), include.join("zdict.h")).unwrap();
    cargo_print(&format_args!("root={}", dst.display()));
}

/// Print a line for cargo.
///
/// If non-cargo is set, do not print anything.
fn cargo_print(content: &dyn fmt::Display) { if cfg!(not(feature = "non-cargo")) { println!("cargo:{}", content); } } fn main() { cargo_print(&"rerun-if-env-changed=ZSTD_SYS_USE_PKG_CONFIG"); let target_arch = std::env::var("CARGO_CFG_TARGET_ARCH").unwrap_or_default(); let target_os = std::env::var("CARGO_CFG_TARGET_OS").unwrap_or_default(); if target_arch == "wasm32" || target_os == "hermit" { cargo_print(&"rustc-cfg=feature=\"std\""); } // println!("cargo:rustc-link-lib=zstd"); let (defs, headerpaths) = if cfg!(feature = "pkg-config") || env::var_os("ZSTD_SYS_USE_PKG_CONFIG").is_some() { pkg_config() } else { if !Path::new("zstd/lib").exists() { panic!("Folder 'zstd/lib' does not exists. Maybe you forgot to clone the 'zstd' submodule?"); } let manifest_dir = PathBuf::from( env::var_os("CARGO_MANIFEST_DIR") .expect("Manifest dir is always set by cargo"), ); compile_zstd(); (vec![], vec![manifest_dir.join("zstd/lib")]) }; let includes: Vec<_> = headerpaths .iter() .map(|p| p.display().to_string()) .collect(); cargo_print(&format_args!("include={}", includes.join(";"))); generate_bindings(defs, headerpaths); }
/* This file is auto-generated from the public API of the zstd library. It is released under the same BSD license. BSD License For Zstandard software Copyright (c) Meta Platforms, Inc. and affiliates. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name Facebook, nor Meta, nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/
/* automatically generated by rust-bindgen 0.71.1 */

// NOTE(review): bindgen-generated FFI declarations for zstd's zdict.h
// (this variant exposes only the stable training/finalizing/introspection
// entry points). Do not edit by hand — regenerate with bindgen instead.

extern "C" {
    #[doc = " ZDICT_trainFromBuffer():\n Train a dictionary from an array of samples.\n Redirect towards ZDICT_optimizeTrainFromBuffer_fastCover() single-threaded, with d=8, steps=4,\n f=20, and accel=1.\n Samples must be stored concatenated in a single flat buffer `samplesBuffer`,\n supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order.\n The resulting dictionary will be saved into `dictBuffer`.\n @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)\n or an error code, which can be tested with ZDICT_isError().\n Note: Dictionary training will fail if there are not enough samples to construct a\n dictionary, or if most of the samples are too small (< 8 bytes being the lower limit).\n If dictionary training fails, you should use zstd without a dictionary, as the dictionary\n would've been ineffective anyways. If you believe your samples would benefit from a dictionary\n please open an issue with details, and we can look into it.\n Note: ZDICT_trainFromBuffer()'s memory usage is about 6 MB.\n Tips: In general, a reasonable dictionary has a size of ~ 100 KB.\n It's possible to select smaller or larger size, just by specifying `dictBufferCapacity`.\n In general, it's recommended to provide a few thousands samples, though this can vary a lot.\n It's recommended that total size of all samples be about ~x100 times the target size of dictionary."]
    pub fn ZDICT_trainFromBuffer(
        dictBuffer: *mut ::core::ffi::c_void,
        dictBufferCapacity: usize,
        samplesBuffer: *const ::core::ffi::c_void,
        samplesSizes: *const usize,
        nbSamples: ::core::ffi::c_uint,
    ) -> usize;
}

// Passed by value to the ZDICT_* entry points; #[repr(C)] keeps the layout
// identical to the C struct of the same name.
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct ZDICT_params_t {
    #[doc = "< optimize for a specific zstd compression level; 0 means default"]
    pub compressionLevel: ::core::ffi::c_int,
    #[doc = "< Write log to stderr; 0 = none (default); 1 = errors; 2 = progression; 3 = details; 4 = debug;"]
    pub notificationLevel: ::core::ffi::c_uint,
    #[doc = "< force dictID value; 0 means auto mode (32-bits random value)\n NOTE: The zstd format reserves some dictionary IDs for future use.\n You may use them in private settings, but be warned that they\n may be used by zstd in a public dictionary registry in the future.\n These dictionary IDs are:\n - low range : <= 32767\n - high range : >= (2^31)"]
    pub dictID: ::core::ffi::c_uint,
}

extern "C" {
    #[doc = " ZDICT_finalizeDictionary():\n Given a custom content as a basis for dictionary, and a set of samples,\n finalize dictionary by adding headers and statistics according to the zstd\n dictionary format.\n\n Samples must be stored concatenated in a flat buffer `samplesBuffer`,\n supplied with an array of sizes `samplesSizes`, providing the size of each\n sample in order. The samples are used to construct the statistics, so they\n should be representative of what you will compress with this dictionary.\n\n The compression level can be set in `parameters`. You should pass the\n compression level you expect to use in production. The statistics for each\n compression level differ, so tuning the dictionary for the compression level\n can help quite a bit.\n\n You can set an explicit dictionary ID in `parameters`, or allow us to pick\n a random dictionary ID for you, but we can't guarantee no collisions.\n\n The dstDictBuffer and the dictContent may overlap, and the content will be\n appended to the end of the header. If the header + the content doesn't fit in\n maxDictSize the beginning of the content is truncated to make room, since it\n is presumed that the most profitable content is at the end of the dictionary,\n since that is the cheapest to reference.\n\n `maxDictSize` must be >= max(dictContentSize, ZDICT_DICTSIZE_MIN).\n\n @return: size of dictionary stored into `dstDictBuffer` (<= `maxDictSize`),\n or an error code, which can be tested by ZDICT_isError().\n Note: ZDICT_finalizeDictionary() will push notifications into stderr if\n instructed to, using notificationLevel>0.\n NOTE: This function currently may fail in several edge cases including:\n * Not enough samples\n * Samples are uncompressible\n * Samples are all exactly the same"]
    pub fn ZDICT_finalizeDictionary(
        dstDictBuffer: *mut ::core::ffi::c_void,
        maxDictSize: usize,
        dictContent: *const ::core::ffi::c_void,
        dictContentSize: usize,
        samplesBuffer: *const ::core::ffi::c_void,
        samplesSizes: *const usize,
        nbSamples: ::core::ffi::c_uint,
        parameters: ZDICT_params_t,
    ) -> usize;
}

// Introspection helpers: read the ID / header size out of a serialized
// dictionary, and decode the usize return codes of the functions above.
extern "C" {
    pub fn ZDICT_getDictID(
        dictBuffer: *const ::core::ffi::c_void,
        dictSize: usize,
    ) -> ::core::ffi::c_uint;
}
extern "C" {
    pub fn ZDICT_getDictHeaderSize(
        dictBuffer: *const ::core::ffi::c_void,
        dictSize: usize,
    ) -> usize;
}
extern "C" {
    pub fn ZDICT_isError(errorCode: usize) -> ::core::ffi::c_uint;
}
extern "C" {
    pub fn ZDICT_getErrorName(errorCode: usize) -> *const ::core::ffi::c_char;
}
/* This file is auto-generated from the public API of the zstd library. It is released under the same BSD license. BSD License For Zstandard software Copyright (c) Meta Platforms, Inc. and affiliates. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name Facebook, nor Meta, nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/
/* automatically generated by rust-bindgen 0.71.1 */

// NOTE(review): bindgen-generated FFI declarations for zstd's zdict.h,
// including the COVER / fastCover / legacy trainers. Do not edit by hand —
// regenerate with bindgen instead.

// Minimum sizes accepted by the dictionary builder (mirrors the zdict.h
// macros of the same names).
pub const ZDICT_DICTSIZE_MIN: u32 = 256;
pub const ZDICT_CONTENTSIZE_MIN: u32 = 128;

extern "C" {
    #[doc = " ZDICT_trainFromBuffer():\n Train a dictionary from an array of samples.\n Redirect towards ZDICT_optimizeTrainFromBuffer_fastCover() single-threaded, with d=8, steps=4,\n f=20, and accel=1.\n Samples must be stored concatenated in a single flat buffer `samplesBuffer`,\n supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order.\n The resulting dictionary will be saved into `dictBuffer`.\n @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)\n or an error code, which can be tested with ZDICT_isError().\n Note: Dictionary training will fail if there are not enough samples to construct a\n dictionary, or if most of the samples are too small (< 8 bytes being the lower limit).\n If dictionary training fails, you should use zstd without a dictionary, as the dictionary\n would've been ineffective anyways. If you believe your samples would benefit from a dictionary\n please open an issue with details, and we can look into it.\n Note: ZDICT_trainFromBuffer()'s memory usage is about 6 MB.\n Tips: In general, a reasonable dictionary has a size of ~ 100 KB.\n It's possible to select smaller or larger size, just by specifying `dictBufferCapacity`.\n In general, it's recommended to provide a few thousands samples, though this can vary a lot.\n It's recommended that total size of all samples be about ~x100 times the target size of dictionary."]
    pub fn ZDICT_trainFromBuffer(
        dictBuffer: *mut ::core::ffi::c_void,
        dictBufferCapacity: usize,
        samplesBuffer: *const ::core::ffi::c_void,
        samplesSizes: *const usize,
        nbSamples: ::core::ffi::c_uint,
    ) -> usize;
}

// Passed by value to the ZDICT_* entry points; #[repr(C)] keeps the layout
// identical to the C struct of the same name.
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct ZDICT_params_t {
    #[doc = "< optimize for a specific zstd compression level; 0 means default"]
    pub compressionLevel: ::core::ffi::c_int,
    #[doc = "< Write log to stderr; 0 = none (default); 1 = errors; 2 = progression; 3 = details; 4 = debug;"]
    pub notificationLevel: ::core::ffi::c_uint,
    #[doc = "< force dictID value; 0 means auto mode (32-bits random value)\n NOTE: The zstd format reserves some dictionary IDs for future use.\n You may use them in private settings, but be warned that they\n may be used by zstd in a public dictionary registry in the future.\n These dictionary IDs are:\n - low range : <= 32767\n - high range : >= (2^31)"]
    pub dictID: ::core::ffi::c_uint,
}

extern "C" {
    #[doc = " ZDICT_finalizeDictionary():\n Given a custom content as a basis for dictionary, and a set of samples,\n finalize dictionary by adding headers and statistics according to the zstd\n dictionary format.\n\n Samples must be stored concatenated in a flat buffer `samplesBuffer`,\n supplied with an array of sizes `samplesSizes`, providing the size of each\n sample in order. The samples are used to construct the statistics, so they\n should be representative of what you will compress with this dictionary.\n\n The compression level can be set in `parameters`. You should pass the\n compression level you expect to use in production. The statistics for each\n compression level differ, so tuning the dictionary for the compression level\n can help quite a bit.\n\n You can set an explicit dictionary ID in `parameters`, or allow us to pick\n a random dictionary ID for you, but we can't guarantee no collisions.\n\n The dstDictBuffer and the dictContent may overlap, and the content will be\n appended to the end of the header. If the header + the content doesn't fit in\n maxDictSize the beginning of the content is truncated to make room, since it\n is presumed that the most profitable content is at the end of the dictionary,\n since that is the cheapest to reference.\n\n `maxDictSize` must be >= max(dictContentSize, ZDICT_DICTSIZE_MIN).\n\n @return: size of dictionary stored into `dstDictBuffer` (<= `maxDictSize`),\n or an error code, which can be tested by ZDICT_isError().\n Note: ZDICT_finalizeDictionary() will push notifications into stderr if\n instructed to, using notificationLevel>0.\n NOTE: This function currently may fail in several edge cases including:\n * Not enough samples\n * Samples are uncompressible\n * Samples are all exactly the same"]
    pub fn ZDICT_finalizeDictionary(
        dstDictBuffer: *mut ::core::ffi::c_void,
        maxDictSize: usize,
        dictContent: *const ::core::ffi::c_void,
        dictContentSize: usize,
        samplesBuffer: *const ::core::ffi::c_void,
        samplesSizes: *const usize,
        nbSamples: ::core::ffi::c_uint,
        parameters: ZDICT_params_t,
    ) -> usize;
}

// Introspection helpers: read the ID / header size out of a serialized
// dictionary, and decode the usize return codes of the functions above.
extern "C" {
    pub fn ZDICT_getDictID(
        dictBuffer: *const ::core::ffi::c_void,
        dictSize: usize,
    ) -> ::core::ffi::c_uint;
}
extern "C" {
    pub fn ZDICT_getDictHeaderSize(
        dictBuffer: *const ::core::ffi::c_void,
        dictSize: usize,
    ) -> usize;
}
extern "C" {
    pub fn ZDICT_isError(errorCode: usize) -> ::core::ffi::c_uint;
}
extern "C" {
    pub fn ZDICT_getErrorName(errorCode: usize) -> *const ::core::ffi::c_char;
}

#[doc = " ZDICT_cover_params_t:\n k and d are the only required parameters.\n For others, value 0 means default."]
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct ZDICT_cover_params_t {
    pub k: ::core::ffi::c_uint,
    pub d: ::core::ffi::c_uint,
    pub steps: ::core::ffi::c_uint,
    pub nbThreads: ::core::ffi::c_uint,
    pub splitPoint: f64,
    pub shrinkDict: ::core::ffi::c_uint,
    pub shrinkDictMaxRegression: ::core::ffi::c_uint,
    pub zParams: ZDICT_params_t,
}

// Same tuning surface as ZDICT_cover_params_t with the extra `f` and `accel`
// knobs used by the fastCover variant.
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct ZDICT_fastCover_params_t {
    pub k: ::core::ffi::c_uint,
    pub d: ::core::ffi::c_uint,
    pub f: ::core::ffi::c_uint,
    pub steps: ::core::ffi::c_uint,
    pub nbThreads: ::core::ffi::c_uint,
    pub splitPoint: f64,
    pub accel: ::core::ffi::c_uint,
    pub shrinkDict: ::core::ffi::c_uint,
    pub shrinkDictMaxRegression: ::core::ffi::c_uint,
    pub zParams: ZDICT_params_t,
}

extern "C" {
    #[doc = " ZDICT_trainFromBuffer_cover():\n Train a dictionary from an array of samples using the COVER algorithm.\n Samples must be stored concatenated in a single flat buffer `samplesBuffer`,\n supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order.\n The resulting dictionary will be saved into `dictBuffer`.\n @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)\n or an error code, which can be tested with ZDICT_isError().\n See ZDICT_trainFromBuffer() for details on failure modes.\n Note: ZDICT_trainFromBuffer_cover() requires about 9 bytes of memory for each input byte.\n Tips: In general, a reasonable dictionary has a size of ~ 100 KB.\n It's possible to select smaller or larger size, just by specifying `dictBufferCapacity`.\n In general, it's recommended to provide a few thousands samples, though this can vary a lot.\n It's recommended that total size of all samples be about ~x100 times the target size of dictionary."]
    pub fn ZDICT_trainFromBuffer_cover(
        dictBuffer: *mut ::core::ffi::c_void,
        dictBufferCapacity: usize,
        samplesBuffer: *const ::core::ffi::c_void,
        samplesSizes: *const usize,
        nbSamples: ::core::ffi::c_uint,
        parameters: ZDICT_cover_params_t,
    ) -> usize;
}
extern "C" {
    #[doc = " ZDICT_optimizeTrainFromBuffer_cover():\n The same requirements as above hold for all the parameters except `parameters`.\n This function tries many parameter combinations and picks the best parameters.\n `*parameters` is filled with the best parameters found,\n dictionary constructed with those parameters is stored in `dictBuffer`.\n\n All of the parameters d, k, steps are optional.\n If d is non-zero then we don't check multiple values of d, otherwise we check d = {6, 8}.\n if steps is zero it defaults to its default value.\n If k is non-zero then we don't check multiple values of k, otherwise we check steps values in [50, 2000].\n\n @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)\n or an error code, which can be tested with ZDICT_isError().\n On success `*parameters` contains the parameters selected.\n See ZDICT_trainFromBuffer() for details on failure modes.\n Note: ZDICT_optimizeTrainFromBuffer_cover() requires about 8 bytes of memory for each input byte and additionally another 5 bytes of memory for each byte of memory for each thread."]
    pub fn ZDICT_optimizeTrainFromBuffer_cover(
        dictBuffer: *mut ::core::ffi::c_void,
        dictBufferCapacity: usize,
        samplesBuffer: *const ::core::ffi::c_void,
        samplesSizes: *const usize,
        nbSamples: ::core::ffi::c_uint,
        parameters: *mut ZDICT_cover_params_t,
    ) -> usize;
}
extern "C" {
    #[doc = " ZDICT_trainFromBuffer_fastCover():\n Train a dictionary from an array of samples using a modified version of COVER algorithm.\n Samples must be stored concatenated in a single flat buffer `samplesBuffer`,\n supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order.\n d and k are required.\n All other parameters are optional, will use default values if not provided\n The resulting dictionary will be saved into `dictBuffer`.\n @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)\n or an error code, which can be tested with ZDICT_isError().\n See ZDICT_trainFromBuffer() for details on failure modes.\n Note: ZDICT_trainFromBuffer_fastCover() requires 6 * 2^f bytes of memory.\n Tips: In general, a reasonable dictionary has a size of ~ 100 KB.\n It's possible to select smaller or larger size, just by specifying `dictBufferCapacity`.\n In general, it's recommended to provide a few thousands samples, though this can vary a lot.\n It's recommended that total size of all samples be about ~x100 times the target size of dictionary."]
    pub fn ZDICT_trainFromBuffer_fastCover(
        dictBuffer: *mut ::core::ffi::c_void,
        dictBufferCapacity: usize,
        samplesBuffer: *const ::core::ffi::c_void,
        samplesSizes: *const usize,
        nbSamples: ::core::ffi::c_uint,
        parameters: ZDICT_fastCover_params_t,
    ) -> usize;
}
extern "C" {
    #[doc = " ZDICT_optimizeTrainFromBuffer_fastCover():\n The same requirements as above hold for all the parameters except `parameters`.\n This function tries many parameter combinations (specifically, k and d combinations)\n and picks the best parameters. `*parameters` is filled with the best parameters found,\n dictionary constructed with those parameters is stored in `dictBuffer`.\n All of the parameters d, k, steps, f, and accel are optional.\n If d is non-zero then we don't check multiple values of d, otherwise we check d = {6, 8}.\n if steps is zero it defaults to its default value.\n If k is non-zero then we don't check multiple values of k, otherwise we check steps values in [50, 2000].\n If f is zero, default value of 20 is used.\n If accel is zero, default value of 1 is used.\n\n @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)\n or an error code, which can be tested with ZDICT_isError().\n On success `*parameters` contains the parameters selected.\n See ZDICT_trainFromBuffer() for details on failure modes.\n Note: ZDICT_optimizeTrainFromBuffer_fastCover() requires about 6 * 2^f bytes of memory for each thread."]
    pub fn ZDICT_optimizeTrainFromBuffer_fastCover(
        dictBuffer: *mut ::core::ffi::c_void,
        dictBufferCapacity: usize,
        samplesBuffer: *const ::core::ffi::c_void,
        samplesSizes: *const usize,
        nbSamples: ::core::ffi::c_uint,
        parameters: *mut ZDICT_fastCover_params_t,
    ) -> usize;
}

// Parameters for the deprecated selectivity-based trainer below.
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct ZDICT_legacy_params_t {
    pub selectivityLevel: ::core::ffi::c_uint,
    pub zParams: ZDICT_params_t,
}

extern "C" {
    #[doc = " ZDICT_trainFromBuffer_legacy():\n Train a dictionary from an array of samples.\n Samples must be stored concatenated in a single flat buffer `samplesBuffer`,\n supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order.\n The resulting dictionary will be saved into `dictBuffer`.\n `parameters` is optional and can be provided with values set to 0 to mean \"default\".\n @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)\n or an error code, which can be tested with ZDICT_isError().\n See ZDICT_trainFromBuffer() for details on failure modes.\n Tips: In general, a reasonable dictionary has a size of ~ 100 KB.\n It's possible to select smaller or larger size, just by specifying `dictBufferCapacity`.\n In general, it's recommended to provide a few thousands samples, though this can vary a lot.\n It's recommended that total size of all samples be about ~x100 times the target size of dictionary.\n Note: ZDICT_trainFromBuffer_legacy() will send notifications into stderr if instructed to, using notificationLevel>0."]
    pub fn ZDICT_trainFromBuffer_legacy(
        dictBuffer: *mut ::core::ffi::c_void,
        dictBufferCapacity: usize,
        samplesBuffer: *const ::core::ffi::c_void,
        samplesSizes: *const usize,
        nbSamples: ::core::ffi::c_uint,
        parameters: ZDICT_legacy_params_t,
    ) -> usize;
}
extern "C" {
    pub fn ZDICT_addEntropyTablesFromBuffer(
        dictBuffer: *mut ::core::ffi::c_void,
        dictContentSize: usize,
        dictBufferCapacity: usize,
        samplesBuffer: *const ::core::ffi::c_void,
        samplesSizes: *const usize,
        nbSamples: ::core::ffi::c_uint,
    ) -> usize;
}
/* This file is auto-generated from the public API of the zstd library. It is released under the same BSD license. BSD License For Zstandard software Copyright (c) Meta Platforms, Inc. and affiliates. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name Facebook, nor Meta, nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /* automatically generated by rust-bindgen 0.66.1 */ pub const ZDICT_DICTSIZE_MIN: u32 = 256; pub const ZDICT_CONTENTSIZE_MIN: u32 = 128; extern "C" { #[doc = " ZDICT_trainFromBuffer():\n Train a dictionary from an array of samples.\n Redirect towards ZDICT_optimizeTrainFromBuffer_fastCover() single-threaded, with d=8, steps=4,\n f=20, and accel=1.\n Samples must be stored concatenated in a single flat buffer `samplesBuffer`,\n supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order.\n The resulting dictionary will be saved into `dictBuffer`.\n @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)\n or an error code, which can be tested with ZDICT_isError().\n Note: Dictionary training will fail if there are not enough samples to construct a\n dictionary, or if most of the samples are too small (< 8 bytes being the lower limit).\n If dictionary training fails, you should use zstd without a dictionary, as the dictionary\n would've been ineffective anyways. 
If you believe your samples would benefit from a dictionary\n please open an issue with details, and we can look into it.\n Note: ZDICT_trainFromBuffer()'s memory usage is about 6 MB.\n Tips: In general, a reasonable dictionary has a size of ~ 100 KB.\n It's possible to select smaller or larger size, just by specifying `dictBufferCapacity`.\n In general, it's recommended to provide a few thousands samples, though this can vary a lot.\n It's recommended that total size of all samples be about ~x100 times the target size of dictionary."] pub fn ZDICT_trainFromBuffer( dictBuffer: *mut ::core::ffi::c_void, dictBufferCapacity: usize, samplesBuffer: *const ::core::ffi::c_void, samplesSizes: *const usize, nbSamples: ::core::ffi::c_uint, ) -> usize; } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct ZDICT_params_t { #[doc = "< optimize for a specific zstd compression level; 0 means default"] pub compressionLevel: ::core::ffi::c_int, #[doc = "< Write log to stderr; 0 = none (default); 1 = errors; 2 = progression; 3 = details; 4 = debug;"] pub notificationLevel: ::core::ffi::c_uint, #[doc = "< force dictID value; 0 means auto mode (32-bits random value)\n NOTE: The zstd format reserves some dictionary IDs for future use.\n You may use them in private settings, but be warned that they\n may be used by zstd in a public dictionary registry in the future.\n These dictionary IDs are:\n - low range : <= 32767\n - high range : >= (2^31)"] pub dictID: ::core::ffi::c_uint, } extern "C" { #[doc = " ZDICT_finalizeDictionary():\n Given a custom content as a basis for dictionary, and a set of samples,\n finalize dictionary by adding headers and statistics according to the zstd\n dictionary format.\n\n Samples must be stored concatenated in a flat buffer `samplesBuffer`,\n supplied with an array of sizes `samplesSizes`, providing the size of each\n sample in order. 
The samples are used to construct the statistics, so they\n should be representative of what you will compress with this dictionary.\n\n The compression level can be set in `parameters`. You should pass the\n compression level you expect to use in production. The statistics for each\n compression level differ, so tuning the dictionary for the compression level\n can help quite a bit.\n\n You can set an explicit dictionary ID in `parameters`, or allow us to pick\n a random dictionary ID for you, but we can't guarantee no collisions.\n\n The dstDictBuffer and the dictContent may overlap, and the content will be\n appended to the end of the header. If the header + the content doesn't fit in\n maxDictSize the beginning of the content is truncated to make room, since it\n is presumed that the most profitable content is at the end of the dictionary,\n since that is the cheapest to reference.\n\n `maxDictSize` must be >= max(dictContentSize, ZDICT_DICTSIZE_MIN).\n\n @return: size of dictionary stored into `dstDictBuffer` (<= `maxDictSize`),\n or an error code, which can be tested by ZDICT_isError().\n Note: ZDICT_finalizeDictionary() will push notifications into stderr if\n instructed to, using notificationLevel>0.\n NOTE: This function currently may fail in several edge cases including:\n * Not enough samples\n * Samples are uncompressible\n * Samples are all exactly the same"]
    pub fn ZDICT_finalizeDictionary(
        dstDictBuffer: *mut ::core::ffi::c_void,
        maxDictSize: usize,
        dictContent: *const ::core::ffi::c_void,
        dictContentSize: usize,
        samplesBuffer: *const ::core::ffi::c_void,
        samplesSizes: *const usize,
        nbSamples: ::core::ffi::c_uint,
        parameters: ZDICT_params_t,
    ) -> usize;
}
// NOTE(review): the doc above previously referenced `ZSTD_DICTSIZE_MIN`;
// the constant declared in this file (and used by the duplicate bindings)
// is `ZDICT_DICTSIZE_MIN` — fixed to match.
extern "C" {
    pub fn ZDICT_getDictID(
        dictBuffer: *const ::core::ffi::c_void,
        dictSize: usize,
    ) -> ::core::ffi::c_uint;
}
extern "C" {
    pub fn ZDICT_getDictHeaderSize(
        dictBuffer: *const ::core::ffi::c_void,
        dictSize: usize,
    ) -> usize;
}
extern "C" {
    pub fn ZDICT_isError(errorCode: usize) -> 
::core::ffi::c_uint; }
// ---------------------------------------------------------------------------
// NOTE(review): the declarations below are auto-generated FFI bindings
// (rust-bindgen — see the "automatically generated by rust-bindgen 0.71.1"
// marker further down) for zstd's dictionary-builder header `zdict.h`.
// Struct layouts and `extern "C"` signatures are an ABI contract with the
// C library: do not hand-edit them — regenerate with bindgen instead.
// ---------------------------------------------------------------------------
// Maps a ZDICT_* error code (the `usize` returned by the training functions)
// to a message; returns a C string pointer.
extern "C" { pub fn ZDICT_getErrorName(errorCode: usize) -> *const ::core::ffi::c_char; }
// Parameter block for the COVER trainer; `#[repr(C)]` mirrors the C layout.
#[doc = " ZDICT_cover_params_t:\n k and d are the only required parameters.\n For others, value 0 means default."] #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct ZDICT_cover_params_t { pub k: ::core::ffi::c_uint, pub d: ::core::ffi::c_uint, pub steps: ::core::ffi::c_uint, pub nbThreads: ::core::ffi::c_uint, pub splitPoint: f64, pub shrinkDict: ::core::ffi::c_uint, pub shrinkDictMaxRegression: ::core::ffi::c_uint, pub zParams: ZDICT_params_t, }
// Parameter block for the fastCover trainer; same shape as the COVER params
// plus the `f` and `accel` fields (see the fastCover doc strings below).
#[repr(C)] #[derive(Debug, Copy, Clone)] pub struct ZDICT_fastCover_params_t { pub k: ::core::ffi::c_uint, pub d: ::core::ffi::c_uint, pub f: ::core::ffi::c_uint, pub steps: ::core::ffi::c_uint, pub nbThreads: ::core::ffi::c_uint, pub splitPoint: f64, pub accel: ::core::ffi::c_uint, pub shrinkDict: ::core::ffi::c_uint, pub shrinkDictMaxRegression: ::core::ffi::c_uint, pub zParams: ZDICT_params_t, }
extern "C" { #[doc = " ZDICT_trainFromBuffer_cover():\n Train a dictionary from an array of samples using the COVER algorithm.\n Samples must be stored concatenated in a single flat buffer `samplesBuffer`,\n supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order.\n The resulting dictionary will be saved into `dictBuffer`.\n @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)\n or an error code, which can be tested with ZDICT_isError().\n See ZDICT_trainFromBuffer() for details on failure modes.\n Note: ZDICT_trainFromBuffer_cover() requires about 9 bytes of memory for each input byte.\n Tips: In general, a reasonable dictionary has a size of ~ 100 KB.\n It's possible to select smaller or larger size, just by specifying `dictBufferCapacity`.\n In general, it's recommended to provide a few thousands samples, though this can vary a lot.\n It's recommended that total size of all samples be about ~x100 times the target size of dictionary."] pub fn
ZDICT_trainFromBuffer_cover( dictBuffer: *mut ::core::ffi::c_void, dictBufferCapacity: usize, samplesBuffer: *const ::core::ffi::c_void, samplesSizes: *const usize, nbSamples: ::core::ffi::c_uint, parameters: ZDICT_cover_params_t, ) -> usize; } extern "C" { #[doc = " ZDICT_optimizeTrainFromBuffer_cover():\n The same requirements as above hold for all the parameters except `parameters`.\n This function tries many parameter combinations and picks the best parameters.\n `*parameters` is filled with the best parameters found,\n dictionary constructed with those parameters is stored in `dictBuffer`.\n\n All of the parameters d, k, steps are optional.\n If d is non-zero then we don't check multiple values of d, otherwise we check d = {6, 8}.\n if steps is zero it defaults to its default value.\n If k is non-zero then we don't check multiple values of k, otherwise we check steps values in [50, 2000].\n\n @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)\n or an error code, which can be tested with ZDICT_isError().\n On success `*parameters` contains the parameters selected.\n See ZDICT_trainFromBuffer() for details on failure modes.\n Note: ZDICT_optimizeTrainFromBuffer_cover() requires about 8 bytes of memory for each input byte and additionally another 5 bytes of memory for each byte of memory for each thread."] pub fn ZDICT_optimizeTrainFromBuffer_cover( dictBuffer: *mut ::core::ffi::c_void, dictBufferCapacity: usize, samplesBuffer: *const ::core::ffi::c_void, samplesSizes: *const usize, nbSamples: ::core::ffi::c_uint, parameters: *mut ZDICT_cover_params_t, ) -> usize; } extern "C" { #[doc = " ZDICT_trainFromBuffer_fastCover():\n Train a dictionary from an array of samples using a modified version of COVER algorithm.\n Samples must be stored concatenated in a single flat buffer `samplesBuffer`,\n supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order.\n d and k are required.\n All other parameters are 
optional, will use default values if not provided\n The resulting dictionary will be saved into `dictBuffer`.\n @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)\n or an error code, which can be tested with ZDICT_isError().\n See ZDICT_trainFromBuffer() for details on failure modes.\n Note: ZDICT_trainFromBuffer_fastCover() requires 6 * 2^f bytes of memory.\n Tips: In general, a reasonable dictionary has a size of ~ 100 KB.\n It's possible to select smaller or larger size, just by specifying `dictBufferCapacity`.\n In general, it's recommended to provide a few thousands samples, though this can vary a lot.\n It's recommended that total size of all samples be about ~x100 times the target size of dictionary."] pub fn ZDICT_trainFromBuffer_fastCover( dictBuffer: *mut ::core::ffi::c_void, dictBufferCapacity: usize, samplesBuffer: *const ::core::ffi::c_void, samplesSizes: *const usize, nbSamples: ::core::ffi::c_uint, parameters: ZDICT_fastCover_params_t, ) -> usize; } extern "C" { #[doc = " ZDICT_optimizeTrainFromBuffer_fastCover():\n The same requirements as above hold for all the parameters except `parameters`.\n This function tries many parameter combinations (specifically, k and d combinations)\n and picks the best parameters. 
`*parameters` is filled with the best parameters found,\n dictionary constructed with those parameters is stored in `dictBuffer`.\n All of the parameters d, k, steps, f, and accel are optional.\n If d is non-zero then we don't check multiple values of d, otherwise we check d = {6, 8}.\n if steps is zero it defaults to its default value.\n If k is non-zero then we don't check multiple values of k, otherwise we check steps values in [50, 2000].\n If f is zero, default value of 20 is used.\n If accel is zero, default value of 1 is used.\n\n @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)\n or an error code, which can be tested with ZDICT_isError().\n On success `*parameters` contains the parameters selected.\n See ZDICT_trainFromBuffer() for details on failure modes.\n Note: ZDICT_optimizeTrainFromBuffer_fastCover() requires about 6 * 2^f bytes of memory for each thread."] pub fn ZDICT_optimizeTrainFromBuffer_fastCover( dictBuffer: *mut ::core::ffi::c_void, dictBufferCapacity: usize, samplesBuffer: *const ::core::ffi::c_void, samplesSizes: *const usize, nbSamples: ::core::ffi::c_uint, parameters: *mut ZDICT_fastCover_params_t, ) -> usize; } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct ZDICT_legacy_params_t { pub selectivityLevel: ::core::ffi::c_uint, pub zParams: ZDICT_params_t, } extern "C" { #[doc = " ZDICT_trainFromBuffer_legacy():\n Train a dictionary from an array of samples.\n Samples must be stored concatenated in a single flat buffer `samplesBuffer`,\n supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order.\n The resulting dictionary will be saved into `dictBuffer`.\n `parameters` is optional and can be provided with values set to 0 to mean \"default\".\n @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)\n or an error code, which can be tested with ZDICT_isError().\n See ZDICT_trainFromBuffer() for details on failure modes.\n Tips: In general, a reasonable 
dictionary has a size of ~ 100 KB.\n It's possible to select smaller or larger size, just by specifying `dictBufferCapacity`.\n In general, it's recommended to provide a few thousands samples, though this can vary a lot.\n It's recommended that total size of all samples be about ~x100 times the target size of dictionary.\n Note: ZDICT_trainFromBuffer_legacy() will send notifications into stderr if instructed to, using notificationLevel>0."] pub fn ZDICT_trainFromBuffer_legacy( dictBuffer: *mut ::core::ffi::c_void, dictBufferCapacity: usize, samplesBuffer: *const ::core::ffi::c_void, samplesSizes: *const usize, nbSamples: ::core::ffi::c_uint, parameters: ZDICT_legacy_params_t, ) -> usize; } extern "C" { pub fn ZDICT_addEntropyTablesFromBuffer( dictBuffer: *mut ::core::ffi::c_void, dictContentSize: usize, dictBufferCapacity: usize, samplesBuffer: *const ::core::ffi::c_void, samplesSizes: *const usize, nbSamples: ::core::ffi::c_uint, ) -> usize; }
/* This file is auto-generated from the public API of the zstd library. It is released under the same BSD license. BSD License For Zstandard software Copyright (c) Meta Platforms, Inc. and affiliates. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name Facebook, nor Meta, nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/
// ---------------------------------------------------------------------------
// NOTE(review): auto-generated bindings for `zstd.h` (zstd v1.5.7, per the
// version constants below). As with the zdict bindings, enum discriminants,
// struct layouts, and extern signatures must match the C library exactly —
// regenerate with bindgen rather than editing by hand.
// ---------------------------------------------------------------------------
/* automatically generated by rust-bindgen 0.71.1 */ pub const ZSTD_VERSION_MAJOR: u32 = 1; pub const ZSTD_VERSION_MINOR: u32 = 5; pub const ZSTD_VERSION_RELEASE: u32 = 7; pub const ZSTD_VERSION_NUMBER: u32 = 10507; pub const ZSTD_CLEVEL_DEFAULT: u32 = 3; pub const ZSTD_MAGICNUMBER: u32 = 4247762216; pub const ZSTD_MAGIC_DICTIONARY: u32 = 3962610743; pub const ZSTD_MAGIC_SKIPPABLE_START: u32 = 407710288; pub const ZSTD_MAGIC_SKIPPABLE_MASK: u32 = 4294967280; pub const ZSTD_BLOCKSIZELOG_MAX: u32 = 17; pub const ZSTD_BLOCKSIZE_MAX: u32 = 131072; pub const ZSTD_CONTENTSIZE_UNKNOWN: i32 = -1; pub const ZSTD_CONTENTSIZE_ERROR: i32 = -2; #[repr(u32)] #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] pub enum ZSTD_ErrorCode { ZSTD_error_no_error = 0, ZSTD_error_GENERIC = 1, ZSTD_error_prefix_unknown = 10, ZSTD_error_version_unsupported = 12, ZSTD_error_frameParameter_unsupported = 14, ZSTD_error_frameParameter_windowTooLarge = 16, ZSTD_error_corruption_detected = 20, ZSTD_error_checksum_wrong = 22, ZSTD_error_literals_headerWrong = 24, ZSTD_error_dictionary_corrupted = 30, ZSTD_error_dictionary_wrong = 32, ZSTD_error_dictionaryCreation_failed = 34, ZSTD_error_parameter_unsupported = 40, ZSTD_error_parameter_combination_unsupported = 41, ZSTD_error_parameter_outOfBound = 42, ZSTD_error_tableLog_tooLarge = 44, ZSTD_error_maxSymbolValue_tooLarge = 46, ZSTD_error_maxSymbolValue_tooSmall = 48, ZSTD_error_cannotProduce_uncompressedBlock = 49, ZSTD_error_stabilityCondition_notRespected = 50, ZSTD_error_stage_wrong = 60, ZSTD_error_init_missing = 62, ZSTD_error_memory_allocation = 64, ZSTD_error_workSpace_tooSmall = 66, ZSTD_error_dstSize_tooSmall = 70, ZSTD_error_srcSize_wrong = 72, ZSTD_error_dstBuffer_null = 74, ZSTD_error_noForwardProgress_destFull = 80, ZSTD_error_noForwardProgress_inputEmpty = 82, ZSTD_error_frameIndex_tooLarge = 100, ZSTD_error_seekableIO = 102, ZSTD_error_dstBuffer_wrong = 104, ZSTD_error_srcBuffer_wrong = 105, ZSTD_error_sequenceProducer_failed = 
106, ZSTD_error_externalSequences_invalid = 107, ZSTD_error_maxCode = 120, } extern "C" { pub fn ZSTD_getErrorString( code: ZSTD_ErrorCode, ) -> *const ::core::ffi::c_char; } extern "C" { #[doc = " ZSTD_versionNumber() :\n Return runtime library version, the value is (MAJOR*100*100 + MINOR*100 + RELEASE)."] pub fn ZSTD_versionNumber() -> ::core::ffi::c_uint; } extern "C" { #[doc = " ZSTD_versionString() :\n Return runtime library version, like \"1.4.5\". Requires v1.3.0+."] pub fn ZSTD_versionString() -> *const ::core::ffi::c_char; } extern "C" { #[doc = " Simple Core API\n/\n/*! ZSTD_compress() :\n Compresses `src` content as a single zstd compressed frame into already allocated `dst`.\n NOTE: Providing `dstCapacity >= ZSTD_compressBound(srcSize)` guarantees that zstd will have\n enough space to successfully compress the data.\n @return : compressed size written into `dst` (<= `dstCapacity),\n or an error code if it fails (which can be tested using ZSTD_isError())."] pub fn ZSTD_compress( dst: *mut ::core::ffi::c_void, dstCapacity: usize, src: *const ::core::ffi::c_void, srcSize: usize, compressionLevel: ::core::ffi::c_int, ) -> usize; } extern "C" { #[doc = " ZSTD_decompress() :\n `compressedSize` : must be the _exact_ size of some number of compressed and/or skippable frames.\n Multiple compressed frames can be decompressed at once with this method.\n The result will be the concatenation of all decompressed frames, back to back.\n `dstCapacity` is an upper bound of originalSize to regenerate.\n First frame's decompressed size can be extracted using ZSTD_getFrameContentSize().\n If maximum upper bound isn't known, prefer using streaming mode to decompress data.\n @return : the number of bytes decompressed into `dst` (<= `dstCapacity`),\n or an errorCode if it fails (which can be tested using ZSTD_isError())."] pub fn ZSTD_decompress( dst: *mut ::core::ffi::c_void, dstCapacity: usize, src: *const ::core::ffi::c_void, compressedSize: usize, ) -> usize; } extern "C" 
{ pub fn ZSTD_getFrameContentSize( src: *const ::core::ffi::c_void, srcSize: usize, ) -> ::core::ffi::c_ulonglong; } extern "C" { #[doc = " ZSTD_getDecompressedSize() (obsolete):\n This function is now obsolete, in favor of ZSTD_getFrameContentSize().\n Both functions work the same way, but ZSTD_getDecompressedSize() blends\n \"empty\", \"unknown\" and \"error\" results to the same return value (0),\n while ZSTD_getFrameContentSize() gives them separate return values.\n @return : decompressed size of `src` frame content _if known and not empty_, 0 otherwise."] pub fn ZSTD_getDecompressedSize( src: *const ::core::ffi::c_void, srcSize: usize, ) -> ::core::ffi::c_ulonglong; } extern "C" { #[doc = " ZSTD_findFrameCompressedSize() : Requires v1.4.0+\n `src` should point to the start of a ZSTD frame or skippable frame.\n `srcSize` must be >= first frame size\n @return : the compressed size of the first frame starting at `src`,\n suitable to pass as `srcSize` to `ZSTD_decompress` or similar,\n or an error code if input is invalid\n Note 1: this method is called _find*() because it's not enough to read the header,\n it may have to scan through the frame's content, to reach its end.\n Note 2: this method also works with Skippable Frames. 
In which case,\n it returns the size of the complete skippable frame,\n which is always equal to its content size + 8 bytes for headers."] pub fn ZSTD_findFrameCompressedSize( src: *const ::core::ffi::c_void, srcSize: usize, ) -> usize; } extern "C" { pub fn ZSTD_compressBound(srcSize: usize) -> usize; } extern "C" { pub fn ZSTD_isError(result: usize) -> ::core::ffi::c_uint; } extern "C" { pub fn ZSTD_getErrorCode(functionResult: usize) -> ZSTD_ErrorCode; } extern "C" { pub fn ZSTD_getErrorName(result: usize) -> *const ::core::ffi::c_char; } extern "C" { pub fn ZSTD_minCLevel() -> ::core::ffi::c_int; } extern "C" { pub fn ZSTD_maxCLevel() -> ::core::ffi::c_int; } extern "C" { pub fn ZSTD_defaultCLevel() -> ::core::ffi::c_int; } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct ZSTD_CCtx_s { _unused: [u8; 0], } #[doc = " Explicit context"] pub type ZSTD_CCtx = ZSTD_CCtx_s; extern "C" { pub fn ZSTD_createCCtx() -> *mut ZSTD_CCtx; } extern "C" { pub fn ZSTD_freeCCtx(cctx: *mut ZSTD_CCtx) -> usize; } extern "C" { #[doc = " ZSTD_compressCCtx() :\n Same as ZSTD_compress(), using an explicit ZSTD_CCtx.\n Important : in order to mirror `ZSTD_compress()` behavior,\n this function compresses at the requested compression level,\n __ignoring any other advanced parameter__ .\n If any advanced parameter was set using the advanced API,\n they will all be reset. 
Only @compressionLevel remains."] pub fn ZSTD_compressCCtx( cctx: *mut ZSTD_CCtx, dst: *mut ::core::ffi::c_void, dstCapacity: usize, src: *const ::core::ffi::c_void, srcSize: usize, compressionLevel: ::core::ffi::c_int, ) -> usize; } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct ZSTD_DCtx_s { _unused: [u8; 0], } pub type ZSTD_DCtx = ZSTD_DCtx_s; extern "C" { pub fn ZSTD_createDCtx() -> *mut ZSTD_DCtx; } extern "C" { pub fn ZSTD_freeDCtx(dctx: *mut ZSTD_DCtx) -> usize; } extern "C" { #[doc = " ZSTD_decompressDCtx() :\n Same as ZSTD_decompress(),\n requires an allocated ZSTD_DCtx.\n Compatible with sticky parameters (see below)."] pub fn ZSTD_decompressDCtx( dctx: *mut ZSTD_DCtx, dst: *mut ::core::ffi::c_void, dstCapacity: usize, src: *const ::core::ffi::c_void, srcSize: usize, ) -> usize; } #[repr(u32)] #[doc = " Advanced compression API (Requires v1.4.0+)"] #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] pub enum ZSTD_strategy { ZSTD_fast = 1, ZSTD_dfast = 2, ZSTD_greedy = 3, ZSTD_lazy = 4, ZSTD_lazy2 = 5, ZSTD_btlazy2 = 6, ZSTD_btopt = 7, ZSTD_btultra = 8, ZSTD_btultra2 = 9, } #[repr(u32)] #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] pub enum ZSTD_cParameter { ZSTD_c_compressionLevel = 100, ZSTD_c_windowLog = 101, ZSTD_c_hashLog = 102, ZSTD_c_chainLog = 103, ZSTD_c_searchLog = 104, ZSTD_c_minMatch = 105, ZSTD_c_targetLength = 106, ZSTD_c_strategy = 107, ZSTD_c_targetCBlockSize = 130, ZSTD_c_enableLongDistanceMatching = 160, ZSTD_c_ldmHashLog = 161, ZSTD_c_ldmMinMatch = 162, ZSTD_c_ldmBucketSizeLog = 163, ZSTD_c_ldmHashRateLog = 164, ZSTD_c_contentSizeFlag = 200, ZSTD_c_checksumFlag = 201, ZSTD_c_dictIDFlag = 202, ZSTD_c_nbWorkers = 400, ZSTD_c_jobSize = 401, ZSTD_c_overlapLog = 402, ZSTD_c_experimentalParam1 = 500, ZSTD_c_experimentalParam2 = 10, ZSTD_c_experimentalParam3 = 1000, ZSTD_c_experimentalParam4 = 1001, ZSTD_c_experimentalParam5 = 1002, ZSTD_c_experimentalParam7 = 1004, ZSTD_c_experimentalParam8 = 1005, ZSTD_c_experimentalParam9 = 1006, 
// Non-sequential discriminants above (e.g. experimentalParam2 = 10, missing
// experimentalParam6) mirror upstream zstd.h aliases/retirements — do not
// "fix" them; they must match the C enum values.
ZSTD_c_experimentalParam10 = 1007, ZSTD_c_experimentalParam11 = 1008, ZSTD_c_experimentalParam12 = 1009, ZSTD_c_experimentalParam13 = 1010, ZSTD_c_experimentalParam14 = 1011, ZSTD_c_experimentalParam15 = 1012, ZSTD_c_experimentalParam16 = 1013, ZSTD_c_experimentalParam17 = 1014, ZSTD_c_experimentalParam18 = 1015, ZSTD_c_experimentalParam19 = 1016, ZSTD_c_experimentalParam20 = 1017, } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct ZSTD_bounds { pub error: usize, pub lowerBound: ::core::ffi::c_int, pub upperBound: ::core::ffi::c_int, } extern "C" { #[doc = " ZSTD_cParam_getBounds() :\n All parameters must belong to an interval with lower and upper bounds,\n otherwise they will either trigger an error or be automatically clamped.\n @return : a structure, ZSTD_bounds, which contains\n - an error status field, which must be tested using ZSTD_isError()\n - lower and upper bounds, both inclusive"] pub fn ZSTD_cParam_getBounds(cParam: ZSTD_cParameter) -> ZSTD_bounds; } extern "C" { #[doc = " ZSTD_CCtx_setParameter() :\n Set one compression parameter, selected by enum ZSTD_cParameter.\n All parameters have valid bounds. 
Bounds can be queried using ZSTD_cParam_getBounds().\n Providing a value beyond bound will either clamp it, or trigger an error (depending on parameter).\n Setting a parameter is generally only possible during frame initialization (before starting compression).\n Exception : when using multi-threading mode (nbWorkers >= 1),\n the following parameters can be updated _during_ compression (within same frame):\n => compressionLevel, hashLog, chainLog, searchLog, minMatch, targetLength and strategy.\n new parameters will be active for next job only (after a flush()).\n @return : an error code (which can be tested using ZSTD_isError())."] pub fn ZSTD_CCtx_setParameter( cctx: *mut ZSTD_CCtx, param: ZSTD_cParameter, value: ::core::ffi::c_int, ) -> usize; } extern "C" { #[doc = " ZSTD_CCtx_setPledgedSrcSize() :\n Total input data size to be compressed as a single frame.\n Value will be written in frame header, unless if explicitly forbidden using ZSTD_c_contentSizeFlag.\n This value will also be controlled at end of frame, and trigger an error if not respected.\n @result : 0, or an error code (which can be tested with ZSTD_isError()).\n Note 1 : pledgedSrcSize==0 actually means zero, aka an empty frame.\n In order to mean \"unknown content size\", pass constant ZSTD_CONTENTSIZE_UNKNOWN.\n ZSTD_CONTENTSIZE_UNKNOWN is default value for any new frame.\n Note 2 : pledgedSrcSize is only valid once, for the next frame.\n It's discarded at the end of the frame, and replaced by ZSTD_CONTENTSIZE_UNKNOWN.\n Note 3 : Whenever all input data is provided and consumed in a single round,\n for example with ZSTD_compress2(),\n or invoking immediately ZSTD_compressStream2(,,,ZSTD_e_end),\n this value is automatically overridden by srcSize instead."] pub fn ZSTD_CCtx_setPledgedSrcSize( cctx: *mut ZSTD_CCtx, pledgedSrcSize: ::core::ffi::c_ulonglong, ) -> usize; } #[repr(u32)] #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] pub enum ZSTD_ResetDirective { ZSTD_reset_session_only = 1, 
ZSTD_reset_parameters = 2, ZSTD_reset_session_and_parameters = 3, } extern "C" { #[doc = " ZSTD_CCtx_reset() :\n There are 2 different things that can be reset, independently or jointly :\n - The session : will stop compressing current frame, and make CCtx ready to start a new one.\n Useful after an error, or to interrupt any ongoing compression.\n Any internal data not yet flushed is cancelled.\n Compression parameters and dictionary remain unchanged.\n They will be used to compress next frame.\n Resetting session never fails.\n - The parameters : changes all parameters back to \"default\".\n This also removes any reference to any dictionary or external sequence producer.\n Parameters can only be changed between 2 sessions (i.e. no compression is currently ongoing)\n otherwise the reset fails, and function returns an error value (which can be tested using ZSTD_isError())\n - Both : similar to resetting the session, followed by resetting parameters."] pub fn ZSTD_CCtx_reset( cctx: *mut ZSTD_CCtx, reset: ZSTD_ResetDirective, ) -> usize; } extern "C" { #[doc = " ZSTD_compress2() :\n Behave the same as ZSTD_compressCCtx(), but compression parameters are set using the advanced API.\n (note that this entry point doesn't even expose a compression level parameter).\n ZSTD_compress2() always starts a new frame.\n Should cctx hold data from a previously unfinished frame, everything about it is forgotten.\n - Compression parameters are pushed into CCtx before starting compression, using ZSTD_CCtx_set*()\n - The function is always blocking, returns when compression is completed.\n NOTE: Providing `dstCapacity >= ZSTD_compressBound(srcSize)` guarantees that zstd will have\n enough space to successfully compress the data, though it is possible it fails for other reasons.\n @return : compressed size written into `dst` (<= `dstCapacity),\n or an error code if it fails (which can be tested using ZSTD_isError())."] pub fn ZSTD_compress2( cctx: *mut ZSTD_CCtx, dst: *mut 
::core::ffi::c_void, dstCapacity: usize, src: *const ::core::ffi::c_void, srcSize: usize, ) -> usize; } #[repr(u32)] #[doc = " Advanced decompression API (Requires v1.4.0+)"] #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] pub enum ZSTD_dParameter { ZSTD_d_windowLogMax = 100, ZSTD_d_experimentalParam1 = 1000, ZSTD_d_experimentalParam2 = 1001, ZSTD_d_experimentalParam3 = 1002, ZSTD_d_experimentalParam4 = 1003, ZSTD_d_experimentalParam5 = 1004, ZSTD_d_experimentalParam6 = 1005, } extern "C" { #[doc = " ZSTD_dParam_getBounds() :\n All parameters must belong to an interval with lower and upper bounds,\n otherwise they will either trigger an error or be automatically clamped.\n @return : a structure, ZSTD_bounds, which contains\n - an error status field, which must be tested using ZSTD_isError()\n - both lower and upper bounds, inclusive"] pub fn ZSTD_dParam_getBounds(dParam: ZSTD_dParameter) -> ZSTD_bounds; } extern "C" { #[doc = " ZSTD_DCtx_setParameter() :\n Set one compression parameter, selected by enum ZSTD_dParameter.\n All parameters have valid bounds. 
Bounds can be queried using ZSTD_dParam_getBounds().\n Providing a value beyond bound will either clamp it, or trigger an error (depending on parameter).\n Setting a parameter is only possible during frame initialization (before starting decompression).\n @return : 0, or an error code (which can be tested using ZSTD_isError())."] pub fn ZSTD_DCtx_setParameter( dctx: *mut ZSTD_DCtx, param: ZSTD_dParameter, value: ::core::ffi::c_int, ) -> usize; } extern "C" { #[doc = " ZSTD_DCtx_reset() :\n Return a DCtx to clean state.\n Session and parameters can be reset jointly or separately.\n Parameters can only be reset when no active frame is being decompressed.\n @return : 0, or an error code, which can be tested with ZSTD_isError()"] pub fn ZSTD_DCtx_reset( dctx: *mut ZSTD_DCtx, reset: ZSTD_ResetDirective, ) -> usize; } #[doc = " Streaming"] #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct ZSTD_inBuffer_s { #[doc = "< start of input buffer"] pub src: *const ::core::ffi::c_void, #[doc = "< size of input buffer"] pub size: usize, #[doc = "< position where reading stopped. Will be updated. Necessarily 0 <= pos <= size"] pub pos: usize, } #[doc = " Streaming"] pub type ZSTD_inBuffer = ZSTD_inBuffer_s; #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct ZSTD_outBuffer_s { #[doc = "< start of output buffer"] pub dst: *mut ::core::ffi::c_void, #[doc = "< size of output buffer"] pub size: usize, #[doc = "< position where writing stopped. Will be updated. 
Necessarily 0 <= pos <= size"] pub pos: usize, } pub type ZSTD_outBuffer = ZSTD_outBuffer_s; pub type ZSTD_CStream = ZSTD_CCtx; extern "C" { pub fn ZSTD_createCStream() -> *mut ZSTD_CStream; } extern "C" { pub fn ZSTD_freeCStream(zcs: *mut ZSTD_CStream) -> usize; } #[repr(u32)] #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] pub enum ZSTD_EndDirective { ZSTD_e_continue = 0, ZSTD_e_flush = 1, ZSTD_e_end = 2, } extern "C" { #[doc = " ZSTD_compressStream2() : Requires v1.4.0+\n Behaves about the same as ZSTD_compressStream, with additional control on end directive.\n - Compression parameters are pushed into CCtx before starting compression, using ZSTD_CCtx_set*()\n - Compression parameters cannot be changed once compression is started (save a list of exceptions in multi-threading mode)\n - output->pos must be <= dstCapacity, input->pos must be <= srcSize\n - output->pos and input->pos will be updated. They are guaranteed to remain below their respective limit.\n - endOp must be a valid directive\n - When nbWorkers==0 (default), function is blocking : it completes its job before returning to caller.\n - When nbWorkers>=1, function is non-blocking : it copies a portion of input, distributes jobs to internal worker threads, flush to output whatever is available,\n and then immediately returns, just indicating that there is some data remaining to be flushed.\n The function nonetheless guarantees forward progress : it will return only after it reads or write at least 1+ byte.\n - Exception : if the first call requests a ZSTD_e_end directive and provides enough dstCapacity, the function delegates to ZSTD_compress2() which is always blocking.\n - @return provides a minimum amount of data remaining to be flushed from internal buffers\n or an error code, which can be tested using ZSTD_isError().\n if @return != 0, flush is not fully completed, there is still some data left within internal buffers.\n This is useful for ZSTD_e_flush, since in this case more flushes are 
necessary to empty all buffers.\n For ZSTD_e_end, @return == 0 when internal buffers are fully flushed and frame is completed.\n - after a ZSTD_e_end directive, if internal buffer is not fully flushed (@return != 0),\n only ZSTD_e_end or ZSTD_e_flush operations are allowed.\n Before starting a new compression job, or changing compression parameters,\n it is required to fully flush internal buffers.\n - note: if an operation ends with an error, it may leave @cctx in an undefined state.\n Therefore, it's UB to invoke ZSTD_compressStream2() of ZSTD_compressStream() on such a state.\n In order to be re-employed after an error, a state must be reset,\n which can be done explicitly (ZSTD_CCtx_reset()),\n or is sometimes implied by methods starting a new compression job (ZSTD_initCStream(), ZSTD_compressCCtx())"] pub fn ZSTD_compressStream2( cctx: *mut ZSTD_CCtx, output: *mut ZSTD_outBuffer, input: *mut ZSTD_inBuffer, endOp: ZSTD_EndDirective, ) -> usize; } extern "C" { pub fn ZSTD_CStreamInSize() -> usize; } extern "C" { pub fn ZSTD_CStreamOutSize() -> usize; } extern "C" { #[doc = " Equivalent to:\n\n ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);\n ZSTD_CCtx_refCDict(zcs, NULL); // clear the dictionary (if any)\n ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel);\n\n Note that ZSTD_initCStream() clears any previously set dictionary. Use the new API\n to compress with a dictionary."] pub fn ZSTD_initCStream( zcs: *mut ZSTD_CStream, compressionLevel: ::core::ffi::c_int, ) -> usize; } extern "C" { #[doc = " Alternative for ZSTD_compressStream2(zcs, output, input, ZSTD_e_continue).\n NOTE: The return value is different. ZSTD_compressStream() returns a hint for\n the next read size (if non-zero and not an error). 
ZSTD_compressStream2()\n returns the minimum nb of bytes left to flush (if non-zero and not an error)."] pub fn ZSTD_compressStream( zcs: *mut ZSTD_CStream, output: *mut ZSTD_outBuffer, input: *mut ZSTD_inBuffer, ) -> usize; } extern "C" { #[doc = " Equivalent to ZSTD_compressStream2(zcs, output, &emptyInput, ZSTD_e_flush)."] pub fn ZSTD_flushStream( zcs: *mut ZSTD_CStream, output: *mut ZSTD_outBuffer, ) -> usize; } extern "C" { #[doc = " Equivalent to ZSTD_compressStream2(zcs, output, &emptyInput, ZSTD_e_end)."] pub fn ZSTD_endStream( zcs: *mut ZSTD_CStream, output: *mut ZSTD_outBuffer, ) -> usize; } pub type ZSTD_DStream = ZSTD_DCtx; extern "C" { pub fn ZSTD_createDStream() -> *mut ZSTD_DStream; } extern "C" { pub fn ZSTD_freeDStream(zds: *mut ZSTD_DStream) -> usize; } extern "C" { #[doc = " ZSTD_initDStream() :\n Initialize/reset DStream state for new decompression operation.\n Call before new decompression operation using same DStream.\n\n Note : This function is redundant with the advanced API and equivalent to:\n ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);\n ZSTD_DCtx_refDDict(zds, NULL);"] pub fn ZSTD_initDStream(zds: *mut ZSTD_DStream) -> usize; } extern "C" { #[doc = " ZSTD_decompressStream() :\n Streaming decompression function.\n Call repetitively to consume full input updating it as necessary.\n Function will update both input and output `pos` fields exposing current state via these fields:\n - `input.pos < input.size`, some input remaining and caller should provide remaining input\n on the next call.\n - `output.pos < output.size`, decoder flushed internal output buffer.\n - `output.pos == output.size`, unflushed data potentially present in the internal buffers,\n check ZSTD_decompressStream() @return value,\n if > 0, invoke it again to flush remaining data to output.\n Note : with no additional input, amount of data flushed <= ZSTD_BLOCKSIZE_MAX.\n\n @return : 0 when a frame is completely decoded and fully flushed,\n or an error code, which can be 
tested using ZSTD_isError(),\n or any other value > 0, which means there is some decoding or flushing to do to complete current frame.\n\n Note: when an operation returns with an error code, the @zds state may be left in undefined state.\n It's UB to invoke `ZSTD_decompressStream()` on such a state.\n In order to re-use such a state, it must be first reset,\n which can be done explicitly (`ZSTD_DCtx_reset()`),\n or is implied for operations starting some new decompression job (`ZSTD_initDStream`, `ZSTD_decompressDCtx()`, `ZSTD_decompress_usingDict()`)"] pub fn ZSTD_decompressStream( zds: *mut ZSTD_DStream, output: *mut ZSTD_outBuffer, input: *mut ZSTD_inBuffer, ) -> usize; } extern "C" { pub fn ZSTD_DStreamInSize() -> usize; } extern "C" { pub fn ZSTD_DStreamOutSize() -> usize; } extern "C" { #[doc = " Simple dictionary API\n/\n/*! ZSTD_compress_usingDict() :\n Compression at an explicit compression level using a Dictionary.\n A dictionary can be any arbitrary data segment (also called a prefix),\n or a buffer with specified information (see zdict.h).\n Note : This function loads the dictionary, resulting in significant startup delay.\n It's intended for a dictionary used only once.\n Note 2 : When `dict == NULL || dictSize < 8` no dictionary is used."] pub fn ZSTD_compress_usingDict( ctx: *mut ZSTD_CCtx, dst: *mut ::core::ffi::c_void, dstCapacity: usize, src: *const ::core::ffi::c_void, srcSize: usize, dict: *const ::core::ffi::c_void, dictSize: usize, compressionLevel: ::core::ffi::c_int, ) -> usize; } extern "C" { #[doc = " ZSTD_decompress_usingDict() :\n Decompression using a known Dictionary.\n Dictionary must be identical to the one used during compression.\n Note : This function loads the dictionary, resulting in significant startup delay.\n It's intended for a dictionary used only once.\n Note : When `dict == NULL || dictSize < 8` no dictionary is used."] pub fn ZSTD_decompress_usingDict( dctx: *mut ZSTD_DCtx, dst: *mut ::core::ffi::c_void, dstCapacity: 
usize, src: *const ::core::ffi::c_void, srcSize: usize, dict: *const ::core::ffi::c_void, dictSize: usize, ) -> usize; } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct ZSTD_CDict_s { _unused: [u8; 0], } #[doc = " Bulk processing dictionary API"] pub type ZSTD_CDict = ZSTD_CDict_s; extern "C" { #[doc = " ZSTD_createCDict() :\n When compressing multiple messages or blocks using the same dictionary,\n it's recommended to digest the dictionary only once, since it's a costly operation.\n ZSTD_createCDict() will create a state from digesting a dictionary.\n The resulting state can be used for future compression operations with very limited startup cost.\n ZSTD_CDict can be created once and shared by multiple threads concurrently, since its usage is read-only.\n @dictBuffer can be released after ZSTD_CDict creation, because its content is copied within CDict.\n Note 1 : Consider experimental function `ZSTD_createCDict_byReference()` if you prefer to not duplicate @dictBuffer content.\n Note 2 : A ZSTD_CDict can be created from an empty @dictBuffer,\n in which case the only thing that it transports is the @compressionLevel.\n This can be useful in a pipeline featuring ZSTD_compress_usingCDict() exclusively,\n expecting a ZSTD_CDict parameter with any data, including those without a known dictionary."] pub fn ZSTD_createCDict( dictBuffer: *const ::core::ffi::c_void, dictSize: usize, compressionLevel: ::core::ffi::c_int, ) -> *mut ZSTD_CDict; } extern "C" { #[doc = " ZSTD_freeCDict() :\n Function frees memory allocated by ZSTD_createCDict().\n If a NULL pointer is passed, no operation is performed."] pub fn ZSTD_freeCDict(CDict: *mut ZSTD_CDict) -> usize; } extern "C" { #[doc = " ZSTD_compress_usingCDict() :\n Compression using a digested Dictionary.\n Recommended when same dictionary is used multiple times.\n Note : compression level is _decided at dictionary creation time_,\n and frame parameters are hardcoded (dictID=yes, contentSize=yes, checksum=no)"] pub fn 
ZSTD_compress_usingCDict( cctx: *mut ZSTD_CCtx, dst: *mut ::core::ffi::c_void, dstCapacity: usize, src: *const ::core::ffi::c_void, srcSize: usize, cdict: *const ZSTD_CDict, ) -> usize; } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct ZSTD_DDict_s { _unused: [u8; 0], } pub type ZSTD_DDict = ZSTD_DDict_s; extern "C" { #[doc = " ZSTD_createDDict() :\n Create a digested dictionary, ready to start decompression operation without startup delay.\n dictBuffer can be released after DDict creation, as its content is copied inside DDict."] pub fn ZSTD_createDDict( dictBuffer: *const ::core::ffi::c_void, dictSize: usize, ) -> *mut ZSTD_DDict; } extern "C" { #[doc = " ZSTD_freeDDict() :\n Function frees memory allocated with ZSTD_createDDict()\n If a NULL pointer is passed, no operation is performed."] pub fn ZSTD_freeDDict(ddict: *mut ZSTD_DDict) -> usize; } extern "C" { #[doc = " ZSTD_decompress_usingDDict() :\n Decompression using a digested Dictionary.\n Recommended when same dictionary is used multiple times."] pub fn ZSTD_decompress_usingDDict( dctx: *mut ZSTD_DCtx, dst: *mut ::core::ffi::c_void, dstCapacity: usize, src: *const ::core::ffi::c_void, srcSize: usize, ddict: *const ZSTD_DDict, ) -> usize; } extern "C" { #[doc = " ZSTD_getDictID_fromDict() : Requires v1.4.0+\n Provides the dictID stored within dictionary.\n if @return == 0, the dictionary is not conformant with Zstandard specification.\n It can still be loaded, but as a content-only dictionary."] pub fn ZSTD_getDictID_fromDict( dict: *const ::core::ffi::c_void, dictSize: usize, ) -> ::core::ffi::c_uint; } extern "C" { #[doc = " ZSTD_getDictID_fromCDict() : Requires v1.5.0+\n Provides the dictID of the dictionary loaded into `cdict`.\n If @return == 0, the dictionary is not conformant to Zstandard specification, or empty.\n Non-conformant dictionaries can still be loaded, but as content-only dictionaries."] pub fn ZSTD_getDictID_fromCDict( cdict: *const ZSTD_CDict, ) -> ::core::ffi::c_uint; } extern "C" { 
#[doc = " ZSTD_getDictID_fromDDict() : Requires v1.4.0+\n Provides the dictID of the dictionary loaded into `ddict`.\n If @return == 0, the dictionary is not conformant to Zstandard specification, or empty.\n Non-conformant dictionaries can still be loaded, but as content-only dictionaries."] pub fn ZSTD_getDictID_fromDDict( ddict: *const ZSTD_DDict, ) -> ::core::ffi::c_uint; }
extern "C" { #[doc = " ZSTD_getDictID_fromFrame() : Requires v1.4.0+\n Provides the dictID required to decompressed the frame stored within `src`.\n If @return == 0, the dictID could not be decoded.\n This could for one of the following reasons :\n - The frame does not require a dictionary to be decoded (most common case).\n - The frame was built with dictID intentionally removed. Whatever dictionary is necessary is a hidden piece of information.\n Note : this use case also happens when using a non-conformant dictionary.\n - `srcSize` is too small, and as a result, the frame header could not be decoded (only possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`).\n - This is not a Zstandard frame.\n When identifying the exact failure cause, it's possible to use ZSTD_getFrameHeader(), which will provide a more precise error code."] pub fn ZSTD_getDictID_fromFrame( src: *const ::core::ffi::c_void, srcSize: usize, ) -> ::core::ffi::c_uint; }
// Advanced "sticky" dictionary/prefix attachment API (compression side).
extern "C" { #[doc = " ZSTD_CCtx_loadDictionary() : Requires v1.4.0+\n Create an internal CDict from `dict` buffer.\n Decompression will have to use same dictionary.\n @result : 0, or an error code (which can be tested with ZSTD_isError()).\n Special: Loading a NULL (or 0-size) dictionary invalidates previous dictionary,\n meaning \"return to no-dictionary mode\".\n Note 1 : Dictionary is sticky, it will be used for all future compressed frames,\n until parameters are reset, a new dictionary is loaded, or the dictionary\n is explicitly invalidated by loading a NULL dictionary.\n Note 2 : Loading a dictionary involves building tables.\n It's also a CPU consuming 
operation, with non-negligible impact on latency.\n Tables are dependent on compression parameters, and for this reason,\n compression parameters can no longer be changed after loading a dictionary.\n Note 3 :`dict` content will be copied internally.\n Use experimental ZSTD_CCtx_loadDictionary_byReference() to reference content instead.\n In such a case, dictionary buffer must outlive its users.\n Note 4 : Use ZSTD_CCtx_loadDictionary_advanced()\n to precisely select how dictionary content must be interpreted.\n Note 5 : This method does not benefit from LDM (long distance mode).\n If you want to employ LDM on some large dictionary content,\n prefer employing ZSTD_CCtx_refPrefix() described below."] pub fn ZSTD_CCtx_loadDictionary( cctx: *mut ZSTD_CCtx, dict: *const ::core::ffi::c_void, dictSize: usize, ) -> usize; }
extern "C" { #[doc = " ZSTD_CCtx_refCDict() : Requires v1.4.0+\n Reference a prepared dictionary, to be used for all future compressed frames.\n Note that compression parameters are enforced from within CDict,\n and supersede any compression parameter previously set within CCtx.\n The parameters ignored are labelled as \"superseded-by-cdict\" in the ZSTD_cParameter enum docs.\n The ignored parameters will be used again if the CCtx is returned to no-dictionary mode.\n The dictionary will remain valid for future compressed frames using same CCtx.\n @result : 0, or an error code (which can be tested with ZSTD_isError()).\n Special : Referencing a NULL CDict means \"return to no-dictionary mode\".\n Note 1 : Currently, only one dictionary can be managed.\n Referencing a new dictionary effectively \"discards\" any previous one.\n Note 2 : CDict is just referenced, its lifetime must outlive its usage within CCtx."] pub fn ZSTD_CCtx_refCDict( cctx: *mut ZSTD_CCtx, cdict: *const ZSTD_CDict, ) -> usize; }
extern "C" { #[doc = " ZSTD_CCtx_refPrefix() : Requires v1.4.0+\n Reference a prefix (single-usage dictionary) for next compressed frame.\n A prefix is **only 
used once**. Tables are discarded at end of frame (ZSTD_e_end).\n Decompression will need same prefix to properly regenerate data.\n Compressing with a prefix is similar in outcome as performing a diff and compressing it,\n but performs much faster, especially during decompression (compression speed is tunable with compression level).\n This method is compatible with LDM (long distance mode).\n @result : 0, or an error code (which can be tested with ZSTD_isError()).\n Special: Adding any prefix (including NULL) invalidates any previous prefix or dictionary\n Note 1 : Prefix buffer is referenced. It **must** outlive compression.\n Its content must remain unmodified during compression.\n Note 2 : If the intention is to diff some large src data blob with some prior version of itself,\n ensure that the window size is large enough to contain the entire source.\n See ZSTD_c_windowLog.\n Note 3 : Referencing a prefix involves building tables, which are dependent on compression parameters.\n It's a CPU consuming operation, with non-negligible impact on latency.\n If there is a need to use the same prefix multiple times, consider loadDictionary instead.\n Note 4 : By default, the prefix is interpreted as raw content (ZSTD_dct_rawContent).\n Use experimental ZSTD_CCtx_refPrefix_advanced() to alter dictionary interpretation."] pub fn ZSTD_CCtx_refPrefix( cctx: *mut ZSTD_CCtx, prefix: *const ::core::ffi::c_void, prefixSize: usize, ) -> usize; }
// Decompression-side counterparts of the dictionary/prefix attachment API.
extern "C" { #[doc = " ZSTD_DCtx_loadDictionary() : Requires v1.4.0+\n Create an internal DDict from dict buffer, to be used to decompress all future frames.\n The dictionary remains valid for all future frames, until explicitly invalidated, or\n a new dictionary is loaded.\n @result : 0, or an error code (which can be tested with ZSTD_isError()).\n Special : Adding a NULL (or 0-size) dictionary invalidates any previous dictionary,\n meaning \"return to no-dictionary mode\".\n Note 1 : Loading a dictionary involves building tables,\n 
which has a non-negligible impact on CPU usage and latency.\n It's recommended to \"load once, use many times\", to amortize the cost\n Note 2 :`dict` content will be copied internally, so `dict` can be released after loading.\n Use ZSTD_DCtx_loadDictionary_byReference() to reference dictionary content instead.\n Note 3 : Use ZSTD_DCtx_loadDictionary_advanced() to take control of\n how dictionary content is loaded and interpreted."] pub fn ZSTD_DCtx_loadDictionary( dctx: *mut ZSTD_DCtx, dict: *const ::core::ffi::c_void, dictSize: usize, ) -> usize; }
extern "C" { #[doc = " ZSTD_DCtx_refDDict() : Requires v1.4.0+\n Reference a prepared dictionary, to be used to decompress next frames.\n The dictionary remains active for decompression of future frames using same DCtx.\n\n If called with ZSTD_d_refMultipleDDicts enabled, repeated calls of this function\n will store the DDict references in a table, and the DDict used for decompression\n will be determined at decompression time, as per the dict ID in the frame.\n The memory for the table is allocated on the first call to refDDict, and can be\n freed with ZSTD_freeDCtx().\n\n If called with ZSTD_d_refMultipleDDicts disabled (the default), only one dictionary\n will be managed, and referencing a dictionary effectively \"discards\" any previous one.\n\n @result : 0, or an error code (which can be tested with ZSTD_isError()).\n Special: referencing a NULL DDict means \"return to no-dictionary mode\".\n Note 2 : DDict is just referenced, its lifetime must outlive its usage from DCtx."] pub fn ZSTD_DCtx_refDDict( dctx: *mut ZSTD_DCtx, ddict: *const ZSTD_DDict, ) -> usize; }
extern "C" { #[doc = " ZSTD_DCtx_refPrefix() : Requires v1.4.0+\n Reference a prefix (single-usage dictionary) to decompress next frame.\n This is the reverse operation of ZSTD_CCtx_refPrefix(),\n and must use the same prefix as the one used during compression.\n Prefix is **only used once**. 
Reference is discarded at end of frame.\n End of frame is reached when ZSTD_decompressStream() returns 0.\n @result : 0, or an error code (which can be tested with ZSTD_isError()).\n Note 1 : Adding any prefix (including NULL) invalidates any previously set prefix or dictionary\n Note 2 : Prefix buffer is referenced. It **must** outlive decompression.\n Prefix buffer must remain unmodified up to the end of frame,\n reached when ZSTD_decompressStream() returns 0.\n Note 3 : By default, the prefix is treated as raw content (ZSTD_dct_rawContent).\n Use ZSTD_CCtx_refPrefix_advanced() to alter dictMode (Experimental section)\n Note 4 : Referencing a raw content prefix has almost no cpu nor memory cost.\n A full dictionary is more costly, as it requires building tables."] pub fn ZSTD_DCtx_refPrefix( dctx: *mut ZSTD_DCtx, prefix: *const ::core::ffi::c_void, prefixSize: usize, ) -> usize; }
// Memory-usage introspection for each object kind (values can change over time).
extern "C" { #[doc = " ZSTD_sizeof_*() : Requires v1.4.0+\n These functions give the _current_ memory usage of selected object.\n Note that object memory usage can evolve (increase or decrease) over time."] pub fn ZSTD_sizeof_CCtx(cctx: *const ZSTD_CCtx) -> usize; }
extern "C" { pub fn ZSTD_sizeof_DCtx(dctx: *const ZSTD_DCtx) -> usize; }
extern "C" { pub fn ZSTD_sizeof_CStream(zcs: *const ZSTD_CStream) -> usize; }
extern "C" { pub fn ZSTD_sizeof_DStream(zds: *const ZSTD_DStream) -> usize; }
extern "C" { pub fn ZSTD_sizeof_CDict(cdict: *const ZSTD_CDict) -> usize; }
extern "C" { pub fn ZSTD_sizeof_DDict(ddict: *const ZSTD_DDict) -> usize; }
/* This file is auto-generated from the public API of the zstd library. It is released under the same BSD license. BSD License For Zstandard software Copyright (c) Meta Platforms, Inc. and affiliates. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name Facebook, nor Meta, nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /* automatically generated by rust-bindgen 0.71.1 */
// NOTE(review): auto-generated constants mirroring the #define values in zstd.h
// (version 1.5.7). Do not hand-edit; regenerate with bindgen instead.
// Library version: ZSTD_VERSION_NUMBER == MAJOR*10000 + MINOR*100 + RELEASE.
pub const ZSTD_VERSION_MAJOR: u32 = 1; pub const ZSTD_VERSION_MINOR: u32 = 5; pub const ZSTD_VERSION_RELEASE: u32 = 7; pub const ZSTD_VERSION_NUMBER: u32 = 10507;
pub const ZSTD_CLEVEL_DEFAULT: u32 = 3;
// Frame magic numbers (0xFD2FB528 etc., rendered in decimal by bindgen).
pub const ZSTD_MAGICNUMBER: u32 = 4247762216; pub const ZSTD_MAGIC_DICTIONARY: u32 = 3962610743; pub const ZSTD_MAGIC_SKIPPABLE_START: u32 = 407710288; pub const ZSTD_MAGIC_SKIPPABLE_MASK: u32 = 4294967280;
pub const ZSTD_BLOCKSIZELOG_MAX: u32 = 17; pub const ZSTD_BLOCKSIZE_MAX: u32 = 131072;
// Sentinels returned by ZSTD_getFrameContentSize().
pub const ZSTD_CONTENTSIZE_UNKNOWN: i32 = -1; pub const ZSTD_CONTENTSIZE_ERROR: i32 = -2;
pub const ZSTD_FRAMEHEADERSIZE_MAX: u32 = 18; pub const ZSTD_SKIPPABLEHEADERSIZE: u32 = 8;
// Valid bounds for the advanced compression parameters (ZSTD_cParameter).
pub const ZSTD_WINDOWLOG_MAX_32: u32 = 30; pub const ZSTD_WINDOWLOG_MAX_64: u32 = 31; pub const ZSTD_WINDOWLOG_MIN: u32 = 10; pub const ZSTD_HASHLOG_MIN: u32 = 6; pub const ZSTD_CHAINLOG_MAX_32: u32 = 29; pub const ZSTD_CHAINLOG_MAX_64: u32 = 30; pub const ZSTD_CHAINLOG_MIN: u32 = 6; pub const ZSTD_SEARCHLOG_MIN: u32 = 1; pub const ZSTD_MINMATCH_MAX: u32 = 7; pub const ZSTD_MINMATCH_MIN: u32 = 3; pub const ZSTD_TARGETLENGTH_MAX: u32 = 131072; pub const ZSTD_TARGETLENGTH_MIN: u32 = 0; pub const ZSTD_BLOCKSIZE_MAX_MIN: u32 = 1024; pub const ZSTD_OVERLAPLOG_MIN: u32 = 0; pub const ZSTD_OVERLAPLOG_MAX: u32 = 9; pub const ZSTD_WINDOWLOG_LIMIT_DEFAULT: u32 = 27;
// Long-distance-matching (LDM) parameter bounds.
pub const ZSTD_LDM_HASHLOG_MIN: u32 = 6; pub const ZSTD_LDM_MINMATCH_MIN: u32 = 4; pub const ZSTD_LDM_MINMATCH_MAX: u32 = 4096; pub const ZSTD_LDM_BUCKETSIZELOG_MIN: u32 = 1; pub const ZSTD_LDM_BUCKETSIZELOG_MAX: u32 = 8; pub const ZSTD_LDM_HASHRATELOG_MIN: u32 = 0;
pub const ZSTD_TARGETCBLOCKSIZE_MIN: u32 = 1340; pub const ZSTD_TARGETCBLOCKSIZE_MAX: u32 = 131072; pub const ZSTD_SRCSIZEHINT_MIN: u32 = 0; pub const ZSTD_BLOCKSPLITTER_LEVEL_MAX: u32 = 6;
// Error codes returned by ZSTD_getErrorCode(); values match the C enum ZSTD_ErrorCode.
#[repr(u32)] #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] pub enum ZSTD_ErrorCode { ZSTD_error_no_error = 0, ZSTD_error_GENERIC = 1, 
// Remaining ZSTD_ErrorCode variants (discriminants are fixed by the C ABI; gaps are intentional).
ZSTD_error_prefix_unknown = 10, ZSTD_error_version_unsupported = 12, ZSTD_error_frameParameter_unsupported = 14, ZSTD_error_frameParameter_windowTooLarge = 16, ZSTD_error_corruption_detected = 20, ZSTD_error_checksum_wrong = 22, ZSTD_error_literals_headerWrong = 24, ZSTD_error_dictionary_corrupted = 30, ZSTD_error_dictionary_wrong = 32, ZSTD_error_dictionaryCreation_failed = 34, ZSTD_error_parameter_unsupported = 40, ZSTD_error_parameter_combination_unsupported = 41, ZSTD_error_parameter_outOfBound = 42, ZSTD_error_tableLog_tooLarge = 44, ZSTD_error_maxSymbolValue_tooLarge = 46, ZSTD_error_maxSymbolValue_tooSmall = 48, ZSTD_error_cannotProduce_uncompressedBlock = 49, ZSTD_error_stabilityCondition_notRespected = 50, ZSTD_error_stage_wrong = 60, ZSTD_error_init_missing = 62, ZSTD_error_memory_allocation = 64, ZSTD_error_workSpace_tooSmall = 66, ZSTD_error_dstSize_tooSmall = 70, ZSTD_error_srcSize_wrong = 72, ZSTD_error_dstBuffer_null = 74, ZSTD_error_noForwardProgress_destFull = 80, ZSTD_error_noForwardProgress_inputEmpty = 82, ZSTD_error_frameIndex_tooLarge = 100, ZSTD_error_seekableIO = 102, ZSTD_error_dstBuffer_wrong = 104, ZSTD_error_srcBuffer_wrong = 105, ZSTD_error_sequenceProducer_failed = 106, ZSTD_error_externalSequences_invalid = 107, ZSTD_error_maxCode = 120, }
extern "C" { pub fn ZSTD_getErrorString( code: ZSTD_ErrorCode, ) -> *const ::core::ffi::c_char; }
// Runtime version queries (may differ from the compile-time constants above).
extern "C" { #[doc = " ZSTD_versionNumber() :\n Return runtime library version, the value is (MAJOR*100*100 + MINOR*100 + RELEASE)."] pub fn ZSTD_versionNumber() -> ::core::ffi::c_uint; }
extern "C" { #[doc = " ZSTD_versionString() :\n Return runtime library version, like \"1.4.5\". Requires v1.3.0+."] pub fn ZSTD_versionString() -> *const ::core::ffi::c_char; }
// One-shot (simple) compression / decompression API.
extern "C" { #[doc = " Simple Core API\n/\n/*! 
ZSTD_compress() :\n Compresses `src` content as a single zstd compressed frame into already allocated `dst`.\n NOTE: Providing `dstCapacity >= ZSTD_compressBound(srcSize)` guarantees that zstd will have\n enough space to successfully compress the data.\n @return : compressed size written into `dst` (<= `dstCapacity),\n or an error code if it fails (which can be tested using ZSTD_isError())."] pub fn ZSTD_compress( dst: *mut ::core::ffi::c_void, dstCapacity: usize, src: *const ::core::ffi::c_void, srcSize: usize, compressionLevel: ::core::ffi::c_int, ) -> usize; }
extern "C" { #[doc = " ZSTD_decompress() :\n `compressedSize` : must be the _exact_ size of some number of compressed and/or skippable frames.\n Multiple compressed frames can be decompressed at once with this method.\n The result will be the concatenation of all decompressed frames, back to back.\n `dstCapacity` is an upper bound of originalSize to regenerate.\n First frame's decompressed size can be extracted using ZSTD_getFrameContentSize().\n If maximum upper bound isn't known, prefer using streaming mode to decompress data.\n @return : the number of bytes decompressed into `dst` (<= `dstCapacity`),\n or an errorCode if it fails (which can be tested using ZSTD_isError())."] pub fn ZSTD_decompress( dst: *mut ::core::ffi::c_void, dstCapacity: usize, src: *const ::core::ffi::c_void, compressedSize: usize, ) -> usize; }
// Returns content size, or the ZSTD_CONTENTSIZE_* sentinels defined above.
extern "C" { pub fn ZSTD_getFrameContentSize( src: *const ::core::ffi::c_void, srcSize: usize, ) -> ::core::ffi::c_ulonglong; }
extern "C" { #[doc = " ZSTD_getDecompressedSize() (obsolete):\n This function is now obsolete, in favor of ZSTD_getFrameContentSize().\n Both functions work the same way, but ZSTD_getDecompressedSize() blends\n \"empty\", \"unknown\" and \"error\" results to the same return value (0),\n while ZSTD_getFrameContentSize() gives them separate return values.\n @return : decompressed size of `src` frame content _if known and not empty_, 0 otherwise."] pub fn 
ZSTD_getDecompressedSize( src: *const ::core::ffi::c_void, srcSize: usize, ) -> ::core::ffi::c_ulonglong; }
extern "C" { #[doc = " ZSTD_findFrameCompressedSize() : Requires v1.4.0+\n `src` should point to the start of a ZSTD frame or skippable frame.\n `srcSize` must be >= first frame size\n @return : the compressed size of the first frame starting at `src`,\n suitable to pass as `srcSize` to `ZSTD_decompress` or similar,\n or an error code if input is invalid\n Note 1: this method is called _find*() because it's not enough to read the header,\n it may have to scan through the frame's content, to reach its end.\n Note 2: this method also works with Skippable Frames. In which case,\n it returns the size of the complete skippable frame,\n which is always equal to its content size + 8 bytes for headers."] pub fn ZSTD_findFrameCompressedSize( src: *const ::core::ffi::c_void, srcSize: usize, ) -> usize; }
// Helper functions: worst-case bound, error testing, and compression-level range.
extern "C" { pub fn ZSTD_compressBound(srcSize: usize) -> usize; }
extern "C" { pub fn ZSTD_isError(result: usize) -> ::core::ffi::c_uint; }
extern "C" { pub fn ZSTD_getErrorCode(functionResult: usize) -> ZSTD_ErrorCode; }
extern "C" { pub fn ZSTD_getErrorName(result: usize) -> *const ::core::ffi::c_char; }
extern "C" { pub fn ZSTD_minCLevel() -> ::core::ffi::c_int; }
extern "C" { pub fn ZSTD_maxCLevel() -> ::core::ffi::c_int; }
extern "C" { pub fn ZSTD_defaultCLevel() -> ::core::ffi::c_int; }
// Opaque compression-context handle and its lifecycle functions.
#[repr(C)] #[derive(Debug, Copy, Clone)] pub struct ZSTD_CCtx_s { _unused: [u8; 0], }
#[doc = " Explicit context"] pub type ZSTD_CCtx = ZSTD_CCtx_s;
extern "C" { pub fn ZSTD_createCCtx() -> *mut ZSTD_CCtx; }
extern "C" { pub fn ZSTD_freeCCtx(cctx: *mut ZSTD_CCtx) -> usize; }
extern "C" { #[doc = " ZSTD_compressCCtx() :\n Same as ZSTD_compress(), using an explicit ZSTD_CCtx.\n Important : in order to mirror `ZSTD_compress()` behavior,\n this function compresses at the requested compression level,\n __ignoring any other advanced parameter__ .\n If any advanced parameter was set 
using the advanced API,\n they will all be reset. Only @compressionLevel remains."] pub fn ZSTD_compressCCtx( cctx: *mut ZSTD_CCtx, dst: *mut ::core::ffi::c_void, dstCapacity: usize, src: *const ::core::ffi::c_void, srcSize: usize, compressionLevel: ::core::ffi::c_int, ) -> usize; }
// Opaque decompression-context handle and its lifecycle functions.
#[repr(C)] #[derive(Debug, Copy, Clone)] pub struct ZSTD_DCtx_s { _unused: [u8; 0], } pub type ZSTD_DCtx = ZSTD_DCtx_s;
extern "C" { pub fn ZSTD_createDCtx() -> *mut ZSTD_DCtx; }
extern "C" { pub fn ZSTD_freeDCtx(dctx: *mut ZSTD_DCtx) -> usize; }
extern "C" { #[doc = " ZSTD_decompressDCtx() :\n Same as ZSTD_decompress(),\n requires an allocated ZSTD_DCtx.\n Compatible with sticky parameters (see below)."] pub fn ZSTD_decompressDCtx( dctx: *mut ZSTD_DCtx, dst: *mut ::core::ffi::c_void, dstCapacity: usize, src: *const ::core::ffi::c_void, srcSize: usize, ) -> usize; }
// Match-finder strategies, ordered from fastest to strongest.
#[repr(u32)] #[doc = " Advanced compression API (Requires v1.4.0+)"] #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] pub enum ZSTD_strategy { ZSTD_fast = 1, ZSTD_dfast = 2, ZSTD_greedy = 3, ZSTD_lazy = 4, ZSTD_lazy2 = 5, ZSTD_btlazy2 = 6, ZSTD_btopt = 7, ZSTD_btultra = 8, ZSTD_btultra2 = 9, }
// Advanced compression parameters; discriminants match the C enum
// (experimentalParam2 == 10 is intentional in zstd.h, not a typo).
#[repr(u32)] #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] pub enum ZSTD_cParameter { ZSTD_c_compressionLevel = 100, ZSTD_c_windowLog = 101, ZSTD_c_hashLog = 102, ZSTD_c_chainLog = 103, ZSTD_c_searchLog = 104, ZSTD_c_minMatch = 105, ZSTD_c_targetLength = 106, ZSTD_c_strategy = 107, ZSTD_c_targetCBlockSize = 130, ZSTD_c_enableLongDistanceMatching = 160, ZSTD_c_ldmHashLog = 161, ZSTD_c_ldmMinMatch = 162, ZSTD_c_ldmBucketSizeLog = 163, ZSTD_c_ldmHashRateLog = 164, ZSTD_c_contentSizeFlag = 200, ZSTD_c_checksumFlag = 201, ZSTD_c_dictIDFlag = 202, ZSTD_c_nbWorkers = 400, ZSTD_c_jobSize = 401, ZSTD_c_overlapLog = 402, ZSTD_c_experimentalParam1 = 500, ZSTD_c_experimentalParam2 = 10, ZSTD_c_experimentalParam3 = 1000, ZSTD_c_experimentalParam4 = 1001, ZSTD_c_experimentalParam5 = 1002, ZSTD_c_experimentalParam7 = 1004, 
// Tail of ZSTD_cParameter: reserved experimental parameter slots.
ZSTD_c_experimentalParam8 = 1005, ZSTD_c_experimentalParam9 = 1006, ZSTD_c_experimentalParam10 = 1007, ZSTD_c_experimentalParam11 = 1008, ZSTD_c_experimentalParam12 = 1009, ZSTD_c_experimentalParam13 = 1010, ZSTD_c_experimentalParam14 = 1011, ZSTD_c_experimentalParam15 = 1012, ZSTD_c_experimentalParam16 = 1013, ZSTD_c_experimentalParam17 = 1014, ZSTD_c_experimentalParam18 = 1015, ZSTD_c_experimentalParam19 = 1016, ZSTD_c_experimentalParam20 = 1017, }
// Result type for *_getBounds(): check `error` with ZSTD_isError() before using the bounds.
#[repr(C)] #[derive(Debug, Copy, Clone)] pub struct ZSTD_bounds { pub error: usize, pub lowerBound: ::core::ffi::c_int, pub upperBound: ::core::ffi::c_int, }
extern "C" { #[doc = " ZSTD_cParam_getBounds() :\n All parameters must belong to an interval with lower and upper bounds,\n otherwise they will either trigger an error or be automatically clamped.\n @return : a structure, ZSTD_bounds, which contains\n - an error status field, which must be tested using ZSTD_isError()\n - lower and upper bounds, both inclusive"] pub fn ZSTD_cParam_getBounds(cParam: ZSTD_cParameter) -> ZSTD_bounds; }
extern "C" { #[doc = " ZSTD_CCtx_setParameter() :\n Set one compression parameter, selected by enum ZSTD_cParameter.\n All parameters have valid bounds. 
Bounds can be queried using ZSTD_cParam_getBounds().\n Providing a value beyond bound will either clamp it, or trigger an error (depending on parameter).\n Setting a parameter is generally only possible during frame initialization (before starting compression).\n Exception : when using multi-threading mode (nbWorkers >= 1),\n the following parameters can be updated _during_ compression (within same frame):\n => compressionLevel, hashLog, chainLog, searchLog, minMatch, targetLength and strategy.\n new parameters will be active for next job only (after a flush()).\n @return : an error code (which can be tested using ZSTD_isError())."] pub fn ZSTD_CCtx_setParameter( cctx: *mut ZSTD_CCtx, param: ZSTD_cParameter, value: ::core::ffi::c_int, ) -> usize; }
extern "C" { #[doc = " ZSTD_CCtx_setPledgedSrcSize() :\n Total input data size to be compressed as a single frame.\n Value will be written in frame header, unless if explicitly forbidden using ZSTD_c_contentSizeFlag.\n This value will also be controlled at end of frame, and trigger an error if not respected.\n @result : 0, or an error code (which can be tested with ZSTD_isError()).\n Note 1 : pledgedSrcSize==0 actually means zero, aka an empty frame.\n In order to mean \"unknown content size\", pass constant ZSTD_CONTENTSIZE_UNKNOWN.\n ZSTD_CONTENTSIZE_UNKNOWN is default value for any new frame.\n Note 2 : pledgedSrcSize is only valid once, for the next frame.\n It's discarded at the end of the frame, and replaced by ZSTD_CONTENTSIZE_UNKNOWN.\n Note 3 : Whenever all input data is provided and consumed in a single round,\n for example with ZSTD_compress2(),\n or invoking immediately ZSTD_compressStream2(,,,ZSTD_e_end),\n this value is automatically overridden by srcSize instead."] pub fn ZSTD_CCtx_setPledgedSrcSize( cctx: *mut ZSTD_CCtx, pledgedSrcSize: ::core::ffi::c_ulonglong, ) -> usize; }
// Selects what ZSTD_CCtx_reset()/ZSTD_DCtx_reset() clear: session, parameters, or both.
#[repr(u32)] #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] pub enum ZSTD_ResetDirective { ZSTD_reset_session_only = 1, 
ZSTD_reset_parameters = 2, ZSTD_reset_session_and_parameters = 3, }
extern "C" { #[doc = " ZSTD_CCtx_reset() :\n There are 2 different things that can be reset, independently or jointly :\n - The session : will stop compressing current frame, and make CCtx ready to start a new one.\n Useful after an error, or to interrupt any ongoing compression.\n Any internal data not yet flushed is cancelled.\n Compression parameters and dictionary remain unchanged.\n They will be used to compress next frame.\n Resetting session never fails.\n - The parameters : changes all parameters back to \"default\".\n This also removes any reference to any dictionary or external sequence producer.\n Parameters can only be changed between 2 sessions (i.e. no compression is currently ongoing)\n otherwise the reset fails, and function returns an error value (which can be tested using ZSTD_isError())\n - Both : similar to resetting the session, followed by resetting parameters."] pub fn ZSTD_CCtx_reset( cctx: *mut ZSTD_CCtx, reset: ZSTD_ResetDirective, ) -> usize; }
// One-shot compression driven by parameters set via the advanced (CCtx_set*) API.
extern "C" { #[doc = " ZSTD_compress2() :\n Behave the same as ZSTD_compressCCtx(), but compression parameters are set using the advanced API.\n (note that this entry point doesn't even expose a compression level parameter).\n ZSTD_compress2() always starts a new frame.\n Should cctx hold data from a previously unfinished frame, everything about it is forgotten.\n - Compression parameters are pushed into CCtx before starting compression, using ZSTD_CCtx_set*()\n - The function is always blocking, returns when compression is completed.\n NOTE: Providing `dstCapacity >= ZSTD_compressBound(srcSize)` guarantees that zstd will have\n enough space to successfully compress the data, though it is possible it fails for other reasons.\n @return : compressed size written into `dst` (<= `dstCapacity),\n or an error code if it fails (which can be tested using ZSTD_isError())."] pub fn ZSTD_compress2( cctx: *mut ZSTD_CCtx, dst: *mut 
::core::ffi::c_void, dstCapacity: usize, src: *const ::core::ffi::c_void, srcSize: usize, ) -> usize; }
// Advanced decompression parameters; discriminants match the C enum ZSTD_dParameter.
#[repr(u32)] #[doc = " Advanced decompression API (Requires v1.4.0+)"] #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] pub enum ZSTD_dParameter { ZSTD_d_windowLogMax = 100, ZSTD_d_experimentalParam1 = 1000, ZSTD_d_experimentalParam2 = 1001, ZSTD_d_experimentalParam3 = 1002, ZSTD_d_experimentalParam4 = 1003, ZSTD_d_experimentalParam5 = 1004, ZSTD_d_experimentalParam6 = 1005, }
extern "C" { #[doc = " ZSTD_dParam_getBounds() :\n All parameters must belong to an interval with lower and upper bounds,\n otherwise they will either trigger an error or be automatically clamped.\n @return : a structure, ZSTD_bounds, which contains\n - an error status field, which must be tested using ZSTD_isError()\n - both lower and upper bounds, inclusive"] pub fn ZSTD_dParam_getBounds(dParam: ZSTD_dParameter) -> ZSTD_bounds; }
extern "C" { #[doc = " ZSTD_DCtx_setParameter() :\n Set one compression parameter, selected by enum ZSTD_dParameter.\n All parameters have valid bounds. 
Bounds can be queried using ZSTD_dParam_getBounds().\n Providing a value beyond bound will either clamp it, or trigger an error (depending on parameter).\n Setting a parameter is only possible during frame initialization (before starting decompression).\n @return : 0, or an error code (which can be tested using ZSTD_isError())."] pub fn ZSTD_DCtx_setParameter( dctx: *mut ZSTD_DCtx, param: ZSTD_dParameter, value: ::core::ffi::c_int, ) -> usize; }
extern "C" { #[doc = " ZSTD_DCtx_reset() :\n Return a DCtx to clean state.\n Session and parameters can be reset jointly or separately.\n Parameters can only be reset when no active frame is being decompressed.\n @return : 0, or an error code, which can be tested with ZSTD_isError()"] pub fn ZSTD_DCtx_reset( dctx: *mut ZSTD_DCtx, reset: ZSTD_ResetDirective, ) -> usize; }
// Streaming buffer descriptors; `pos` is advanced in place by the streaming calls.
#[doc = " Streaming"] #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct ZSTD_inBuffer_s { #[doc = "< start of input buffer"] pub src: *const ::core::ffi::c_void, #[doc = "< size of input buffer"] pub size: usize, #[doc = "< position where reading stopped. Will be updated. Necessarily 0 <= pos <= size"] pub pos: usize, }
#[doc = " Streaming"] pub type ZSTD_inBuffer = ZSTD_inBuffer_s;
#[repr(C)] #[derive(Debug, Copy, Clone)] pub struct ZSTD_outBuffer_s { #[doc = "< start of output buffer"] pub dst: *mut ::core::ffi::c_void, #[doc = "< size of output buffer"] pub size: usize, #[doc = "< position where writing stopped. Will be updated. 
Necessarily 0 <= pos <= size"] pub pos: usize, }
pub type ZSTD_outBuffer = ZSTD_outBuffer_s;
// Streaming compression: a CStream is just a CCtx (same object, same lifecycle).
pub type ZSTD_CStream = ZSTD_CCtx;
extern "C" { pub fn ZSTD_createCStream() -> *mut ZSTD_CStream; }
extern "C" { pub fn ZSTD_freeCStream(zcs: *mut ZSTD_CStream) -> usize; }
// Directive passed to ZSTD_compressStream2(): keep going, flush, or finish the frame.
#[repr(u32)] #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] pub enum ZSTD_EndDirective { ZSTD_e_continue = 0, ZSTD_e_flush = 1, ZSTD_e_end = 2, }
extern "C" { #[doc = " ZSTD_compressStream2() : Requires v1.4.0+\n Behaves about the same as ZSTD_compressStream, with additional control on end directive.\n - Compression parameters are pushed into CCtx before starting compression, using ZSTD_CCtx_set*()\n - Compression parameters cannot be changed once compression is started (save a list of exceptions in multi-threading mode)\n - output->pos must be <= dstCapacity, input->pos must be <= srcSize\n - output->pos and input->pos will be updated. They are guaranteed to remain below their respective limit.\n - endOp must be a valid directive\n - When nbWorkers==0 (default), function is blocking : it completes its job before returning to caller.\n - When nbWorkers>=1, function is non-blocking : it copies a portion of input, distributes jobs to internal worker threads, flush to output whatever is available,\n and then immediately returns, just indicating that there is some data remaining to be flushed.\n The function nonetheless guarantees forward progress : it will return only after it reads or write at least 1+ byte.\n - Exception : if the first call requests a ZSTD_e_end directive and provides enough dstCapacity, the function delegates to ZSTD_compress2() which is always blocking.\n - @return provides a minimum amount of data remaining to be flushed from internal buffers\n or an error code, which can be tested using ZSTD_isError().\n if @return != 0, flush is not fully completed, there is still some data left within internal buffers.\n This is useful for ZSTD_e_flush, since in this case more flushes are 
necessary to empty all buffers.\n For ZSTD_e_end, @return == 0 when internal buffers are fully flushed and frame is completed.\n - after a ZSTD_e_end directive, if internal buffer is not fully flushed (@return != 0),\n only ZSTD_e_end or ZSTD_e_flush operations are allowed.\n Before starting a new compression job, or changing compression parameters,\n it is required to fully flush internal buffers.\n - note: if an operation ends with an error, it may leave @cctx in an undefined state.\n Therefore, it's UB to invoke ZSTD_compressStream2() of ZSTD_compressStream() on such a state.\n In order to be re-employed after an error, a state must be reset,\n which can be done explicitly (ZSTD_CCtx_reset()),\n or is sometimes implied by methods starting a new compression job (ZSTD_initCStream(), ZSTD_compressCCtx())"] pub fn ZSTD_compressStream2( cctx: *mut ZSTD_CCtx, output: *mut ZSTD_outBuffer, input: *mut ZSTD_inBuffer, endOp: ZSTD_EndDirective, ) -> usize; }
// Recommended buffer sizes for streaming input/output.
extern "C" { pub fn ZSTD_CStreamInSize() -> usize; }
extern "C" { pub fn ZSTD_CStreamOutSize() -> usize; }
extern "C" { #[doc = " Equivalent to:\n\n ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);\n ZSTD_CCtx_refCDict(zcs, NULL); // clear the dictionary (if any)\n ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel);\n\n Note that ZSTD_initCStream() clears any previously set dictionary. Use the new API\n to compress with a dictionary."] pub fn ZSTD_initCStream( zcs: *mut ZSTD_CStream, compressionLevel: ::core::ffi::c_int, ) -> usize; }
extern "C" { #[doc = " Alternative for ZSTD_compressStream2(zcs, output, input, ZSTD_e_continue).\n NOTE: The return value is different. ZSTD_compressStream() returns a hint for\n the next read size (if non-zero and not an error). 
ZSTD_compressStream2()\n returns the minimum nb of bytes left to flush (if non-zero and not an error)."] pub fn ZSTD_compressStream( zcs: *mut ZSTD_CStream, output: *mut ZSTD_outBuffer, input: *mut ZSTD_inBuffer, ) -> usize; }
extern "C" { #[doc = " Equivalent to ZSTD_compressStream2(zcs, output, &emptyInput, ZSTD_e_flush)."] pub fn ZSTD_flushStream( zcs: *mut ZSTD_CStream, output: *mut ZSTD_outBuffer, ) -> usize; }
extern "C" { #[doc = " Equivalent to ZSTD_compressStream2(zcs, output, &emptyInput, ZSTD_e_end)."] pub fn ZSTD_endStream( zcs: *mut ZSTD_CStream, output: *mut ZSTD_outBuffer, ) -> usize; }
// Streaming decompression: a DStream is just a DCtx (same object, same lifecycle).
pub type ZSTD_DStream = ZSTD_DCtx;
extern "C" { pub fn ZSTD_createDStream() -> *mut ZSTD_DStream; }
extern "C" { pub fn ZSTD_freeDStream(zds: *mut ZSTD_DStream) -> usize; }
extern "C" { #[doc = " ZSTD_initDStream() :\n Initialize/reset DStream state for new decompression operation.\n Call before new decompression operation using same DStream.\n\n Note : This function is redundant with the advanced API and equivalent to:\n ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);\n ZSTD_DCtx_refDDict(zds, NULL);"] pub fn ZSTD_initDStream(zds: *mut ZSTD_DStream) -> usize; }
extern "C" { #[doc = " ZSTD_decompressStream() :\n Streaming decompression function.\n Call repetitively to consume full input updating it as necessary.\n Function will update both input and output `pos` fields exposing current state via these fields:\n - `input.pos < input.size`, some input remaining and caller should provide remaining input\n on the next call.\n - `output.pos < output.size`, decoder flushed internal output buffer.\n - `output.pos == output.size`, unflushed data potentially present in the internal buffers,\n check ZSTD_decompressStream() @return value,\n if > 0, invoke it again to flush remaining data to output.\n Note : with no additional input, amount of data flushed <= ZSTD_BLOCKSIZE_MAX.\n\n @return : 0 when a frame is completely decoded and fully flushed,\n or an error code, which can be 
tested using ZSTD_isError(),\n or any other value > 0, which means there is some decoding or flushing to do to complete current frame.\n\n Note: when an operation returns with an error code, the @zds state may be left in undefined state.\n It's UB to invoke `ZSTD_decompressStream()` on such a state.\n In order to re-use such a state, it must be first reset,\n which can be done explicitly (`ZSTD_DCtx_reset()`),\n or is implied for operations starting some new decompression job (`ZSTD_initDStream`, `ZSTD_decompressDCtx()`, `ZSTD_decompress_usingDict()`)"] pub fn ZSTD_decompressStream( zds: *mut ZSTD_DStream, output: *mut ZSTD_outBuffer, input: *mut ZSTD_inBuffer, ) -> usize; } extern "C" { pub fn ZSTD_DStreamInSize() -> usize; } extern "C" { pub fn ZSTD_DStreamOutSize() -> usize; } extern "C" { #[doc = " Simple dictionary API\n/\n/*! ZSTD_compress_usingDict() :\n Compression at an explicit compression level using a Dictionary.\n A dictionary can be any arbitrary data segment (also called a prefix),\n or a buffer with specified information (see zdict.h).\n Note : This function loads the dictionary, resulting in significant startup delay.\n It's intended for a dictionary used only once.\n Note 2 : When `dict == NULL || dictSize < 8` no dictionary is used."] pub fn ZSTD_compress_usingDict( ctx: *mut ZSTD_CCtx, dst: *mut ::core::ffi::c_void, dstCapacity: usize, src: *const ::core::ffi::c_void, srcSize: usize, dict: *const ::core::ffi::c_void, dictSize: usize, compressionLevel: ::core::ffi::c_int, ) -> usize; } extern "C" { #[doc = " ZSTD_decompress_usingDict() :\n Decompression using a known Dictionary.\n Dictionary must be identical to the one used during compression.\n Note : This function loads the dictionary, resulting in significant startup delay.\n It's intended for a dictionary used only once.\n Note : When `dict == NULL || dictSize < 8` no dictionary is used."] pub fn ZSTD_decompress_usingDict( dctx: *mut ZSTD_DCtx, dst: *mut ::core::ffi::c_void, dstCapacity: 
usize, src: *const ::core::ffi::c_void, srcSize: usize, dict: *const ::core::ffi::c_void, dictSize: usize, ) -> usize; } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct ZSTD_CDict_s { _unused: [u8; 0], } #[doc = " Bulk processing dictionary API"] pub type ZSTD_CDict = ZSTD_CDict_s; extern "C" { #[doc = " ZSTD_createCDict() :\n When compressing multiple messages or blocks using the same dictionary,\n it's recommended to digest the dictionary only once, since it's a costly operation.\n ZSTD_createCDict() will create a state from digesting a dictionary.\n The resulting state can be used for future compression operations with very limited startup cost.\n ZSTD_CDict can be created once and shared by multiple threads concurrently, since its usage is read-only.\n @dictBuffer can be released after ZSTD_CDict creation, because its content is copied within CDict.\n Note 1 : Consider experimental function `ZSTD_createCDict_byReference()` if you prefer to not duplicate @dictBuffer content.\n Note 2 : A ZSTD_CDict can be created from an empty @dictBuffer,\n in which case the only thing that it transports is the @compressionLevel.\n This can be useful in a pipeline featuring ZSTD_compress_usingCDict() exclusively,\n expecting a ZSTD_CDict parameter with any data, including those without a known dictionary."] pub fn ZSTD_createCDict( dictBuffer: *const ::core::ffi::c_void, dictSize: usize, compressionLevel: ::core::ffi::c_int, ) -> *mut ZSTD_CDict; } extern "C" { #[doc = " ZSTD_freeCDict() :\n Function frees memory allocated by ZSTD_createCDict().\n If a NULL pointer is passed, no operation is performed."] pub fn ZSTD_freeCDict(CDict: *mut ZSTD_CDict) -> usize; } extern "C" { #[doc = " ZSTD_compress_usingCDict() :\n Compression using a digested Dictionary.\n Recommended when same dictionary is used multiple times.\n Note : compression level is _decided at dictionary creation time_,\n and frame parameters are hardcoded (dictID=yes, contentSize=yes, checksum=no)"] pub fn 
ZSTD_compress_usingCDict( cctx: *mut ZSTD_CCtx, dst: *mut ::core::ffi::c_void, dstCapacity: usize, src: *const ::core::ffi::c_void, srcSize: usize, cdict: *const ZSTD_CDict, ) -> usize; } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct ZSTD_DDict_s { _unused: [u8; 0], } pub type ZSTD_DDict = ZSTD_DDict_s; extern "C" { #[doc = " ZSTD_createDDict() :\n Create a digested dictionary, ready to start decompression operation without startup delay.\n dictBuffer can be released after DDict creation, as its content is copied inside DDict."] pub fn ZSTD_createDDict( dictBuffer: *const ::core::ffi::c_void, dictSize: usize, ) -> *mut ZSTD_DDict; } extern "C" { #[doc = " ZSTD_freeDDict() :\n Function frees memory allocated with ZSTD_createDDict()\n If a NULL pointer is passed, no operation is performed."] pub fn ZSTD_freeDDict(ddict: *mut ZSTD_DDict) -> usize; } extern "C" { #[doc = " ZSTD_decompress_usingDDict() :\n Decompression using a digested Dictionary.\n Recommended when same dictionary is used multiple times."] pub fn ZSTD_decompress_usingDDict( dctx: *mut ZSTD_DCtx, dst: *mut ::core::ffi::c_void, dstCapacity: usize, src: *const ::core::ffi::c_void, srcSize: usize, ddict: *const ZSTD_DDict, ) -> usize; } extern "C" { #[doc = " ZSTD_getDictID_fromDict() : Requires v1.4.0+\n Provides the dictID stored within dictionary.\n if @return == 0, the dictionary is not conformant with Zstandard specification.\n It can still be loaded, but as a content-only dictionary."] pub fn ZSTD_getDictID_fromDict( dict: *const ::core::ffi::c_void, dictSize: usize, ) -> ::core::ffi::c_uint; } extern "C" { #[doc = " ZSTD_getDictID_fromCDict() : Requires v1.5.0+\n Provides the dictID of the dictionary loaded into `cdict`.\n If @return == 0, the dictionary is not conformant to Zstandard specification, or empty.\n Non-conformant dictionaries can still be loaded, but as content-only dictionaries."] pub fn ZSTD_getDictID_fromCDict( cdict: *const ZSTD_CDict, ) -> ::core::ffi::c_uint; } extern "C" { 
#[doc = " ZSTD_getDictID_fromDDict() : Requires v1.4.0+\n Provides the dictID of the dictionary loaded into `ddict`.\n If @return == 0, the dictionary is not conformant to Zstandard specification, or empty.\n Non-conformant dictionaries can still be loaded, but as content-only dictionaries."]
    pub fn ZSTD_getDictID_fromDDict(
        ddict: *const ZSTD_DDict,
    ) -> ::core::ffi::c_uint;
}
extern "C" {
    #[doc = " ZSTD_getDictID_fromFrame() : Requires v1.4.0+\n Provides the dictID required to decompressed the frame stored within `src`.\n If @return == 0, the dictID could not be decoded.\n This could for one of the following reasons :\n - The frame does not require a dictionary to be decoded (most common case).\n - The frame was built with dictID intentionally removed. Whatever dictionary is necessary is a hidden piece of information.\n Note : this use case also happens when using a non-conformant dictionary.\n - `srcSize` is too small, and as a result, the frame header could not be decoded (only possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`).\n - This is not a Zstandard frame.\n When identifying the exact failure cause, it's possible to use ZSTD_getFrameHeader(), which will provide a more precise error code."]
    pub fn ZSTD_getDictID_fromFrame(
        src: *const ::core::ffi::c_void,
        srcSize: usize,
    ) -> ::core::ffi::c_uint;
}
// Advanced dictionary/prefix management on contexts (load / reference / prefix).
extern "C" {
    #[doc = " ZSTD_CCtx_loadDictionary() : Requires v1.4.0+\n Create an internal CDict from `dict` buffer.\n Decompression will have to use same dictionary.\n @result : 0, or an error code (which can be tested with ZSTD_isError()).\n Special: Loading a NULL (or 0-size) dictionary invalidates previous dictionary,\n meaning \"return to no-dictionary mode\".\n Note 1 : Dictionary is sticky, it will be used for all future compressed frames,\n until parameters are reset, a new dictionary is loaded, or the dictionary\n is explicitly invalidated by loading a NULL dictionary.\n Note 2 : Loading a dictionary involves building tables.\n It's also a CPU consuming operation, with non-negligible impact on latency.\n Tables are dependent on compression parameters, and for this reason,\n compression parameters can no longer be changed after loading a dictionary.\n Note 3 :`dict` content will be copied internally.\n Use experimental ZSTD_CCtx_loadDictionary_byReference() to reference content instead.\n In such a case, dictionary buffer must outlive its users.\n Note 4 : Use ZSTD_CCtx_loadDictionary_advanced()\n to precisely select how dictionary content must be interpreted.\n Note 5 : This method does not benefit from LDM (long distance mode).\n If you want to employ LDM on some large dictionary content,\n prefer employing ZSTD_CCtx_refPrefix() described below."]
    pub fn ZSTD_CCtx_loadDictionary(
        cctx: *mut ZSTD_CCtx,
        dict: *const ::core::ffi::c_void,
        dictSize: usize,
    ) -> usize;
}
extern "C" {
    #[doc = " ZSTD_CCtx_refCDict() : Requires v1.4.0+\n Reference a prepared dictionary, to be used for all future compressed frames.\n Note that compression parameters are enforced from within CDict,\n and supersede any compression parameter previously set within CCtx.\n The parameters ignored are labelled as \"superseded-by-cdict\" in the ZSTD_cParameter enum docs.\n The ignored parameters will be used again if the CCtx is returned to no-dictionary mode.\n The dictionary will remain valid for future compressed frames using same CCtx.\n @result : 0, or an error code (which can be tested with ZSTD_isError()).\n Special : Referencing a NULL CDict means \"return to no-dictionary mode\".\n Note 1 : Currently, only one dictionary can be managed.\n Referencing a new dictionary effectively \"discards\" any previous one.\n Note 2 : CDict is just referenced, its lifetime must outlive its usage within CCtx."]
    pub fn ZSTD_CCtx_refCDict(
        cctx: *mut ZSTD_CCtx,
        cdict: *const ZSTD_CDict,
    ) -> usize;
}
extern "C" {
    #[doc = " ZSTD_CCtx_refPrefix() : Requires v1.4.0+\n Reference a prefix (single-usage dictionary) for next compressed frame.\n A prefix is **only used once**. Tables are discarded at end of frame (ZSTD_e_end).\n Decompression will need same prefix to properly regenerate data.\n Compressing with a prefix is similar in outcome as performing a diff and compressing it,\n but performs much faster, especially during decompression (compression speed is tunable with compression level).\n This method is compatible with LDM (long distance mode).\n @result : 0, or an error code (which can be tested with ZSTD_isError()).\n Special: Adding any prefix (including NULL) invalidates any previous prefix or dictionary\n Note 1 : Prefix buffer is referenced. It **must** outlive compression.\n Its content must remain unmodified during compression.\n Note 2 : If the intention is to diff some large src data blob with some prior version of itself,\n ensure that the window size is large enough to contain the entire source.\n See ZSTD_c_windowLog.\n Note 3 : Referencing a prefix involves building tables, which are dependent on compression parameters.\n It's a CPU consuming operation, with non-negligible impact on latency.\n If there is a need to use the same prefix multiple times, consider loadDictionary instead.\n Note 4 : By default, the prefix is interpreted as raw content (ZSTD_dct_rawContent).\n Use experimental ZSTD_CCtx_refPrefix_advanced() to alter dictionary interpretation."]
    pub fn ZSTD_CCtx_refPrefix(
        cctx: *mut ZSTD_CCtx,
        prefix: *const ::core::ffi::c_void,
        prefixSize: usize,
    ) -> usize;
}
extern "C" {
    #[doc = " ZSTD_DCtx_loadDictionary() : Requires v1.4.0+\n Create an internal DDict from dict buffer, to be used to decompress all future frames.\n The dictionary remains valid for all future frames, until explicitly invalidated, or\n a new dictionary is loaded.\n @result : 0, or an error code (which can be tested with ZSTD_isError()).\n Special : Adding a NULL (or 0-size) dictionary invalidates any previous dictionary,\n meaning \"return to no-dictionary mode\".\n Note 1 : Loading a dictionary involves building tables,\n which has a non-negligible impact on CPU usage and latency.\n It's recommended to \"load once, use many times\", to amortize the cost\n Note 2 :`dict` content will be copied internally, so `dict` can be released after loading.\n Use ZSTD_DCtx_loadDictionary_byReference() to reference dictionary content instead.\n Note 3 : Use ZSTD_DCtx_loadDictionary_advanced() to take control of\n how dictionary content is loaded and interpreted."]
    pub fn ZSTD_DCtx_loadDictionary(
        dctx: *mut ZSTD_DCtx,
        dict: *const ::core::ffi::c_void,
        dictSize: usize,
    ) -> usize;
}
extern "C" {
    #[doc = " ZSTD_DCtx_refDDict() : Requires v1.4.0+\n Reference a prepared dictionary, to be used to decompress next frames.\n The dictionary remains active for decompression of future frames using same DCtx.\n\n If called with ZSTD_d_refMultipleDDicts enabled, repeated calls of this function\n will store the DDict references in a table, and the DDict used for decompression\n will be determined at decompression time, as per the dict ID in the frame.\n The memory for the table is allocated on the first call to refDDict, and can be\n freed with ZSTD_freeDCtx().\n\n If called with ZSTD_d_refMultipleDDicts disabled (the default), only one dictionary\n will be managed, and referencing a dictionary effectively \"discards\" any previous one.\n\n @result : 0, or an error code (which can be tested with ZSTD_isError()).\n Special: referencing a NULL DDict means \"return to no-dictionary mode\".\n Note 2 : DDict is just referenced, its lifetime must outlive its usage from DCtx."]
    pub fn ZSTD_DCtx_refDDict(
        dctx: *mut ZSTD_DCtx,
        ddict: *const ZSTD_DDict,
    ) -> usize;
}
extern "C" {
    #[doc = " ZSTD_DCtx_refPrefix() : Requires v1.4.0+\n Reference a prefix (single-usage dictionary) to decompress next frame.\n This is the reverse operation of ZSTD_CCtx_refPrefix(),\n and must use the same prefix as the one used during compression.\n Prefix is **only used once**. Reference is discarded at end of frame.\n End of frame is reached when ZSTD_decompressStream() returns 0.\n @result : 0, or an error code (which can be tested with ZSTD_isError()).\n Note 1 : Adding any prefix (including NULL) invalidates any previously set prefix or dictionary\n Note 2 : Prefix buffer is referenced. It **must** outlive decompression.\n Prefix buffer must remain unmodified up to the end of frame,\n reached when ZSTD_decompressStream() returns 0.\n Note 3 : By default, the prefix is treated as raw content (ZSTD_dct_rawContent).\n Use ZSTD_CCtx_refPrefix_advanced() to alter dictMode (Experimental section)\n Note 4 : Referencing a raw content prefix has almost no cpu nor memory cost.\n A full dictionary is more costly, as it requires building tables."]
    pub fn ZSTD_DCtx_refPrefix(
        dctx: *mut ZSTD_DCtx,
        prefix: *const ::core::ffi::c_void,
        prefixSize: usize,
    ) -> usize;
}
// Memory-usage introspection for each object kind.
extern "C" {
    #[doc = " ZSTD_sizeof_*() : Requires v1.4.0+\n These functions give the _current_ memory usage of selected object.\n Note that object memory usage can evolve (increase or decrease) over time."]
    pub fn ZSTD_sizeof_CCtx(cctx: *const ZSTD_CCtx) -> usize;
}
extern "C" {
    pub fn ZSTD_sizeof_DCtx(dctx: *const ZSTD_DCtx) -> usize;
}
extern "C" {
    pub fn ZSTD_sizeof_CStream(zcs: *const ZSTD_CStream) -> usize;
}
extern "C" {
    pub fn ZSTD_sizeof_DStream(zds: *const ZSTD_DStream) -> usize;
}
extern "C" {
    pub fn ZSTD_sizeof_CDict(cdict: *const ZSTD_CDict) -> usize;
}
extern "C" {
    pub fn ZSTD_sizeof_DDict(ddict: *const ZSTD_DDict) -> usize;
}
// Opaque advanced-parameters object, only handled via pointer.
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct ZSTD_CCtx_params_s {
    _unused: [u8; 0],
}
pub type ZSTD_CCtx_params = ZSTD_CCtx_params_s;
// One (offset, literal-length, match-length) triple of the sequence API;
// `rep` carries repcode information.
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct ZSTD_Sequence {
    pub offset: ::core::ffi::c_uint,
    pub litLength: ::core::ffi::c_uint,
    pub matchLength: ::core::ffi::c_uint,
    pub rep: ::core::ffi::c_uint,
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct ZSTD_compressionParameters {
    #[doc = "< largest match distance : larger == more compression, more memory needed during decompression"]
    pub windowLog: ::core::ffi::c_uint,
    #[doc = "< fully searched segment : larger == more compression, slower, more memory (useless for fast)"]
    pub chainLog: ::core::ffi::c_uint,
    #[doc = "< dispatch table : larger == faster, more memory"]
    pub hashLog: ::core::ffi::c_uint,
    #[doc = "< nb of searches : larger == more compression, slower"]
    pub searchLog: ::core::ffi::c_uint,
    #[doc = "< match length searched : larger == faster decompression, sometimes less compression"]
    pub minMatch: ::core::ffi::c_uint,
    #[doc = "< acceptable match size for optimal parser (only) : larger == more compression, slower"]
    pub targetLength: ::core::ffi::c_uint,
    #[doc = "< see ZSTD_strategy definition above"]
    pub strategy: ZSTD_strategy,
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct ZSTD_frameParameters {
    #[doc = "< 1: content size will be in frame header (when known)"]
    pub contentSizeFlag: ::core::ffi::c_int,
    #[doc = "< 1: generate a 32-bits checksum using XXH64 algorithm at end of frame, for error detection"]
    pub checksumFlag: ::core::ffi::c_int,
    #[doc = "< 1: no dictID will be saved into frame header (dictID is only useful for dictionary compression)"]
    pub noDictIDFlag: ::core::ffi::c_int,
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct ZSTD_parameters {
    pub cParams: ZSTD_compressionParameters,
    pub fParams: ZSTD_frameParameters,
}
#[repr(u32)]
#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
pub enum ZSTD_dictContentType_e {
    ZSTD_dct_auto = 0,
    ZSTD_dct_rawContent = 1,
    ZSTD_dct_fullDict = 2,
}
#[repr(u32)]
#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
pub enum ZSTD_dictLoadMethod_e {
    #[doc = "< Copy dictionary content internally"]
    ZSTD_dlm_byCopy = 0,
    #[doc = "< Reference dictionary content -- the dictionary buffer must outlive its users."]
    ZSTD_dlm_byRef = 1,
}
#[repr(u32)]
#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
pub enum ZSTD_format_e {
    ZSTD_f_zstd1 = 0,
    ZSTD_f_zstd1_magicless = 1,
}
#[repr(u32)]
#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
pub enum ZSTD_forceIgnoreChecksum_e {
    ZSTD_d_validateChecksum = 0,
    ZSTD_d_ignoreChecksum = 1,
}
#[repr(u32)]
#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
pub enum ZSTD_refMultipleDDicts_e {
    ZSTD_rmd_refSingleDDict = 0,
    ZSTD_rmd_refMultipleDDicts = 1,
}
#[repr(u32)]
#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
pub enum ZSTD_dictAttachPref_e {
    ZSTD_dictDefaultAttach = 0,
    ZSTD_dictForceAttach = 1,
    ZSTD_dictForceCopy = 2,
    ZSTD_dictForceLoad = 3,
}
#[repr(u32)]
#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
pub enum ZSTD_literalCompressionMode_e {
    #[doc = "< Automatically determine the compression mode based on the compression level.\n Negative compression levels will be uncompressed, and positive compression\n levels will be compressed."]
    ZSTD_lcm_auto = 0,
    #[doc = "< Always attempt Huffman compression. Uncompressed literals will still be\n emitted if Huffman compression is not profitable."]
    ZSTD_lcm_huffman = 1,
    #[doc = "< Always emit uncompressed literals."]
    ZSTD_lcm_uncompressed = 2,
}
// Tri-state switch used by several advanced parameters: auto / force on / force off.
#[repr(u32)]
#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
pub enum ZSTD_ParamSwitch_e {
    ZSTD_ps_auto = 0,
    ZSTD_ps_enable = 1,
    ZSTD_ps_disable = 2,
}
extern "C" {
    #[doc = " ZSTD_findDecompressedSize() :\n `src` should point to the start of a series of ZSTD encoded and/or skippable frames\n `srcSize` must be the _exact_ size of this series\n (i.e. there should be a frame boundary at `src + srcSize`)\n @return : - decompressed size of all data in all successive frames\n - if the decompressed size cannot be determined: ZSTD_CONTENTSIZE_UNKNOWN\n - if an error occurred: ZSTD_CONTENTSIZE_ERROR\n\n note 1 : decompressed size is an optional field, that may not be present, especially in streaming mode.\n When `return==ZSTD_CONTENTSIZE_UNKNOWN`, data to decompress could be any size.\n In which case, it's necessary to use streaming mode to decompress data.\n note 2 : decompressed size is always present when compression is done with ZSTD_compress()\n note 3 : decompressed size can be very large (64-bits value),\n potentially larger than what local system can handle as a single memory segment.\n In which case, it's necessary to use streaming mode to decompress data.\n note 4 : If source is untrusted, decompressed size could be wrong or intentionally modified.\n Always ensure result fits within application's authorized limits.\n Each application can set its own limits.\n note 5 : ZSTD_findDecompressedSize handles multiple frames, and so it must traverse the input to\n read each contained frame header. This is fast as most of the data is skipped,\n however it does mean that all frame data must be present and valid."]
    pub fn ZSTD_findDecompressedSize(
        src: *const ::core::ffi::c_void,
        srcSize: usize,
    ) -> ::core::ffi::c_ulonglong;
}
extern "C" {
    #[doc = " ZSTD_decompressBound() :\n `src` should point to the start of a series of ZSTD encoded and/or skippable frames\n `srcSize` must be the _exact_ size of this series\n (i.e. there should be a frame boundary at `src + srcSize`)\n @return : - upper-bound for the decompressed size of all data in all successive frames\n - if an error occurred: ZSTD_CONTENTSIZE_ERROR\n\n note 1 : an error can occur if `src` contains an invalid or incorrectly formatted frame.\n note 2 : the upper-bound is exact when the decompressed size field is available in every ZSTD encoded frame of `src`.\n in this case, `ZSTD_findDecompressedSize` and `ZSTD_decompressBound` return the same value.\n note 3 : when the decompressed size field isn't available, the upper-bound for that frame is calculated by:\n upper-bound = # blocks * min(128 KB, Window_Size)"]
    pub fn ZSTD_decompressBound(
        src: *const ::core::ffi::c_void,
        srcSize: usize,
    ) -> ::core::ffi::c_ulonglong;
}
extern "C" {
    #[doc = " ZSTD_frameHeaderSize() :\n srcSize must be large enough, aka >= ZSTD_FRAMEHEADERSIZE_PREFIX.\n @return : size of the Frame Header,\n or an error code (if srcSize is too small)"]
    pub fn ZSTD_frameHeaderSize(
        src: *const ::core::ffi::c_void,
        srcSize: usize,
    ) -> usize;
}
#[repr(u32)]
#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
pub enum ZSTD_FrameType_e {
    ZSTD_frame = 0,
    ZSTD_skippableFrame = 1,
}
// Decoded frame-header fields, filled by ZSTD_getFrameHeader()/_advanced().
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct ZSTD_FrameHeader {
    pub frameContentSize: ::core::ffi::c_ulonglong,
    pub windowSize: ::core::ffi::c_ulonglong,
    pub blockSizeMax: ::core::ffi::c_uint,
    pub frameType: ZSTD_FrameType_e,
    pub headerSize: ::core::ffi::c_uint,
    pub dictID: ::core::ffi::c_uint,
    pub checksumFlag: ::core::ffi::c_uint,
    pub _reserved1: ::core::ffi::c_uint,
    pub _reserved2: ::core::ffi::c_uint,
}
extern "C" {
    #[doc = " ZSTD_getFrameHeader() :\n decode Frame Header into `zfhPtr`, or requires larger `srcSize`.\n @return : 0 => header is complete, `zfhPtr` is correctly filled,\n >0 => `srcSize` is too small, @return value is the wanted `srcSize` amount, `zfhPtr` is not filled,\n or an error code, which can be tested using ZSTD_isError()"]
    pub fn ZSTD_getFrameHeader(
zfhPtr: *mut ZSTD_FrameHeader,
        src: *const ::core::ffi::c_void,
        srcSize: usize,
    ) -> usize;
}
extern "C" {
    #[doc = " ZSTD_getFrameHeader_advanced() :\n same as ZSTD_getFrameHeader(),\n with added capability to select a format (like ZSTD_f_zstd1_magicless)"]
    pub fn ZSTD_getFrameHeader_advanced(
        zfhPtr: *mut ZSTD_FrameHeader,
        src: *const ::core::ffi::c_void,
        srcSize: usize,
        format: ZSTD_format_e,
    ) -> usize;
}
extern "C" {
    #[doc = " ZSTD_decompressionMargin() :\n Zstd supports in-place decompression, where the input and output buffers overlap.\n In this case, the output buffer must be at least (Margin + Output_Size) bytes large,\n and the input buffer must be at the end of the output buffer.\n\n _______________________ Output Buffer ________________________\n | |\n | ____ Input Buffer ____|\n | | |\n v v v\n |---------------------------------------|-----------|----------|\n ^ ^ ^\n |___________________ Output_Size ___________________|_ Margin _|\n\n NOTE: See also ZSTD_DECOMPRESSION_MARGIN().\n NOTE: This applies only to single-pass decompression through ZSTD_decompress() or\n ZSTD_decompressDCtx().\n NOTE: This function supports multi-frame input.\n\n @param src The compressed frame(s)\n @param srcSize The size of the compressed frame(s)\n @returns The decompression margin or an error that can be checked with ZSTD_isError()."]
    pub fn ZSTD_decompressionMargin(
        src: *const ::core::ffi::c_void,
        srcSize: usize,
    ) -> usize;
}
// Whether a ZSTD_Sequence array carries explicit block-delimiter entries.
#[repr(u32)]
#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
pub enum ZSTD_SequenceFormat_e {
    ZSTD_sf_noBlockDelimiters = 0,
    ZSTD_sf_explicitBlockDelimiters = 1,
}
extern "C" {
    #[doc = " ZSTD_sequenceBound() :\n `srcSize` : size of the input buffer\n @return : upper-bound for the number of sequences that can be generated\n from a buffer of srcSize bytes\n\n note : returns number of sequences - to get bytes, multiply by sizeof(ZSTD_Sequence)."]
    pub fn ZSTD_sequenceBound(srcSize: usize) -> usize;
}
extern "C" {
    #[doc = " ZSTD_generateSequences() :\n WARNING: This function is meant for debugging and informational purposes ONLY!\n Its implementation is flawed, and it will be deleted in a future version.\n It is not guaranteed to succeed, as there are several cases where it will give\n up and fail. You should NOT use this function in production code.\n\n This function is deprecated, and will be removed in a future version.\n\n Generate sequences using ZSTD_compress2(), given a source buffer.\n\n @param zc The compression context to be used for ZSTD_compress2(). Set any\n compression parameters you need on this context.\n @param outSeqs The output sequences buffer of size @p outSeqsSize\n @param outSeqsCapacity The size of the output sequences buffer.\n ZSTD_sequenceBound(srcSize) is an upper bound on the number\n of sequences that can be generated.\n @param src The source buffer to generate sequences from of size @p srcSize.\n @param srcSize The size of the source buffer.\n\n Each block will end with a dummy sequence\n with offset == 0, matchLength == 0, and litLength == length of last literals.\n litLength may be == 0, and if so, then the sequence of (of: 0 ml: 0 ll: 0)\n simply acts as a block delimiter.\n\n @returns The number of sequences generated, necessarily less than\n ZSTD_sequenceBound(srcSize), or an error code that can be checked\n with ZSTD_isError()."]
    pub fn ZSTD_generateSequences(
        zc: *mut ZSTD_CCtx,
        outSeqs: *mut ZSTD_Sequence,
        outSeqsCapacity: usize,
        src: *const ::core::ffi::c_void,
        srcSize: usize,
    ) -> usize;
}
extern "C" {
    #[doc = " ZSTD_mergeBlockDelimiters() :\n Given an array of ZSTD_Sequence, remove all sequences that represent block delimiters/last literals\n by merging them into the literals of the next sequence.\n\n As such, the final generated result has no explicit representation of block boundaries,\n and the final last literals segment is not represented in the sequences.\n\n The output of this function can be fed into ZSTD_compressSequences() with CCtx\n setting of ZSTD_c_blockDelimiters as ZSTD_sf_noBlockDelimiters\n @return : number of sequences left after merging"]
    pub fn ZSTD_mergeBlockDelimiters(
        sequences: *mut ZSTD_Sequence,
        seqsSize: usize,
    ) -> usize;
}
extern "C" {
    #[doc = " ZSTD_compressSequences() :\n Compress an array of ZSTD_Sequence, associated with @src buffer, into dst.\n @src contains the entire input (not just the literals).\n If @srcSize > sum(sequence.length), the remaining bytes are considered all literals\n If a dictionary is included, then the cctx should reference the dict (see: ZSTD_CCtx_refCDict(), ZSTD_CCtx_loadDictionary(), etc.).\n The entire source is compressed into a single frame.\n\n The compression behavior changes based on cctx params. In particular:\n If ZSTD_c_blockDelimiters == ZSTD_sf_noBlockDelimiters, the array of ZSTD_Sequence is expected to contain\n no block delimiters (defined in ZSTD_Sequence). Block boundaries are roughly determined based on\n the block size derived from the cctx, and sequences may be split. This is the default setting.\n\n If ZSTD_c_blockDelimiters == ZSTD_sf_explicitBlockDelimiters, the array of ZSTD_Sequence is expected to contain\n valid block delimiters (defined in ZSTD_Sequence). Behavior is undefined if no block delimiters are provided.\n\n When ZSTD_c_blockDelimiters == ZSTD_sf_explicitBlockDelimiters, it's possible to decide generating repcodes\n using the advanced parameter ZSTD_c_repcodeResolution. Repcodes will improve compression ratio, though the benefit\n can vary greatly depending on Sequences. On the other hand, repcode resolution is an expensive operation.\n By default, it's disabled at low (<10) compression levels, and enabled above the threshold (>=10).\n ZSTD_c_repcodeResolution makes it possible to directly manage this processing in either direction.\n\n If ZSTD_c_validateSequences == 0, this function blindly accepts the Sequences provided. Invalid Sequences cause undefined\n behavior. If ZSTD_c_validateSequences == 1, then the function will detect invalid Sequences (see doc/zstd_compression_format.md for\n specifics regarding offset/matchlength requirements) and then bail out and return an error.\n\n In addition to the two adjustable experimental params, there are other important cctx params.\n - ZSTD_c_minMatch MUST be set as less than or equal to the smallest match generated by the match finder. It has a minimum value of ZSTD_MINMATCH_MIN.\n - ZSTD_c_compressionLevel accordingly adjusts the strength of the entropy coder, as it would in typical compression.\n - ZSTD_c_windowLog affects offset validation: this function will return an error at higher debug levels if a provided offset\n is larger than what the spec allows for a given window log and dictionary (if present). See: doc/zstd_compression_format.md\n\n Note: Repcodes are, as of now, always re-calculated within this function, ZSTD_Sequence.rep is effectively unused.\n Dev Note: Once ability to ingest repcodes become available, the explicit block delims mode must respect those repcodes exactly,\n and cannot emit an RLE block that disagrees with the repcode history.\n @return : final compressed size, or a ZSTD error code."]
    pub fn ZSTD_compressSequences(
        cctx: *mut ZSTD_CCtx,
        dst: *mut ::core::ffi::c_void,
        dstCapacity: usize,
        inSeqs: *const ZSTD_Sequence,
        inSeqsSize: usize,
        src: *const ::core::ffi::c_void,
        srcSize: usize,
    ) -> usize;
}
extern "C" {
    #[doc = " ZSTD_compressSequencesAndLiterals() :\n This is a variant of ZSTD_compressSequences() which,\n instead of receiving (src,srcSize) as input parameter, receives (literals,litSize),\n aka all the literals, already extracted and laid out into a single continuous buffer.\n This can be useful if the process generating the sequences also happens to generate the buffer of literals,\n thus skipping an extraction + caching stage.\n It's a speed optimization, useful when the right conditions are met,\n but it also features the following limitations:\n - Only supports explicit delimiter mode\n - Currently does not support Sequences validation (so input Sequences are trusted)\n - Not compatible with frame checksum, which must be disabled\n - If any block is incompressible, will fail and return an error\n - @litSize must be == sum of all @.litLength fields in @inSeqs. Any discrepancy will generate an error.\n - @litBufCapacity is the size of the underlying buffer into which literals are written, starting at address @literals.\n @litBufCapacity must be at least 8 bytes larger than @litSize.\n - @decompressedSize must be correct, and correspond to the sum of all Sequences. Any discrepancy will generate an error.\n @return : final compressed size, or a ZSTD error code."]
    pub fn ZSTD_compressSequencesAndLiterals(
        cctx: *mut ZSTD_CCtx,
        dst: *mut ::core::ffi::c_void,
        dstCapacity: usize,
        inSeqs: *const ZSTD_Sequence,
        nbSequences: usize,
        literals: *const ::core::ffi::c_void,
        litSize: usize,
        litBufCapacity: usize,
        decompressedSize: usize,
    ) -> usize;
}
extern "C" {
    #[doc = " ZSTD_writeSkippableFrame() :\n Generates a zstd skippable frame containing data given by src, and writes it to dst buffer.\n\n Skippable frames begin with a 4-byte magic number. There are 16 possible choices of magic number,\n ranging from ZSTD_MAGIC_SKIPPABLE_START to ZSTD_MAGIC_SKIPPABLE_START+15.\n As such, the parameter magicVariant controls the exact skippable frame magic number variant used,\n so the magic number used will be ZSTD_MAGIC_SKIPPABLE_START + magicVariant.\n\n Returns an error if destination buffer is not large enough, if the source size is not representable\n with a 4-byte unsigned int, or if the parameter magicVariant is greater than 15 (and therefore invalid).\n\n @return : number of bytes written or a ZSTD error."]
    pub fn ZSTD_writeSkippableFrame(
        dst: *mut ::core::ffi::c_void,
        dstCapacity: usize,
        src: *const ::core::ffi::c_void,
        srcSize: usize,
        magicVariant: ::core::ffi::c_uint,
    ) -> usize;
}
extern "C" {
    #[doc = " ZSTD_readSkippableFrame() :\n Retrieves the content of a zstd skippable frame starting at @src, and writes it to @dst buffer.\n\n The parameter @magicVariant will receive the magicVariant that was supplied when the frame was written,\n i.e.
magicNumber - ZSTD_MAGIC_SKIPPABLE_START.\n This can be NULL if the caller is not interested in the magicVariant.\n\n Returns an error if destination buffer is not large enough, or if the frame is not skippable.\n\n @return : number of bytes written or a ZSTD error."] pub fn ZSTD_readSkippableFrame( dst: *mut ::core::ffi::c_void, dstCapacity: usize, magicVariant: *mut ::core::ffi::c_uint, src: *const ::core::ffi::c_void, srcSize: usize, ) -> usize; } extern "C" { #[doc = " ZSTD_isSkippableFrame() :\n Tells if the content of `buffer` starts with a valid Frame Identifier for a skippable frame."] pub fn ZSTD_isSkippableFrame( buffer: *const ::core::ffi::c_void, size: usize, ) -> ::core::ffi::c_uint; } extern "C" { #[doc = " ZSTD_estimate*() :\n These functions make it possible to estimate memory usage\n of a future {D,C}Ctx, before its creation.\n This is useful in combination with ZSTD_initStatic(),\n which makes it possible to employ a static buffer for ZSTD_CCtx* state.\n\n ZSTD_estimateCCtxSize() will provide a memory budget large enough\n to compress data of any size using one-shot compression ZSTD_compressCCtx() or ZSTD_compress2()\n associated with any compression level up to max specified one.\n The estimate will assume the input may be arbitrarily large,\n which is the worst case.\n\n Note that the size estimation is specific for one-shot compression,\n it is not valid for streaming (see ZSTD_estimateCStreamSize*())\n nor other potential ways of using a ZSTD_CCtx* state.\n\n When srcSize can be bound by a known and rather \"small\" value,\n this knowledge can be used to provide a tighter budget estimation\n because the ZSTD_CCtx* state will need less memory for small inputs.\n This tighter estimation can be provided by employing more advanced functions\n ZSTD_estimateCCtxSize_usingCParams(), which can be used in tandem with ZSTD_getCParams(),\n and ZSTD_estimateCCtxSize_usingCCtxParams(), which can be used in tandem with ZSTD_CCtxParams_setParameter().\n Both 
can be used to estimate memory using custom compression parameters and arbitrary srcSize limits.\n\n Note : only single-threaded compression is supported.\n ZSTD_estimateCCtxSize_usingCCtxParams() will return an error code if ZSTD_c_nbWorkers is >= 1."] pub fn ZSTD_estimateCCtxSize( maxCompressionLevel: ::core::ffi::c_int, ) -> usize; } extern "C" { pub fn ZSTD_estimateCCtxSize_usingCParams( cParams: ZSTD_compressionParameters, ) -> usize; } extern "C" { pub fn ZSTD_estimateCCtxSize_usingCCtxParams( params: *const ZSTD_CCtx_params, ) -> usize; } extern "C" { pub fn ZSTD_estimateDCtxSize() -> usize; } extern "C" { #[doc = " ZSTD_estimateCStreamSize() :\n ZSTD_estimateCStreamSize() will provide a memory budget large enough for streaming compression\n using any compression level up to the max specified one.\n It will also consider src size to be arbitrarily \"large\", which is a worst case scenario.\n If srcSize is known to always be small, ZSTD_estimateCStreamSize_usingCParams() can provide a tighter estimation.\n ZSTD_estimateCStreamSize_usingCParams() can be used in tandem with ZSTD_getCParams() to create cParams from compressionLevel.\n ZSTD_estimateCStreamSize_usingCCtxParams() can be used in tandem with ZSTD_CCtxParams_setParameter(). Only single-threaded compression is supported. 
This function will return an error code if ZSTD_c_nbWorkers is >= 1.\n Note : CStream size estimation is only correct for single-threaded compression.\n ZSTD_estimateCStreamSize_usingCCtxParams() will return an error code if ZSTD_c_nbWorkers is >= 1.\n Note 2 : ZSTD_estimateCStreamSize* functions are not compatible with the Block-Level Sequence Producer API at this time.\n Size estimates assume that no external sequence producer is registered.\n\n ZSTD_DStream memory budget depends on frame's window Size.\n This information can be passed manually, using ZSTD_estimateDStreamSize,\n or deducted from a valid frame Header, using ZSTD_estimateDStreamSize_fromFrame();\n Any frame requesting a window size larger than max specified one will be rejected.\n Note : if streaming is init with function ZSTD_init?Stream_usingDict(),\n an internal ?Dict will be created, which additional size is not estimated here.\n In this case, get total size by adding ZSTD_estimate?DictSize"] pub fn ZSTD_estimateCStreamSize( maxCompressionLevel: ::core::ffi::c_int, ) -> usize; } extern "C" { pub fn ZSTD_estimateCStreamSize_usingCParams( cParams: ZSTD_compressionParameters, ) -> usize; } extern "C" { pub fn ZSTD_estimateCStreamSize_usingCCtxParams( params: *const ZSTD_CCtx_params, ) -> usize; } extern "C" { pub fn ZSTD_estimateDStreamSize(maxWindowSize: usize) -> usize; } extern "C" { pub fn ZSTD_estimateDStreamSize_fromFrame( src: *const ::core::ffi::c_void, srcSize: usize, ) -> usize; } extern "C" { #[doc = " ZSTD_estimate?DictSize() :\n ZSTD_estimateCDictSize() will bet that src size is relatively \"small\", and content is copied, like ZSTD_createCDict().\n ZSTD_estimateCDictSize_advanced() makes it possible to control compression parameters precisely, like ZSTD_createCDict_advanced().\n Note : dictionaries created by reference (`ZSTD_dlm_byRef`) are logically smaller."] pub fn ZSTD_estimateCDictSize( dictSize: usize, compressionLevel: ::core::ffi::c_int, ) -> usize; } extern "C" { pub fn 
ZSTD_estimateCDictSize_advanced( dictSize: usize, cParams: ZSTD_compressionParameters, dictLoadMethod: ZSTD_dictLoadMethod_e, ) -> usize; } extern "C" { pub fn ZSTD_estimateDDictSize( dictSize: usize, dictLoadMethod: ZSTD_dictLoadMethod_e, ) -> usize; } extern "C" { #[doc = " ZSTD_initStatic*() :\n Initialize an object using a pre-allocated fixed-size buffer.\n workspace: The memory area to emplace the object into.\n Provided pointer *must be 8-bytes aligned*.\n Buffer must outlive object.\n workspaceSize: Use ZSTD_estimate*Size() to determine\n how large workspace must be to support target scenario.\n @return : pointer to object (same address as workspace, just different type),\n or NULL if error (size too small, incorrect alignment, etc.)\n Note : zstd will never resize nor malloc() when using a static buffer.\n If the object requires more memory than available,\n zstd will just error out (typically ZSTD_error_memory_allocation).\n Note 2 : there is no corresponding \"free\" function.\n Since workspace is allocated externally, it must be freed externally too.\n Note 3 : cParams : use ZSTD_getCParams() to convert a compression level\n into its associated cParams.\n Limitation 1 : currently not compatible with internal dictionary creation, triggered by\n ZSTD_CCtx_loadDictionary(), ZSTD_initCStream_usingDict() or ZSTD_initDStream_usingDict().\n Limitation 2 : static cctx currently not compatible with multi-threading.\n Limitation 3 : static dctx is incompatible with legacy support."] pub fn ZSTD_initStaticCCtx( workspace: *mut ::core::ffi::c_void, workspaceSize: usize, ) -> *mut ZSTD_CCtx; } extern "C" { pub fn ZSTD_initStaticCStream( workspace: *mut ::core::ffi::c_void, workspaceSize: usize, ) -> *mut ZSTD_CStream; } extern "C" { pub fn ZSTD_initStaticDCtx( workspace: *mut ::core::ffi::c_void, workspaceSize: usize, ) -> *mut ZSTD_DCtx; } extern "C" { pub fn ZSTD_initStaticDStream( workspace: *mut ::core::ffi::c_void, workspaceSize: usize, ) -> *mut ZSTD_DStream; } 
extern "C" { pub fn ZSTD_initStaticCDict( workspace: *mut ::core::ffi::c_void, workspaceSize: usize, dict: *const ::core::ffi::c_void, dictSize: usize, dictLoadMethod: ZSTD_dictLoadMethod_e, dictContentType: ZSTD_dictContentType_e, cParams: ZSTD_compressionParameters, ) -> *const ZSTD_CDict; } extern "C" { pub fn ZSTD_initStaticDDict( workspace: *mut ::core::ffi::c_void, workspaceSize: usize, dict: *const ::core::ffi::c_void, dictSize: usize, dictLoadMethod: ZSTD_dictLoadMethod_e, dictContentType: ZSTD_dictContentType_e, ) -> *const ZSTD_DDict; } #[doc = " Custom memory allocation :\n These prototypes make it possible to pass your own allocation/free functions.\n ZSTD_customMem is provided at creation time, using ZSTD_create*_advanced() variants listed below.\n All allocation/free operations will be completed using these custom variants instead of regular <stdlib.h> ones."] pub type ZSTD_allocFunction = ::core::option::Option< unsafe extern "C" fn( opaque: *mut ::core::ffi::c_void, size: usize, ) -> *mut ::core::ffi::c_void, >; pub type ZSTD_freeFunction = ::core::option::Option< unsafe extern "C" fn( opaque: *mut ::core::ffi::c_void, address: *mut ::core::ffi::c_void, ), >; #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct ZSTD_customMem { pub customAlloc: ZSTD_allocFunction, pub customFree: ZSTD_freeFunction, pub opaque: *mut ::core::ffi::c_void, } extern "C" { #[doc = "< this constant defers to stdlib's functions"] pub static ZSTD_defaultCMem: ZSTD_customMem; } extern "C" { pub fn ZSTD_createCCtx_advanced( customMem: ZSTD_customMem, ) -> *mut ZSTD_CCtx; } extern "C" { pub fn ZSTD_createCStream_advanced( customMem: ZSTD_customMem, ) -> *mut ZSTD_CStream; } extern "C" { pub fn ZSTD_createDCtx_advanced( customMem: ZSTD_customMem, ) -> *mut ZSTD_DCtx; } extern "C" { pub fn ZSTD_createDStream_advanced( customMem: ZSTD_customMem, ) -> *mut ZSTD_DStream; } extern "C" { pub fn ZSTD_createCDict_advanced( dict: *const ::core::ffi::c_void, dictSize: usize, 
dictLoadMethod: ZSTD_dictLoadMethod_e, dictContentType: ZSTD_dictContentType_e, cParams: ZSTD_compressionParameters, customMem: ZSTD_customMem, ) -> *mut ZSTD_CDict; } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct POOL_ctx_s { _unused: [u8; 0], } #[doc = " Thread pool :\n These prototypes make it possible to share a thread pool among multiple compression contexts.\n This can limit resources for applications with multiple threads where each one uses\n a threaded compression mode (via ZSTD_c_nbWorkers parameter).\n ZSTD_createThreadPool creates a new thread pool with a given number of threads.\n Note that the lifetime of such pool must exist while being used.\n ZSTD_CCtx_refThreadPool assigns a thread pool to a context (use NULL argument value\n to use an internal thread pool).\n ZSTD_freeThreadPool frees a thread pool, accepts NULL pointer."] pub type ZSTD_threadPool = POOL_ctx_s; extern "C" { pub fn ZSTD_createThreadPool(numThreads: usize) -> *mut ZSTD_threadPool; } extern "C" { pub fn ZSTD_freeThreadPool(pool: *mut ZSTD_threadPool); } extern "C" { pub fn ZSTD_CCtx_refThreadPool( cctx: *mut ZSTD_CCtx, pool: *mut ZSTD_threadPool, ) -> usize; } extern "C" { pub fn ZSTD_createCDict_advanced2( dict: *const ::core::ffi::c_void, dictSize: usize, dictLoadMethod: ZSTD_dictLoadMethod_e, dictContentType: ZSTD_dictContentType_e, cctxParams: *const ZSTD_CCtx_params, customMem: ZSTD_customMem, ) -> *mut ZSTD_CDict; } extern "C" { pub fn ZSTD_createDDict_advanced( dict: *const ::core::ffi::c_void, dictSize: usize, dictLoadMethod: ZSTD_dictLoadMethod_e, dictContentType: ZSTD_dictContentType_e, customMem: ZSTD_customMem, ) -> *mut ZSTD_DDict; } extern "C" { #[doc = " ZSTD_createCDict_byReference() :\n Create a digested dictionary for compression\n Dictionary content is just referenced, not duplicated.\n As a consequence, `dictBuffer` **must** outlive CDict,\n and its content must remain unmodified throughout the lifetime of CDict.\n note: equivalent to 
ZSTD_createCDict_advanced(), with dictLoadMethod==ZSTD_dlm_byRef"] pub fn ZSTD_createCDict_byReference( dictBuffer: *const ::core::ffi::c_void, dictSize: usize, compressionLevel: ::core::ffi::c_int, ) -> *mut ZSTD_CDict; } extern "C" { #[doc = " ZSTD_getCParams() :\n @return ZSTD_compressionParameters structure for a selected compression level and estimated srcSize.\n `estimatedSrcSize` value is optional, select 0 if not known"] pub fn ZSTD_getCParams( compressionLevel: ::core::ffi::c_int, estimatedSrcSize: ::core::ffi::c_ulonglong, dictSize: usize, ) -> ZSTD_compressionParameters; } extern "C" { #[doc = " ZSTD_getParams() :\n same as ZSTD_getCParams(), but @return a full `ZSTD_parameters` object instead of sub-component `ZSTD_compressionParameters`.\n All fields of `ZSTD_frameParameters` are set to default : contentSize=1, checksum=0, noDictID=0"] pub fn ZSTD_getParams( compressionLevel: ::core::ffi::c_int, estimatedSrcSize: ::core::ffi::c_ulonglong, dictSize: usize, ) -> ZSTD_parameters; } extern "C" { #[doc = " ZSTD_checkCParams() :\n Ensure param values remain within authorized range.\n @return 0 on success, or an error code (can be checked with ZSTD_isError())"] pub fn ZSTD_checkCParams(params: ZSTD_compressionParameters) -> usize; } extern "C" { #[doc = " ZSTD_adjustCParams() :\n optimize params for a given `srcSize` and `dictSize`.\n `srcSize` can be unknown, in which case use ZSTD_CONTENTSIZE_UNKNOWN.\n `dictSize` must be `0` when there is no dictionary.\n cPar can be invalid : all parameters will be clamped within valid range in the @return struct.\n This function never fails (wide contract)"] pub fn ZSTD_adjustCParams( cPar: ZSTD_compressionParameters, srcSize: ::core::ffi::c_ulonglong, dictSize: usize, ) -> ZSTD_compressionParameters; } extern "C" { #[doc = " ZSTD_CCtx_setCParams() :\n Set all parameters provided within @p cparams into the working @p cctx.\n Note : if modifying parameters during compression (MT mode only),\n note that changes to the 
.windowLog parameter will be ignored.\n @return 0 on success, or an error code (can be checked with ZSTD_isError()).\n On failure, no parameters are updated."] pub fn ZSTD_CCtx_setCParams( cctx: *mut ZSTD_CCtx, cparams: ZSTD_compressionParameters, ) -> usize; } extern "C" { #[doc = " ZSTD_CCtx_setFParams() :\n Set all parameters provided within @p fparams into the working @p cctx.\n @return 0 on success, or an error code (can be checked with ZSTD_isError())."] pub fn ZSTD_CCtx_setFParams( cctx: *mut ZSTD_CCtx, fparams: ZSTD_frameParameters, ) -> usize; } extern "C" { #[doc = " ZSTD_CCtx_setParams() :\n Set all parameters provided within @p params into the working @p cctx.\n @return 0 on success, or an error code (can be checked with ZSTD_isError())."] pub fn ZSTD_CCtx_setParams( cctx: *mut ZSTD_CCtx, params: ZSTD_parameters, ) -> usize; } extern "C" { #[doc = " ZSTD_compress_advanced() :\n Note : this function is now DEPRECATED.\n It can be replaced by ZSTD_compress2(), in combination with ZSTD_CCtx_setParameter() and other parameter setters.\n This prototype will generate compilation warnings."] pub fn ZSTD_compress_advanced( cctx: *mut ZSTD_CCtx, dst: *mut ::core::ffi::c_void, dstCapacity: usize, src: *const ::core::ffi::c_void, srcSize: usize, dict: *const ::core::ffi::c_void, dictSize: usize, params: ZSTD_parameters, ) -> usize; } extern "C" { #[doc = " ZSTD_compress_usingCDict_advanced() :\n Note : this function is now DEPRECATED.\n It can be replaced by ZSTD_compress2(), in combination with ZSTD_CCtx_loadDictionary() and other parameter setters.\n This prototype will generate compilation warnings."] pub fn ZSTD_compress_usingCDict_advanced( cctx: *mut ZSTD_CCtx, dst: *mut ::core::ffi::c_void, dstCapacity: usize, src: *const ::core::ffi::c_void, srcSize: usize, cdict: *const ZSTD_CDict, fParams: ZSTD_frameParameters, ) -> usize; } extern "C" { #[doc = " ZSTD_CCtx_loadDictionary_byReference() :\n Same as ZSTD_CCtx_loadDictionary(), but dictionary content is 
referenced, instead of being copied into CCtx.\n It saves some memory, but also requires that `dict` outlives its usage within `cctx`"] pub fn ZSTD_CCtx_loadDictionary_byReference( cctx: *mut ZSTD_CCtx, dict: *const ::core::ffi::c_void, dictSize: usize, ) -> usize; } extern "C" { #[doc = " ZSTD_CCtx_loadDictionary_advanced() :\n Same as ZSTD_CCtx_loadDictionary(), but gives finer control over\n how to load the dictionary (by copy ? by reference ?)\n and how to interpret it (automatic ? force raw mode ? full mode only ?)"] pub fn ZSTD_CCtx_loadDictionary_advanced( cctx: *mut ZSTD_CCtx, dict: *const ::core::ffi::c_void, dictSize: usize, dictLoadMethod: ZSTD_dictLoadMethod_e, dictContentType: ZSTD_dictContentType_e, ) -> usize; } extern "C" { #[doc = " ZSTD_CCtx_refPrefix_advanced() :\n Same as ZSTD_CCtx_refPrefix(), but gives finer control over\n how to interpret prefix content (automatic ? force raw mode (default) ? full mode only ?)"] pub fn ZSTD_CCtx_refPrefix_advanced( cctx: *mut ZSTD_CCtx, prefix: *const ::core::ffi::c_void, prefixSize: usize, dictContentType: ZSTD_dictContentType_e, ) -> usize; } extern "C" { #[doc = " ZSTD_CCtx_getParameter() :\n Get the requested compression parameter value, selected by enum ZSTD_cParameter,\n and store it into int* value.\n @return : 0, or an error code (which can be tested with ZSTD_isError())."] pub fn ZSTD_CCtx_getParameter( cctx: *const ZSTD_CCtx, param: ZSTD_cParameter, value: *mut ::core::ffi::c_int, ) -> usize; } extern "C" { #[doc = " ZSTD_CCtx_params :\n Quick howto :\n - ZSTD_createCCtxParams() : Create a ZSTD_CCtx_params structure\n - ZSTD_CCtxParams_setParameter() : Push parameters one by one into\n an existing ZSTD_CCtx_params structure.\n This is similar to\n ZSTD_CCtx_setParameter().\n - ZSTD_CCtx_setParametersUsingCCtxParams() : Apply parameters to\n an existing CCtx.\n These parameters will be applied to\n all subsequent frames.\n - ZSTD_compressStream2() : Do compression using the CCtx.\n - 
ZSTD_freeCCtxParams() : Free the memory, accept NULL pointer.\n\n This can be used with ZSTD_estimateCCtxSize_advanced_usingCCtxParams()\n for static allocation of CCtx for single-threaded compression."] pub fn ZSTD_createCCtxParams() -> *mut ZSTD_CCtx_params; } extern "C" { pub fn ZSTD_freeCCtxParams(params: *mut ZSTD_CCtx_params) -> usize; } extern "C" { #[doc = " ZSTD_CCtxParams_reset() :\n Reset params to default values."] pub fn ZSTD_CCtxParams_reset(params: *mut ZSTD_CCtx_params) -> usize; } extern "C" { #[doc = " ZSTD_CCtxParams_init() :\n Initializes the compression parameters of cctxParams according to\n compression level. All other parameters are reset to their default values."] pub fn ZSTD_CCtxParams_init( cctxParams: *mut ZSTD_CCtx_params, compressionLevel: ::core::ffi::c_int, ) -> usize; } extern "C" { #[doc = " ZSTD_CCtxParams_init_advanced() :\n Initializes the compression and frame parameters of cctxParams according to\n params. All other parameters are reset to their default values."] pub fn ZSTD_CCtxParams_init_advanced( cctxParams: *mut ZSTD_CCtx_params, params: ZSTD_parameters, ) -> usize; } extern "C" { #[doc = " ZSTD_CCtxParams_setParameter() : Requires v1.4.0+\n Similar to ZSTD_CCtx_setParameter.\n Set one compression parameter, selected by enum ZSTD_cParameter.\n Parameters must be applied to a ZSTD_CCtx using\n ZSTD_CCtx_setParametersUsingCCtxParams().\n @result : a code representing success or failure (which can be tested with\n ZSTD_isError())."] pub fn ZSTD_CCtxParams_setParameter( params: *mut ZSTD_CCtx_params, param: ZSTD_cParameter, value: ::core::ffi::c_int, ) -> usize; } extern "C" { #[doc = " ZSTD_CCtxParams_getParameter() :\n Similar to ZSTD_CCtx_getParameter.\n Get the requested value of one compression parameter, selected by enum ZSTD_cParameter.\n @result : 0, or an error code (which can be tested with ZSTD_isError())."] pub fn ZSTD_CCtxParams_getParameter( params: *const ZSTD_CCtx_params, param: ZSTD_cParameter, value: *mut 
::core::ffi::c_int, ) -> usize; } extern "C" { #[doc = " ZSTD_CCtx_setParametersUsingCCtxParams() :\n Apply a set of ZSTD_CCtx_params to the compression context.\n This can be done even after compression is started,\n if nbWorkers==0, this will have no impact until a new compression is started.\n if nbWorkers>=1, new parameters will be picked up at next job,\n with a few restrictions (windowLog, pledgedSrcSize, nbWorkers, jobSize, and overlapLog are not updated)."] pub fn ZSTD_CCtx_setParametersUsingCCtxParams( cctx: *mut ZSTD_CCtx, params: *const ZSTD_CCtx_params, ) -> usize; } extern "C" { #[doc = " ZSTD_compressStream2_simpleArgs() :\n Same as ZSTD_compressStream2(),\n but using only integral types as arguments.\n This variant might be helpful for binders from dynamic languages\n which have troubles handling structures containing memory pointers."] pub fn ZSTD_compressStream2_simpleArgs( cctx: *mut ZSTD_CCtx, dst: *mut ::core::ffi::c_void, dstCapacity: usize, dstPos: *mut usize, src: *const ::core::ffi::c_void, srcSize: usize, srcPos: *mut usize, endOp: ZSTD_EndDirective, ) -> usize; } extern "C" { #[doc = " ZSTD_isFrame() :\n Tells if the content of `buffer` starts with a valid Frame Identifier.\n Note : Frame Identifier is 4 bytes. 
If `size < 4`, @return will always be 0.\n Note 2 : Legacy Frame Identifiers are considered valid only if Legacy Support is enabled.\n Note 3 : Skippable Frame Identifiers are considered valid."] pub fn ZSTD_isFrame( buffer: *const ::core::ffi::c_void, size: usize, ) -> ::core::ffi::c_uint; } extern "C" { #[doc = " ZSTD_createDDict_byReference() :\n Create a digested dictionary, ready to start decompression operation without startup delay.\n Dictionary content is referenced, and therefore stays in dictBuffer.\n It is important that dictBuffer outlives DDict,\n it must remain read accessible throughout the lifetime of DDict"] pub fn ZSTD_createDDict_byReference( dictBuffer: *const ::core::ffi::c_void, dictSize: usize, ) -> *mut ZSTD_DDict; } extern "C" { #[doc = " ZSTD_DCtx_loadDictionary_byReference() :\n Same as ZSTD_DCtx_loadDictionary(),\n but references `dict` content instead of copying it into `dctx`.\n This saves memory if `dict` remains around.,\n However, it's imperative that `dict` remains accessible (and unmodified) while being used, so it must outlive decompression."] pub fn ZSTD_DCtx_loadDictionary_byReference( dctx: *mut ZSTD_DCtx, dict: *const ::core::ffi::c_void, dictSize: usize, ) -> usize; } extern "C" { #[doc = " ZSTD_DCtx_loadDictionary_advanced() :\n Same as ZSTD_DCtx_loadDictionary(),\n but gives direct control over\n how to load the dictionary (by copy ? by reference ?)\n and how to interpret it (automatic ? force raw mode ? full mode only ?)."] pub fn ZSTD_DCtx_loadDictionary_advanced( dctx: *mut ZSTD_DCtx, dict: *const ::core::ffi::c_void, dictSize: usize, dictLoadMethod: ZSTD_dictLoadMethod_e, dictContentType: ZSTD_dictContentType_e, ) -> usize; } extern "C" { #[doc = " ZSTD_DCtx_refPrefix_advanced() :\n Same as ZSTD_DCtx_refPrefix(), but gives finer control over\n how to interpret prefix content (automatic ? force raw mode (default) ? 
full mode only ?)"] pub fn ZSTD_DCtx_refPrefix_advanced( dctx: *mut ZSTD_DCtx, prefix: *const ::core::ffi::c_void, prefixSize: usize, dictContentType: ZSTD_dictContentType_e, ) -> usize; } extern "C" { #[doc = " ZSTD_DCtx_setMaxWindowSize() :\n Refuses allocating internal buffers for frames requiring a window size larger than provided limit.\n This protects a decoder context from reserving too much memory for itself (potential attack scenario).\n This parameter is only useful in streaming mode, since no internal buffer is allocated in single-pass mode.\n By default, a decompression context accepts all window sizes <= (1 << ZSTD_WINDOWLOG_LIMIT_DEFAULT)\n @return : 0, or an error code (which can be tested using ZSTD_isError())."] pub fn ZSTD_DCtx_setMaxWindowSize( dctx: *mut ZSTD_DCtx, maxWindowSize: usize, ) -> usize; } extern "C" { #[doc = " ZSTD_DCtx_getParameter() :\n Get the requested decompression parameter value, selected by enum ZSTD_dParameter,\n and store it into int* value.\n @return : 0, or an error code (which can be tested with ZSTD_isError())."] pub fn ZSTD_DCtx_getParameter( dctx: *mut ZSTD_DCtx, param: ZSTD_dParameter, value: *mut ::core::ffi::c_int, ) -> usize; } extern "C" { #[doc = " ZSTD_DCtx_setFormat() :\n This function is REDUNDANT. 
Prefer ZSTD_DCtx_setParameter().\n Instruct the decoder context about what kind of data to decode next.\n This instruction is mandatory to decode data without a fully-formed header,\n such ZSTD_f_zstd1_magicless for example.\n @return : 0, or an error code (which can be tested using ZSTD_isError())."] pub fn ZSTD_DCtx_setFormat( dctx: *mut ZSTD_DCtx, format: ZSTD_format_e, ) -> usize; } extern "C" { #[doc = " ZSTD_decompressStream_simpleArgs() :\n Same as ZSTD_decompressStream(),\n but using only integral types as arguments.\n This can be helpful for binders from dynamic languages\n which have troubles handling structures containing memory pointers."] pub fn ZSTD_decompressStream_simpleArgs( dctx: *mut ZSTD_DCtx, dst: *mut ::core::ffi::c_void, dstCapacity: usize, dstPos: *mut usize, src: *const ::core::ffi::c_void, srcSize: usize, srcPos: *mut usize, ) -> usize; } extern "C" { #[doc = " ZSTD_initCStream_srcSize() :\n This function is DEPRECATED, and equivalent to:\n ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);\n ZSTD_CCtx_refCDict(zcs, NULL); // clear the dictionary (if any)\n ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel);\n ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);\n\n pledgedSrcSize must be correct. If it is not known at init time, use\n ZSTD_CONTENTSIZE_UNKNOWN. Note that, for compatibility with older programs,\n \"0\" also disables frame content size field. 
It may be enabled in the future.\n This prototype will generate compilation warnings."] pub fn ZSTD_initCStream_srcSize( zcs: *mut ZSTD_CStream, compressionLevel: ::core::ffi::c_int, pledgedSrcSize: ::core::ffi::c_ulonglong, ) -> usize; } extern "C" { #[doc = " ZSTD_initCStream_usingDict() :\n This function is DEPRECATED, and is equivalent to:\n ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);\n ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel);\n ZSTD_CCtx_loadDictionary(zcs, dict, dictSize);\n\n Creates of an internal CDict (incompatible with static CCtx), except if\n dict == NULL or dictSize < 8, in which case no dict is used.\n Note: dict is loaded with ZSTD_dct_auto (treated as a full zstd dictionary if\n it begins with ZSTD_MAGIC_DICTIONARY, else as raw content) and ZSTD_dlm_byCopy.\n This prototype will generate compilation warnings."] pub fn ZSTD_initCStream_usingDict( zcs: *mut ZSTD_CStream, dict: *const ::core::ffi::c_void, dictSize: usize, compressionLevel: ::core::ffi::c_int, ) -> usize; } extern "C" { #[doc = " ZSTD_initCStream_advanced() :\n This function is DEPRECATED, and is equivalent to:\n ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);\n ZSTD_CCtx_setParams(zcs, params);\n ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);\n ZSTD_CCtx_loadDictionary(zcs, dict, dictSize);\n\n dict is loaded with ZSTD_dct_auto and ZSTD_dlm_byCopy.\n pledgedSrcSize must be correct.\n If srcSize is not known at init time, use value ZSTD_CONTENTSIZE_UNKNOWN.\n This prototype will generate compilation warnings."] pub fn ZSTD_initCStream_advanced( zcs: *mut ZSTD_CStream, dict: *const ::core::ffi::c_void, dictSize: usize, params: ZSTD_parameters, pledgedSrcSize: ::core::ffi::c_ulonglong, ) -> usize; } extern "C" { #[doc = " ZSTD_initCStream_usingCDict() :\n This function is DEPRECATED, and equivalent to:\n ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);\n ZSTD_CCtx_refCDict(zcs, cdict);\n\n note : cdict will just be referenced, and must outlive 
compression session\n This prototype will generate compilation warnings."] pub fn ZSTD_initCStream_usingCDict( zcs: *mut ZSTD_CStream, cdict: *const ZSTD_CDict, ) -> usize; }
// NOTE(review): rust-bindgen output — do not hand-edit; regenerate from zstd.h if signatures change.
// The declarations in this section bind zstd's DEPRECATED streaming initializers (see the doc strings).
extern "C" { #[doc = " ZSTD_initCStream_usingCDict_advanced() :\n This function is DEPRECATED, and is equivalent to:\n ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);\n ZSTD_CCtx_setFParams(zcs, fParams);\n ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);\n ZSTD_CCtx_refCDict(zcs, cdict);\n\n same as ZSTD_initCStream_usingCDict(), with control over frame parameters.\n pledgedSrcSize must be correct. If srcSize is not known at init time, use\n value ZSTD_CONTENTSIZE_UNKNOWN.\n This prototype will generate compilation warnings."] pub fn ZSTD_initCStream_usingCDict_advanced( zcs: *mut ZSTD_CStream, cdict: *const ZSTD_CDict, fParams: ZSTD_frameParameters, pledgedSrcSize: ::core::ffi::c_ulonglong, ) -> usize; } extern "C" { #[doc = " ZSTD_resetCStream() :\n This function is DEPRECATED, and is equivalent to:\n ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);\n ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);\n Note: ZSTD_resetCStream() interprets pledgedSrcSize == 0 as ZSTD_CONTENTSIZE_UNKNOWN, but\n ZSTD_CCtx_setPledgedSrcSize() does not do the same, so ZSTD_CONTENTSIZE_UNKNOWN must be\n explicitly specified.\n\n start a new frame, using same parameters from previous frame.\n This is typically useful to skip dictionary loading stage, since it will reuse it in-place.\n Note that zcs must be init at least once before using ZSTD_resetCStream().\n If pledgedSrcSize is not known at reset time, use macro ZSTD_CONTENTSIZE_UNKNOWN.\n If pledgedSrcSize > 0, its value must be correct, as it will be written in header, and controlled at the end.\n For the time being, pledgedSrcSize==0 is interpreted as \"srcSize unknown\" for compatibility with older programs,\n but it will change to mean \"empty\" in future version, so use macro ZSTD_CONTENTSIZE_UNKNOWN instead.\n @return : 0, or an error code (which can be 
tested using ZSTD_isError())\n This prototype will generate compilation warnings."] pub fn ZSTD_resetCStream( zcs: *mut ZSTD_CStream, pledgedSrcSize: ::core::ffi::c_ulonglong, ) -> usize; }
// Progress counters for an ongoing compression; returned by value from ZSTD_getFrameProgression().
#[repr(C)] #[derive(Debug, Copy, Clone)] pub struct ZSTD_frameProgression { pub ingested: ::core::ffi::c_ulonglong, pub consumed: ::core::ffi::c_ulonglong, pub produced: ::core::ffi::c_ulonglong, pub flushed: ::core::ffi::c_ulonglong, pub currentJobID: ::core::ffi::c_uint, pub nbActiveWorkers: ::core::ffi::c_uint, } extern "C" { pub fn ZSTD_getFrameProgression( cctx: *const ZSTD_CCtx, ) -> ZSTD_frameProgression; } extern "C" { #[doc = " ZSTD_toFlushNow() :\n Tell how many bytes are ready to be flushed immediately.\n Useful for multithreading scenarios (nbWorkers >= 1).\n Probe the oldest active job, defined as oldest job not yet entirely flushed,\n and check its output buffer.\n @return : amount of data stored in oldest job and ready to be flushed immediately.\n if @return == 0, it means either :\n + there is no active job (could be checked with ZSTD_frameProgression()), or\n + oldest job is still actively compressing data,\n but everything it has produced has also been flushed so far,\n therefore flush speed is limited by production speed of oldest job\n irrespective of the speed of concurrent (and newer) jobs."] pub fn ZSTD_toFlushNow(cctx: *mut ZSTD_CCtx) -> usize; } extern "C" { #[doc = " This function is deprecated, and is equivalent to:\n\n ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);\n ZSTD_DCtx_loadDictionary(zds, dict, dictSize);\n\n note: no dictionary will be used if dict == NULL or dictSize < 8"] pub fn ZSTD_initDStream_usingDict( zds: *mut ZSTD_DStream, dict: *const ::core::ffi::c_void, dictSize: usize, ) -> usize; } extern "C" { #[doc = " This function is deprecated, and is equivalent to:\n\n ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);\n ZSTD_DCtx_refDDict(zds, ddict);\n\n note : ddict is referenced, it must outlive decompression session"] pub fn 
ZSTD_initDStream_usingDDict( zds: *mut ZSTD_DStream, ddict: *const ZSTD_DDict, ) -> usize; } extern "C" { #[doc = " This function is deprecated, and is equivalent to:\n\n ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);\n\n reuse decompression parameters from previous init; saves dictionary loading"] pub fn ZSTD_resetDStream(zds: *mut ZSTD_DStream) -> usize; }
// Nullable (Option-wrapped) C function pointer: block-level external sequence producer callback.
// The doc for ZSTD_registerSequenceProducer states it is cleared by registering a NULL pointer (None here).
pub type ZSTD_sequenceProducer_F = ::core::option::Option< unsafe extern "C" fn( sequenceProducerState: *mut ::core::ffi::c_void, outSeqs: *mut ZSTD_Sequence, outSeqsCapacity: usize, src: *const ::core::ffi::c_void, srcSize: usize, dict: *const ::core::ffi::c_void, dictSize: usize, compressionLevel: ::core::ffi::c_int, windowSize: usize, ) -> usize, >; extern "C" { #[doc = " ZSTD_registerSequenceProducer() :\n Instruct zstd to use a block-level external sequence producer function.\n\n The sequenceProducerState must be initialized by the caller, and the caller is\n responsible for managing its lifetime. This parameter is sticky across\n compressions. It will remain set until the user explicitly resets compression\n parameters.\n\n Sequence producer registration is considered to be an \"advanced parameter\",\n part of the \"advanced API\". This means it will only have an effect on compression\n APIs which respect advanced parameters, such as compress2() and compressStream2().\n Older compression APIs such as compressCCtx(), which predate the introduction of\n \"advanced parameters\", will ignore any external sequence producer setting.\n\n The sequence producer can be \"cleared\" by registering a NULL function pointer. 
This\n removes all limitations described above in the \"LIMITATIONS\" section of the API docs.\n\n The user is strongly encouraged to read the full API documentation (above) before\n calling this function."] pub fn ZSTD_registerSequenceProducer( cctx: *mut ZSTD_CCtx, sequenceProducerState: *mut ::core::ffi::c_void, sequenceProducer: ZSTD_sequenceProducer_F, ); } extern "C" { #[doc = " ZSTD_CCtxParams_registerSequenceProducer() :\n Same as ZSTD_registerSequenceProducer(), but operates on ZSTD_CCtx_params.\n This is used for accurate size estimation with ZSTD_estimateCCtxSize_usingCCtxParams(),\n which is needed when creating a ZSTD_CCtx with ZSTD_initStaticCCtx().\n\n If you are using the external sequence producer API in a scenario where ZSTD_initStaticCCtx()\n is required, then this function is for you. Otherwise, you probably don't need it.\n\n See tests/zstreamtest.c for example usage."] pub fn ZSTD_CCtxParams_registerSequenceProducer( params: *mut ZSTD_CCtx_params, sequenceProducerState: *mut ::core::ffi::c_void, sequenceProducer: ZSTD_sequenceProducer_F, ); }
// Buffer-less synchronous streaming compression (advanced API; usage contract in the doc string).
extern "C" { #[doc = "Buffer-less streaming compression (synchronous mode)\n\nA ZSTD_CCtx object is required to track streaming operations.\nUse ZSTD_createCCtx() / ZSTD_freeCCtx() to manage resource.\nZSTD_CCtx object can be reused multiple times within successive compression operations.\n\nStart by initializing a context.\nUse ZSTD_compressBegin(), or ZSTD_compressBegin_usingDict() for dictionary compression.\n\nThen, consume your input using ZSTD_compressContinue().\nThere are some important considerations to keep in mind when using this advanced function :\n- ZSTD_compressContinue() has no internal buffer. 
It uses externally provided buffers only.\n- Interface is synchronous : input is consumed entirely and produces 1+ compressed blocks.\n- Caller must ensure there is enough space in `dst` to store compressed data under worst case scenario.\nWorst case evaluation is provided by ZSTD_compressBound().\nZSTD_compressContinue() doesn't guarantee recover after a failed compression.\n- ZSTD_compressContinue() presumes prior input ***is still accessible and unmodified*** (up to maximum distance size, see WindowLog).\nIt remembers all previous contiguous blocks, plus one separated memory segment (which can itself consists of multiple contiguous blocks)\n- ZSTD_compressContinue() detects that prior input has been overwritten when `src` buffer overlaps.\nIn which case, it will \"discard\" the relevant memory section from its history.\n\nFinish a frame with ZSTD_compressEnd(), which will write the last block(s) and optional checksum.\nIt's possible to use srcSize==0, in which case, it will write a final empty block to end the frame.\nWithout last block mark, frames are considered unfinished (hence corrupted) by compliant decoders.\n\n`ZSTD_CCtx` object can be reused (ZSTD_compressBegin()) to compress again."] pub fn ZSTD_compressBegin( cctx: *mut ZSTD_CCtx, compressionLevel: ::core::ffi::c_int, ) -> usize; } extern "C" { pub fn ZSTD_compressBegin_usingDict( cctx: *mut ZSTD_CCtx, dict: *const ::core::ffi::c_void, dictSize: usize, compressionLevel: ::core::ffi::c_int, ) -> usize; } extern "C" { pub fn ZSTD_compressBegin_usingCDict( cctx: *mut ZSTD_CCtx, cdict: *const ZSTD_CDict, ) -> usize; } extern "C" { pub fn ZSTD_copyCCtx( cctx: *mut ZSTD_CCtx, preparedCCtx: *const ZSTD_CCtx, pledgedSrcSize: ::core::ffi::c_ulonglong, ) -> usize; } extern "C" { pub fn ZSTD_compressContinue( cctx: *mut ZSTD_CCtx, dst: *mut ::core::ffi::c_void, dstCapacity: usize, src: *const ::core::ffi::c_void, srcSize: usize, ) -> usize; } extern "C" { pub fn ZSTD_compressEnd( cctx: *mut ZSTD_CCtx, dst: *mut 
::core::ffi::c_void, dstCapacity: usize, src: *const ::core::ffi::c_void, srcSize: usize, ) -> usize; }
// NOTE(review): generated bindings — signatures must stay byte-for-byte in sync with zstd.h; regenerate rather than edit.
extern "C" { pub fn ZSTD_compressBegin_advanced( cctx: *mut ZSTD_CCtx, dict: *const ::core::ffi::c_void, dictSize: usize, params: ZSTD_parameters, pledgedSrcSize: ::core::ffi::c_ulonglong, ) -> usize; } extern "C" { pub fn ZSTD_compressBegin_usingCDict_advanced( cctx: *mut ZSTD_CCtx, cdict: *const ZSTD_CDict, fParams: ZSTD_frameParameters, pledgedSrcSize: ::core::ffi::c_ulonglong, ) -> usize; }
// Buffer-less synchronous streaming decompression; the (large) doc string below carries the full usage contract.
extern "C" { #[doc = "Buffer-less streaming decompression (synchronous mode)\n\nA ZSTD_DCtx object is required to track streaming operations.\nUse ZSTD_createDCtx() / ZSTD_freeDCtx() to manage it.\nA ZSTD_DCtx object can be reused multiple times.\n\nFirst typical operation is to retrieve frame parameters, using ZSTD_getFrameHeader().\nFrame header is extracted from the beginning of compressed frame, so providing only the frame's beginning is enough.\nData fragment must be large enough to ensure successful decoding.\n`ZSTD_frameHeaderSize_max` bytes is guaranteed to always be large enough.\nresult : 0 : successful decoding, the `ZSTD_frameHeader` structure is correctly filled.\n>0 : `srcSize` is too small, please provide at least result bytes on next attempt.\nerrorCode, which can be tested using ZSTD_isError().\n\nIt fills a ZSTD_FrameHeader structure with important information to correctly decode the frame,\nsuch as the dictionary ID, content size, or maximum back-reference distance (`windowSize`).\nNote that these values could be wrong, either because of data corruption, or because a 3rd party deliberately spoofs false information.\nAs a consequence, check that values remain within valid application range.\nFor example, do not allocate memory blindly, check that `windowSize` is within expectation.\nEach application can set its own limits, depending on local restrictions.\nFor extended interoperability, it is recommended to support `windowSize` of at least 8 
MB.\n\nZSTD_decompressContinue() needs previous data blocks during decompression, up to `windowSize` bytes.\nZSTD_decompressContinue() is very sensitive to contiguity,\nif 2 blocks don't follow each other, make sure that either the compressor breaks contiguity at the same place,\nor that previous contiguous segment is large enough to properly handle maximum back-reference distance.\nThere are multiple ways to guarantee this condition.\n\nThe most memory efficient way is to use a round buffer of sufficient size.\nSufficient size is determined by invoking ZSTD_decodingBufferSize_min(),\nwhich can return an error code if required value is too large for current system (in 32-bits mode).\nIn a round buffer methodology, ZSTD_decompressContinue() decompresses each block next to previous one,\nup to the moment there is not enough room left in the buffer to guarantee decoding another full block,\nwhich maximum size is provided in `ZSTD_frameHeader` structure, field `blockSizeMax`.\nAt which point, decoding can resume from the beginning of the buffer.\nNote that already decoded data stored in the buffer should be flushed before being overwritten.\n\nThere are alternatives possible, for example using two or more buffers of size `windowSize` each, though they consume more memory.\n\nFinally, if you control the compression process, you can also ignore all buffer size rules,\nas long as the encoder and decoder progress in \"lock-step\",\naka use exactly the same buffer sizes, break contiguity at the same place, etc.\n\nOnce buffers are setup, start decompression, with ZSTD_decompressBegin().\nIf decompression requires a dictionary, use ZSTD_decompressBegin_usingDict() or ZSTD_decompressBegin_usingDDict().\n\nThen use ZSTD_nextSrcSizeToDecompress() and ZSTD_decompressContinue() alternatively.\nZSTD_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTD_decompressContinue().\nZSTD_decompressContinue() requires this _exact_ amount of bytes, or it will 
fail.\n\nresult of ZSTD_decompressContinue() is the number of bytes regenerated within 'dst' (necessarily <= dstCapacity).\nIt can be zero : it just means ZSTD_decompressContinue() has decoded some metadata item.\nIt can also be an error code, which can be tested with ZSTD_isError().\n\nA frame is fully decoded when ZSTD_nextSrcSizeToDecompress() returns zero.\nContext can then be reset to start a new decompression.\n\nNote : it's possible to know if next input to present is a header or a block, using ZSTD_nextInputType().\nThis information is not required to properly decode a frame.\n\n== Special case : skippable frames ==\n\nSkippable frames allow integration of user-defined data into a flow of concatenated frames.\nSkippable frames will be ignored (skipped) by decompressor.\nThe format of skippable frames is as follows :\na) Skippable frame ID - 4 Bytes, Little endian format, any value from 0x184D2A50 to 0x184D2A5F\nb) Frame Size - 4 Bytes, Little endian format, unsigned 32-bits\nc) Frame Content - any content (User Data) of length equal to Frame Size\nFor skippable frames ZSTD_getFrameHeader() returns zfhPtr->frameType==ZSTD_skippableFrame.\nFor skippable frames ZSTD_decompressContinue() always returns 0 : it only skips the content."] pub fn ZSTD_decodingBufferSize_min( windowSize: ::core::ffi::c_ulonglong, frameContentSize: ::core::ffi::c_ulonglong, ) -> usize; } extern "C" { pub fn ZSTD_decompressBegin(dctx: *mut ZSTD_DCtx) -> usize; } extern "C" { pub fn ZSTD_decompressBegin_usingDict( dctx: *mut ZSTD_DCtx, dict: *const ::core::ffi::c_void, dictSize: usize, ) -> usize; } extern "C" { pub fn ZSTD_decompressBegin_usingDDict( dctx: *mut ZSTD_DCtx, ddict: *const ZSTD_DDict, ) -> usize; } extern "C" { pub fn ZSTD_nextSrcSizeToDecompress(dctx: *mut ZSTD_DCtx) -> usize; } extern "C" { pub fn ZSTD_decompressContinue( dctx: *mut ZSTD_DCtx, dst: *mut ::core::ffi::c_void, dstCapacity: usize, src: *const ::core::ffi::c_void, srcSize: usize, ) -> usize; } extern "C" { 
pub fn ZSTD_copyDCtx(dctx: *mut ZSTD_DCtx, preparedDCtx: *const ZSTD_DCtx); }
// #[repr(u32)] pins the discriminants so the Rust enum matches the C enum's layout and values.
#[repr(u32)] #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] pub enum ZSTD_nextInputType_e { ZSTDnit_frameHeader = 0, ZSTDnit_blockHeader = 1, ZSTDnit_block = 2, ZSTDnit_lastBlock = 3, ZSTDnit_checksum = 4, ZSTDnit_skippableFrame = 5, } extern "C" { pub fn ZSTD_nextInputType(dctx: *mut ZSTD_DCtx) -> ZSTD_nextInputType_e; }
// Deprecated raw-block API: compress/decompress single blocks without frame metadata (see doc string).
extern "C" { #[doc = "This API is deprecated in favor of the regular compression API.\nYou can get the frame header down to 2 bytes by setting:\n- ZSTD_c_format = ZSTD_f_zstd1_magicless\n- ZSTD_c_contentSizeFlag = 0\n- ZSTD_c_checksumFlag = 0\n- ZSTD_c_dictIDFlag = 0\n\nThis API is not as well tested as our normal API, so we recommend not using it.\nWe will be removing it in a future version. If the normal API doesn't provide\nthe functionality you need, please open a GitHub issue.\n\nBlock functions produce and decode raw zstd blocks, without frame metadata.\nFrame metadata cost is typically ~12 bytes, which can be non-negligible for very small blocks (< 100 bytes).\nBut users will have to take in charge needed metadata to regenerate data, such as compressed and content sizes.\n\nA few rules to respect :\n- Compressing and decompressing require a context structure\n+ Use ZSTD_createCCtx() and ZSTD_createDCtx()\n- It is necessary to init context before starting\n+ compression : any ZSTD_compressBegin*() variant, including with dictionary\n+ decompression : any ZSTD_decompressBegin*() variant, including with dictionary\n- Block size is limited, it must be <= ZSTD_getBlockSize() <= ZSTD_BLOCKSIZE_MAX == 128 KB\n+ If input is larger than a block size, it's necessary to split input data into multiple blocks\n+ For inputs larger than a single block, consider using regular ZSTD_compress() instead.\nFrame metadata is not that costly, and quickly becomes negligible as source size grows larger than a block.\n- When a block is considered not compressible enough, 
ZSTD_compressBlock() result will be 0 (zero) !\n===> In which case, nothing is produced into `dst` !\n+ User __must__ test for such outcome and deal directly with uncompressed data\n+ A block cannot be declared incompressible if ZSTD_compressBlock() return value was != 0.\nDoing so would mess up with statistics history, leading to potential data corruption.\n+ ZSTD_decompressBlock() _doesn't accept uncompressed data as input_ !!\n+ In case of multiple successive blocks, should some of them be uncompressed,\ndecoder must be informed of their existence in order to follow proper history.\nUse ZSTD_insertBlock() for such a case."] pub fn ZSTD_getBlockSize(cctx: *const ZSTD_CCtx) -> usize; } extern "C" { pub fn ZSTD_compressBlock( cctx: *mut ZSTD_CCtx, dst: *mut ::core::ffi::c_void, dstCapacity: usize, src: *const ::core::ffi::c_void, srcSize: usize, ) -> usize; } extern "C" { pub fn ZSTD_decompressBlock( dctx: *mut ZSTD_DCtx, dst: *mut ::core::ffi::c_void, dstCapacity: usize, src: *const ::core::ffi::c_void, srcSize: usize, ) -> usize; } extern "C" { pub fn ZSTD_insertBlock( dctx: *mut ZSTD_DCtx, blockStart: *const ::core::ffi::c_void, blockSize: usize, ) -> usize; }
/* This file is auto-generated from the public API of the zstd library. It is released under the same BSD license. BSD License For Zstandard software Copyright (c) Meta Platforms, Inc. and affiliates. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name Facebook, nor Meta, nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /* automatically generated by rust-bindgen 0.71.1 */
// Bindings to zstd's seekable-format contrib API. All handle types below are opaque
// (zero-sized `_unused: [u8; 0]` markers) and are only ever used behind raw pointers.
pub const ZSTD_seekTableFooterSize: u32 = 9; pub const ZSTD_SEEKABLE_MAGICNUMBER: u32 = 2408770225; pub const ZSTD_SEEKABLE_MAXFRAMES: u32 = 134217728; pub const ZSTD_SEEKABLE_MAX_FRAME_DECOMPRESSED_SIZE: u32 = 1073741824; pub const ZSTD_SEEKABLE_FRAMEINDEX_TOOLARGE: i32 = -2; #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct ZSTD_seekable_CStream_s { _unused: [u8; 0], } pub type ZSTD_seekable_CStream = ZSTD_seekable_CStream_s; #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct ZSTD_seekable_s { _unused: [u8; 0], } pub type ZSTD_seekable = ZSTD_seekable_s; #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct ZSTD_seekTable_s { _unused: [u8; 0], } pub type ZSTD_seekTable = ZSTD_seekTable_s; extern "C" { pub fn ZSTD_seekable_createCStream() -> *mut ZSTD_seekable_CStream; } extern "C" { pub fn ZSTD_seekable_freeCStream(zcs: *mut ZSTD_seekable_CStream) -> usize; } extern "C" { pub fn ZSTD_seekable_initCStream( zcs: *mut ZSTD_seekable_CStream, compressionLevel: ::core::ffi::c_int, checksumFlag: ::core::ffi::c_int, maxFrameSize: ::core::ffi::c_uint, ) -> usize; } extern "C" { pub fn ZSTD_seekable_compressStream( zcs: *mut ZSTD_seekable_CStream, output: *mut ZSTD_outBuffer, input: *mut ZSTD_inBuffer, ) -> usize; } extern "C" { pub fn ZSTD_seekable_endFrame( zcs: *mut ZSTD_seekable_CStream, output: *mut ZSTD_outBuffer, ) -> usize; } extern "C" { pub fn ZSTD_seekable_endStream( zcs: *mut ZSTD_seekable_CStream, output: *mut ZSTD_outBuffer, ) -> usize; } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct ZSTD_frameLog_s { _unused: [u8; 0], } pub type ZSTD_frameLog = ZSTD_frameLog_s; extern "C" { pub fn ZSTD_seekable_createFrameLog( checksumFlag: ::core::ffi::c_int, ) -> *mut ZSTD_frameLog; } extern "C" { pub fn ZSTD_seekable_freeFrameLog(fl: *mut ZSTD_frameLog) -> usize; } extern "C" { pub fn ZSTD_seekable_logFrame( fl: *mut ZSTD_frameLog, compressedSize: ::core::ffi::c_uint, decompressedSize: ::core::ffi::c_uint, 
checksum: ::core::ffi::c_uint, ) -> usize; } extern "C" { pub fn ZSTD_seekable_writeSeekTable( fl: *mut ZSTD_frameLog, output: *mut ZSTD_outBuffer, ) -> usize; } extern "C" { pub fn ZSTD_seekable_create() -> *mut ZSTD_seekable; } extern "C" { pub fn ZSTD_seekable_free(zs: *mut ZSTD_seekable) -> usize; } extern "C" { pub fn ZSTD_seekable_initBuff( zs: *mut ZSTD_seekable, src: *const ::core::ffi::c_void, srcSize: usize, ) -> usize; } extern "C" { pub fn ZSTD_seekable_decompress( zs: *mut ZSTD_seekable, dst: *mut ::core::ffi::c_void, dstSize: usize, offset: ::core::ffi::c_ulonglong, ) -> usize; } extern "C" { pub fn ZSTD_seekable_decompressFrame( zs: *mut ZSTD_seekable, dst: *mut ::core::ffi::c_void, dstSize: usize, frameIndex: ::core::ffi::c_uint, ) -> usize; } extern "C" { pub fn ZSTD_seekable_getNumFrames( zs: *const ZSTD_seekable, ) -> ::core::ffi::c_uint; } extern "C" { pub fn ZSTD_seekable_getFrameCompressedOffset( zs: *const ZSTD_seekable, frameIndex: ::core::ffi::c_uint, ) -> ::core::ffi::c_ulonglong; } extern "C" { pub fn ZSTD_seekable_getFrameDecompressedOffset( zs: *const ZSTD_seekable, frameIndex: ::core::ffi::c_uint, ) -> ::core::ffi::c_ulonglong; } extern "C" { pub fn ZSTD_seekable_getFrameCompressedSize( zs: *const ZSTD_seekable, frameIndex: ::core::ffi::c_uint, ) -> usize; } extern "C" { pub fn ZSTD_seekable_getFrameDecompressedSize( zs: *const ZSTD_seekable, frameIndex: ::core::ffi::c_uint, ) -> usize; } extern "C" { pub fn ZSTD_seekable_offsetToFrameIndex( zs: *const ZSTD_seekable, offset: ::core::ffi::c_ulonglong, ) -> ::core::ffi::c_uint; } extern "C" { pub fn ZSTD_seekTable_create_fromSeekable( zs: *const ZSTD_seekable, ) -> *mut ZSTD_seekTable; } extern "C" { pub fn ZSTD_seekTable_free(st: *mut ZSTD_seekTable) -> usize; } extern "C" { pub fn ZSTD_seekTable_getNumFrames( st: *const ZSTD_seekTable, ) -> ::core::ffi::c_uint; } extern "C" { pub fn ZSTD_seekTable_getFrameCompressedOffset( st: *const ZSTD_seekTable, frameIndex: ::core::ffi::c_uint, ) 
-> ::core::ffi::c_ulonglong; } extern "C" { pub fn ZSTD_seekTable_getFrameDecompressedOffset( st: *const ZSTD_seekTable, frameIndex: ::core::ffi::c_uint, ) -> ::core::ffi::c_ulonglong; } extern "C" { pub fn ZSTD_seekTable_getFrameCompressedSize( st: *const ZSTD_seekTable, frameIndex: ::core::ffi::c_uint, ) -> usize; } extern "C" { pub fn ZSTD_seekTable_getFrameDecompressedSize( st: *const ZSTD_seekTable, frameIndex: ::core::ffi::c_uint, ) -> usize; } extern "C" { pub fn ZSTD_seekTable_offsetToFrameIndex( st: *const ZSTD_seekTable, offset: ::core::ffi::c_ulonglong, ) -> ::core::ffi::c_uint; }
// User-supplied I/O callbacks (C calling convention, nullable via Option) consumed by
// ZSTD_seekable_initAdvanced through the ZSTD_seekable_customFile struct.
pub type ZSTD_seekable_read = ::core::option::Option< unsafe extern "C" fn( opaque: *mut ::core::ffi::c_void, buffer: *mut ::core::ffi::c_void, n: usize, ) -> ::core::ffi::c_int, >; pub type ZSTD_seekable_seek = ::core::option::Option< unsafe extern "C" fn( opaque: *mut ::core::ffi::c_void, offset: ::core::ffi::c_longlong, origin: ::core::ffi::c_int, ) -> ::core::ffi::c_int, >; #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct ZSTD_seekable_customFile { pub opaque: *mut ::core::ffi::c_void, pub read: ZSTD_seekable_read, pub seek: ZSTD_seekable_seek, } extern "C" { pub fn ZSTD_seekable_initAdvanced( zs: *mut ZSTD_seekable, src: ZSTD_seekable_customFile, ) -> usize; }
/* This file is auto-generated from the public API of the zstd library. It is released under the same BSD license. BSD License For Zstandard software Copyright (c) Meta Platforms, Inc. and affiliates. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name Facebook, nor Meta, nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /* automatically generated by rust-bindgen 0.66.1 */
// Core zstd API bindings: version/limit constants, one-shot (de)compression, error helpers,
// and explicit-context management. NOTE(review): generated file — regenerate rather than edit.
pub const ZSTD_VERSION_MAJOR: u32 = 1; pub const ZSTD_VERSION_MINOR: u32 = 5; pub const ZSTD_VERSION_RELEASE: u32 = 5; pub const ZSTD_VERSION_NUMBER: u32 = 10505; pub const ZSTD_CLEVEL_DEFAULT: u32 = 3; pub const ZSTD_MAGICNUMBER: u32 = 4247762216; pub const ZSTD_MAGIC_DICTIONARY: u32 = 3962610743; pub const ZSTD_MAGIC_SKIPPABLE_START: u32 = 407710288; pub const ZSTD_MAGIC_SKIPPABLE_MASK: u32 = 4294967280; pub const ZSTD_BLOCKSIZELOG_MAX: u32 = 17; pub const ZSTD_BLOCKSIZE_MAX: u32 = 131072; pub const ZSTD_CONTENTSIZE_UNKNOWN: i32 = -1; pub const ZSTD_CONTENTSIZE_ERROR: i32 = -2; pub const ZSTD_FRAMEHEADERSIZE_MAX: u32 = 18; pub const ZSTD_SKIPPABLEHEADERSIZE: u32 = 8; pub const ZSTD_WINDOWLOG_MAX_32: u32 = 30; pub const ZSTD_WINDOWLOG_MAX_64: u32 = 31; pub const ZSTD_WINDOWLOG_MIN: u32 = 10; pub const ZSTD_HASHLOG_MIN: u32 = 6; pub const ZSTD_CHAINLOG_MAX_32: u32 = 29; pub const ZSTD_CHAINLOG_MAX_64: u32 = 30; pub const ZSTD_CHAINLOG_MIN: u32 = 6; pub const ZSTD_SEARCHLOG_MIN: u32 = 1; pub const ZSTD_MINMATCH_MAX: u32 = 7; pub const ZSTD_MINMATCH_MIN: u32 = 3; pub const ZSTD_TARGETLENGTH_MAX: u32 = 131072; pub const ZSTD_TARGETLENGTH_MIN: u32 = 0; pub const ZSTD_BLOCKSIZE_MAX_MIN: u32 = 1024; pub const ZSTD_OVERLAPLOG_MIN: u32 = 0; pub const ZSTD_OVERLAPLOG_MAX: u32 = 9; pub const ZSTD_WINDOWLOG_LIMIT_DEFAULT: u32 = 27; pub const ZSTD_LDM_HASHLOG_MIN: u32 = 6; pub const ZSTD_LDM_MINMATCH_MIN: u32 = 4; pub const ZSTD_LDM_MINMATCH_MAX: u32 = 4096; pub const ZSTD_LDM_BUCKETSIZELOG_MIN: u32 = 1; pub const ZSTD_LDM_BUCKETSIZELOG_MAX: u32 = 8; pub const ZSTD_LDM_HASHRATELOG_MIN: u32 = 0; pub const ZSTD_TARGETCBLOCKSIZE_MIN: u32 = 64; pub const ZSTD_TARGETCBLOCKSIZE_MAX: u32 = 131072; pub const ZSTD_SRCSIZEHINT_MIN: u32 = 0; extern "C" { #[doc = " ZSTD_versionNumber() :\n Return runtime library version, the value is (MAJOR*100*100 + MINOR*100 + RELEASE)."] pub fn ZSTD_versionNumber() -> ::core::ffi::c_uint; } extern 
"C" { #[doc = " ZSTD_versionString() :\n Return runtime library version, like \"1.4.5\". Requires v1.3.0+."] pub fn ZSTD_versionString() -> *const ::core::ffi::c_char; } extern "C" { #[doc = " Simple API\n/\n/*! ZSTD_compress() :\n Compresses `src` content as a single zstd compressed frame into already allocated `dst`.\n NOTE: Providing `dstCapacity >= ZSTD_compressBound(srcSize)` guarantees that zstd will have\n enough space to successfully compress the data.\n @return : compressed size written into `dst` (<= `dstCapacity),\n or an error code if it fails (which can be tested using ZSTD_isError())."] pub fn ZSTD_compress( dst: *mut ::core::ffi::c_void, dstCapacity: usize, src: *const ::core::ffi::c_void, srcSize: usize, compressionLevel: ::core::ffi::c_int, ) -> usize; } extern "C" { #[doc = " ZSTD_decompress() :\n `compressedSize` : must be the _exact_ size of some number of compressed and/or skippable frames.\n `dstCapacity` is an upper bound of originalSize to regenerate.\n If user cannot imply a maximum upper bound, it's better to use streaming mode to decompress data.\n @return : the number of bytes decompressed into `dst` (<= `dstCapacity`),\n or an errorCode if it fails (which can be tested using ZSTD_isError())."] pub fn ZSTD_decompress( dst: *mut ::core::ffi::c_void, dstCapacity: usize, src: *const ::core::ffi::c_void, compressedSize: usize, ) -> usize; } extern "C" { pub fn ZSTD_getFrameContentSize( src: *const ::core::ffi::c_void, srcSize: usize, ) -> ::core::ffi::c_ulonglong; } extern "C" { #[doc = " ZSTD_getDecompressedSize() :\n NOTE: This function is now obsolete, in favor of ZSTD_getFrameContentSize().\n Both functions work the same way, but ZSTD_getDecompressedSize() blends\n \"empty\", \"unknown\" and \"error\" results to the same return value (0),\n while ZSTD_getFrameContentSize() gives them separate return values.\n @return : decompressed size of `src` frame content _if known and not empty_, 0 otherwise."] pub fn ZSTD_getDecompressedSize( src: 
*const ::core::ffi::c_void, srcSize: usize, ) -> ::core::ffi::c_ulonglong; } extern "C" { #[doc = " ZSTD_findFrameCompressedSize() : Requires v1.4.0+\n `src` should point to the start of a ZSTD frame or skippable frame.\n `srcSize` must be >= first frame size\n @return : the compressed size of the first frame starting at `src`,\n suitable to pass as `srcSize` to `ZSTD_decompress` or similar,\n or an error code if input is invalid"] pub fn ZSTD_findFrameCompressedSize( src: *const ::core::ffi::c_void, srcSize: usize, ) -> usize; } extern "C" { pub fn ZSTD_compressBound(srcSize: usize) -> usize; } extern "C" { pub fn ZSTD_isError(code: usize) -> ::core::ffi::c_uint; } extern "C" { pub fn ZSTD_getErrorName(code: usize) -> *const ::core::ffi::c_char; } extern "C" { pub fn ZSTD_minCLevel() -> ::core::ffi::c_int; } extern "C" { pub fn ZSTD_maxCLevel() -> ::core::ffi::c_int; } extern "C" { pub fn ZSTD_defaultCLevel() -> ::core::ffi::c_int; }
// Opaque compression context: zero-sized marker type, only ever used behind raw pointers.
#[repr(C)] #[derive(Debug, Copy, Clone)] pub struct ZSTD_CCtx_s { _unused: [u8; 0], } #[doc = " Explicit context"] pub type ZSTD_CCtx = ZSTD_CCtx_s; extern "C" { pub fn ZSTD_createCCtx() -> *mut ZSTD_CCtx; } extern "C" { pub fn ZSTD_freeCCtx(cctx: *mut ZSTD_CCtx) -> usize; } extern "C" { #[doc = " ZSTD_compressCCtx() :\n Same as ZSTD_compress(), using an explicit ZSTD_CCtx.\n Important : in order to behave similarly to `ZSTD_compress()`,\n this function compresses at requested compression level,\n __ignoring any other parameter__ .\n If any advanced parameter was set using the advanced API,\n they will all be reset. 
Only `compressionLevel` remains."] pub fn ZSTD_compressCCtx( cctx: *mut ZSTD_CCtx, dst: *mut ::core::ffi::c_void, dstCapacity: usize, src: *const ::core::ffi::c_void, srcSize: usize, compressionLevel: ::core::ffi::c_int, ) -> usize; }
// Opaque decompression context, same pattern as ZSTD_CCtx_s above.
#[repr(C)] #[derive(Debug, Copy, Clone)] pub struct ZSTD_DCtx_s { _unused: [u8; 0], } pub type ZSTD_DCtx = ZSTD_DCtx_s; extern "C" { pub fn ZSTD_createDCtx() -> *mut ZSTD_DCtx; } extern "C" { pub fn ZSTD_freeDCtx(dctx: *mut ZSTD_DCtx) -> usize; } extern "C" { #[doc = " ZSTD_decompressDCtx() :\n Same as ZSTD_decompress(),\n requires an allocated ZSTD_DCtx.\n Compatible with sticky parameters."] pub fn ZSTD_decompressDCtx( dctx: *mut ZSTD_DCtx, dst: *mut ::core::ffi::c_void, dstCapacity: usize, src: *const ::core::ffi::c_void, srcSize: usize, ) -> usize; }
// #[repr(u32)] enums mirror the C enums' exact discriminant values (incl. the non-sequential
// experimental-parameter values, which match zstd.h — e.g. experimentalParam2 == 10 is intentional).
#[repr(u32)] #[doc = " Advanced compression API (Requires v1.4.0+)"] #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] pub enum ZSTD_strategy { ZSTD_fast = 1, ZSTD_dfast = 2, ZSTD_greedy = 3, ZSTD_lazy = 4, ZSTD_lazy2 = 5, ZSTD_btlazy2 = 6, ZSTD_btopt = 7, ZSTD_btultra = 8, ZSTD_btultra2 = 9, } #[repr(u32)] #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] pub enum ZSTD_cParameter { ZSTD_c_compressionLevel = 100, ZSTD_c_windowLog = 101, ZSTD_c_hashLog = 102, ZSTD_c_chainLog = 103, ZSTD_c_searchLog = 104, ZSTD_c_minMatch = 105, ZSTD_c_targetLength = 106, ZSTD_c_strategy = 107, ZSTD_c_enableLongDistanceMatching = 160, ZSTD_c_ldmHashLog = 161, ZSTD_c_ldmMinMatch = 162, ZSTD_c_ldmBucketSizeLog = 163, ZSTD_c_ldmHashRateLog = 164, ZSTD_c_contentSizeFlag = 200, ZSTD_c_checksumFlag = 201, ZSTD_c_dictIDFlag = 202, ZSTD_c_nbWorkers = 400, ZSTD_c_jobSize = 401, ZSTD_c_overlapLog = 402, ZSTD_c_experimentalParam1 = 500, ZSTD_c_experimentalParam2 = 10, ZSTD_c_experimentalParam3 = 1000, ZSTD_c_experimentalParam4 = 1001, ZSTD_c_experimentalParam5 = 1002, ZSTD_c_experimentalParam6 = 1003, ZSTD_c_experimentalParam7 = 1004, ZSTD_c_experimentalParam8 = 1005, ZSTD_c_experimentalParam9 = 1006, 
ZSTD_c_experimentalParam10 = 1007, ZSTD_c_experimentalParam11 = 1008, ZSTD_c_experimentalParam12 = 1009, ZSTD_c_experimentalParam13 = 1010, ZSTD_c_experimentalParam14 = 1011, ZSTD_c_experimentalParam15 = 1012, ZSTD_c_experimentalParam16 = 1013, ZSTD_c_experimentalParam17 = 1014, ZSTD_c_experimentalParam18 = 1015, ZSTD_c_experimentalParam19 = 1016, }
// Parameter-validity record returned by ZSTD_cParam_getBounds: error status (test with
// ZSTD_isError) plus inclusive lower/upper bounds, per the doc string below.
#[repr(C)] #[derive(Debug, Copy, Clone)] pub struct ZSTD_bounds { pub error: usize, pub lowerBound: ::core::ffi::c_int, pub upperBound: ::core::ffi::c_int, } extern "C" { #[doc = " ZSTD_cParam_getBounds() :\n All parameters must belong to an interval with lower and upper bounds,\n otherwise they will either trigger an error or be automatically clamped.\n @return : a structure, ZSTD_bounds, which contains\n - an error status field, which must be tested using ZSTD_isError()\n - lower and upper bounds, both inclusive"] pub fn ZSTD_cParam_getBounds(cParam: ZSTD_cParameter) -> ZSTD_bounds; } extern "C" { #[doc = " ZSTD_CCtx_setParameter() :\n Set one compression parameter, selected by enum ZSTD_cParameter.\n All parameters have valid bounds. 
Bounds can be queried using ZSTD_cParam_getBounds().\n Providing a value beyond bound will either clamp it, or trigger an error (depending on parameter).\n Setting a parameter is generally only possible during frame initialization (before starting compression).\n Exception : when using multi-threading mode (nbWorkers >= 1),\n the following parameters can be updated _during_ compression (within same frame):\n => compressionLevel, hashLog, chainLog, searchLog, minMatch, targetLength and strategy.\n new parameters will be active for next job only (after a flush()).\n @return : an error code (which can be tested using ZSTD_isError())."] pub fn ZSTD_CCtx_setParameter( cctx: *mut ZSTD_CCtx, param: ZSTD_cParameter, value: ::core::ffi::c_int, ) -> usize; } extern "C" { #[doc = " ZSTD_CCtx_setPledgedSrcSize() :\n Total input data size to be compressed as a single frame.\n Value will be written in frame header, unless if explicitly forbidden using ZSTD_c_contentSizeFlag.\n This value will also be controlled at end of frame, and trigger an error if not respected.\n @result : 0, or an error code (which can be tested with ZSTD_isError()).\n Note 1 : pledgedSrcSize==0 actually means zero, aka an empty frame.\n In order to mean \"unknown content size\", pass constant ZSTD_CONTENTSIZE_UNKNOWN.\n ZSTD_CONTENTSIZE_UNKNOWN is default value for any new frame.\n Note 2 : pledgedSrcSize is only valid once, for the next frame.\n It's discarded at the end of the frame, and replaced by ZSTD_CONTENTSIZE_UNKNOWN.\n Note 3 : Whenever all input data is provided and consumed in a single round,\n for example with ZSTD_compress2(),\n or invoking immediately ZSTD_compressStream2(,,,ZSTD_e_end),\n this value is automatically overridden by srcSize instead."] pub fn ZSTD_CCtx_setPledgedSrcSize( cctx: *mut ZSTD_CCtx, pledgedSrcSize: ::core::ffi::c_ulonglong, ) -> usize; }
// Reset directives for ZSTD_CCtx_reset; #[repr(u32)] keeps discriminants identical to the C enum.
#[repr(u32)] #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] pub enum ZSTD_ResetDirective { ZSTD_reset_session_only = 1, 
ZSTD_reset_parameters = 2, ZSTD_reset_session_and_parameters = 3, } extern "C" { #[doc = " ZSTD_CCtx_reset() :\n There are 2 different things that can be reset, independently or jointly :\n - The session : will stop compressing current frame, and make CCtx ready to start a new one.\n Useful after an error, or to interrupt any ongoing compression.\n Any internal data not yet flushed is cancelled.\n Compression parameters and dictionary remain unchanged.\n They will be used to compress next frame.\n Resetting session never fails.\n - The parameters : changes all parameters back to \"default\".\n This also removes any reference to any dictionary or external sequence producer.\n Parameters can only be changed between 2 sessions (i.e. no compression is currently ongoing)\n otherwise the reset fails, and function returns an error value (which can be tested using ZSTD_isError())\n - Both : similar to resetting the session, followed by resetting parameters."] pub fn ZSTD_CCtx_reset( cctx: *mut ZSTD_CCtx, reset: ZSTD_ResetDirective, ) -> usize; } extern "C" { #[doc = " ZSTD_compress2() :\n Behave the same as ZSTD_compressCCtx(), but compression parameters are set using the advanced API.\n ZSTD_compress2() always starts a new frame.\n Should cctx hold data from a previously unfinished frame, everything about it is forgotten.\n - Compression parameters are pushed into CCtx before starting compression, using ZSTD_CCtx_set*()\n - The function is always blocking, returns when compression is completed.\n NOTE: Providing `dstCapacity >= ZSTD_compressBound(srcSize)` guarantees that zstd will have\n enough space to successfully compress the data, though it is possible it fails for other reasons.\n @return : compressed size written into `dst` (<= `dstCapacity),\n or an error code if it fails (which can be tested using ZSTD_isError())."] pub fn ZSTD_compress2( cctx: *mut ZSTD_CCtx, dst: *mut ::core::ffi::c_void, dstCapacity: usize, src: *const ::core::ffi::c_void, srcSize: usize, ) -> 
usize; } #[repr(u32)] #[doc = " Advanced decompression API (Requires v1.4.0+)"] #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] pub enum ZSTD_dParameter { ZSTD_d_windowLogMax = 100, ZSTD_d_experimentalParam1 = 1000, ZSTD_d_experimentalParam2 = 1001, ZSTD_d_experimentalParam3 = 1002, ZSTD_d_experimentalParam4 = 1003, ZSTD_d_experimentalParam5 = 1004, } extern "C" { #[doc = " ZSTD_dParam_getBounds() :\n All parameters must belong to an interval with lower and upper bounds,\n otherwise they will either trigger an error or be automatically clamped.\n @return : a structure, ZSTD_bounds, which contains\n - an error status field, which must be tested using ZSTD_isError()\n - both lower and upper bounds, inclusive"] pub fn ZSTD_dParam_getBounds(dParam: ZSTD_dParameter) -> ZSTD_bounds; } extern "C" { #[doc = " ZSTD_DCtx_setParameter() :\n Set one compression parameter, selected by enum ZSTD_dParameter.\n All parameters have valid bounds. Bounds can be queried using ZSTD_dParam_getBounds().\n Providing a value beyond bound will either clamp it, or trigger an error (depending on parameter).\n Setting a parameter is only possible during frame initialization (before starting decompression).\n @return : 0, or an error code (which can be tested using ZSTD_isError())."] pub fn ZSTD_DCtx_setParameter( dctx: *mut ZSTD_DCtx, param: ZSTD_dParameter, value: ::core::ffi::c_int, ) -> usize; } extern "C" { #[doc = " ZSTD_DCtx_reset() :\n Return a DCtx to clean state.\n Session and parameters can be reset jointly or separately.\n Parameters can only be reset when no active frame is being decompressed.\n @return : 0, or an error code, which can be tested with ZSTD_isError()"] pub fn ZSTD_DCtx_reset( dctx: *mut ZSTD_DCtx, reset: ZSTD_ResetDirective, ) -> usize; } #[doc = " Streaming"] #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct ZSTD_inBuffer_s { #[doc = "< start of input buffer"] pub src: *const ::core::ffi::c_void, #[doc = "< size of input buffer"] pub size: usize, #[doc = "< 
position where reading stopped. Will be updated. Necessarily 0 <= pos <= size"] pub pos: usize, } #[doc = " Streaming"] pub type ZSTD_inBuffer = ZSTD_inBuffer_s; #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct ZSTD_outBuffer_s { #[doc = "< start of output buffer"] pub dst: *mut ::core::ffi::c_void, #[doc = "< size of output buffer"] pub size: usize, #[doc = "< position where writing stopped. Will be updated. Necessarily 0 <= pos <= size"] pub pos: usize, } pub type ZSTD_outBuffer = ZSTD_outBuffer_s; pub type ZSTD_CStream = ZSTD_CCtx; extern "C" { pub fn ZSTD_createCStream() -> *mut ZSTD_CStream; } extern "C" { pub fn ZSTD_freeCStream(zcs: *mut ZSTD_CStream) -> usize; } #[repr(u32)] #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] pub enum ZSTD_EndDirective { ZSTD_e_continue = 0, ZSTD_e_flush = 1, ZSTD_e_end = 2, } extern "C" { #[doc = " ZSTD_compressStream2() : Requires v1.4.0+\n Behaves about the same as ZSTD_compressStream, with additional control on end directive.\n - Compression parameters are pushed into CCtx before starting compression, using ZSTD_CCtx_set*()\n - Compression parameters cannot be changed once compression is started (save a list of exceptions in multi-threading mode)\n - output->pos must be <= dstCapacity, input->pos must be <= srcSize\n - output->pos and input->pos will be updated. 
They are guaranteed to remain below their respective limit.\n - endOp must be a valid directive\n - When nbWorkers==0 (default), function is blocking : it completes its job before returning to caller.\n - When nbWorkers>=1, function is non-blocking : it copies a portion of input, distributes jobs to internal worker threads, flush to output whatever is available,\n and then immediately returns, just indicating that there is some data remaining to be flushed.\n The function nonetheless guarantees forward progress : it will return only after it reads or write at least 1+ byte.\n - Exception : if the first call requests a ZSTD_e_end directive and provides enough dstCapacity, the function delegates to ZSTD_compress2() which is always blocking.\n - @return provides a minimum amount of data remaining to be flushed from internal buffers\n or an error code, which can be tested using ZSTD_isError().\n if @return != 0, flush is not fully completed, there is still some data left within internal buffers.\n This is useful for ZSTD_e_flush, since in this case more flushes are necessary to empty all buffers.\n For ZSTD_e_end, @return == 0 when internal buffers are fully flushed and frame is completed.\n - after a ZSTD_e_end directive, if internal buffer is not fully flushed (@return != 0),\n only ZSTD_e_end or ZSTD_e_flush operations are allowed.\n Before starting a new compression job, or changing compression parameters,\n it is required to fully flush internal buffers."] pub fn ZSTD_compressStream2( cctx: *mut ZSTD_CCtx, output: *mut ZSTD_outBuffer, input: *mut ZSTD_inBuffer, endOp: ZSTD_EndDirective, ) -> usize; } extern "C" { pub fn ZSTD_CStreamInSize() -> usize; } extern "C" { pub fn ZSTD_CStreamOutSize() -> usize; } extern "C" { #[doc = " Equivalent to:\n\n ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);\n ZSTD_CCtx_refCDict(zcs, NULL); // clear the dictionary (if any)\n ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel);\n\n Note that ZSTD_initCStream() 
clears any previously set dictionary. Use the new API\n to compress with a dictionary."] pub fn ZSTD_initCStream( zcs: *mut ZSTD_CStream, compressionLevel: ::core::ffi::c_int, ) -> usize; } extern "C" { #[doc = " Alternative for ZSTD_compressStream2(zcs, output, input, ZSTD_e_continue).\n NOTE: The return value is different. ZSTD_compressStream() returns a hint for\n the next read size (if non-zero and not an error). ZSTD_compressStream2()\n returns the minimum nb of bytes left to flush (if non-zero and not an error)."] pub fn ZSTD_compressStream( zcs: *mut ZSTD_CStream, output: *mut ZSTD_outBuffer, input: *mut ZSTD_inBuffer, ) -> usize; } extern "C" { #[doc = " Equivalent to ZSTD_compressStream2(zcs, output, &emptyInput, ZSTD_e_flush)."] pub fn ZSTD_flushStream( zcs: *mut ZSTD_CStream, output: *mut ZSTD_outBuffer, ) -> usize; } extern "C" { #[doc = " Equivalent to ZSTD_compressStream2(zcs, output, &emptyInput, ZSTD_e_end)."] pub fn ZSTD_endStream( zcs: *mut ZSTD_CStream, output: *mut ZSTD_outBuffer, ) -> usize; } pub type ZSTD_DStream = ZSTD_DCtx; extern "C" { pub fn ZSTD_createDStream() -> *mut ZSTD_DStream; } extern "C" { pub fn ZSTD_freeDStream(zds: *mut ZSTD_DStream) -> usize; } extern "C" { #[doc = " ZSTD_initDStream() :\n Initialize/reset DStream state for new decompression operation.\n Call before new decompression operation using same DStream.\n\n Note : This function is redundant with the advanced API and equivalent to:\n ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);\n ZSTD_DCtx_refDDict(zds, NULL);"] pub fn ZSTD_initDStream(zds: *mut ZSTD_DStream) -> usize; } extern "C" { #[doc = " ZSTD_decompressStream() :\n Streaming decompression function.\n Call repetitively to consume full input updating it as necessary.\n Function will update both input and output `pos` fields exposing current state via these fields:\n - `input.pos < input.size`, some input remaining and caller should provide remaining input\n on the next call.\n - `output.pos < output.size`, 
decoder finished and flushed all remaining buffers.\n - `output.pos == output.size`, potentially unflushed data present in the internal buffers,\n call ZSTD_decompressStream() again to flush remaining data to output.\n Note : with no additional input, amount of data flushed <= ZSTD_BLOCKSIZE_MAX.\n\n @return : 0 when a frame is completely decoded and fully flushed,\n or an error code, which can be tested using ZSTD_isError(),\n or any other value > 0, which means there is some decoding or flushing to do to complete current frame."] pub fn ZSTD_decompressStream( zds: *mut ZSTD_DStream, output: *mut ZSTD_outBuffer, input: *mut ZSTD_inBuffer, ) -> usize; } extern "C" { pub fn ZSTD_DStreamInSize() -> usize; } extern "C" { pub fn ZSTD_DStreamOutSize() -> usize; } extern "C" { #[doc = " Simple dictionary API\n/\n/*! ZSTD_compress_usingDict() :\n Compression at an explicit compression level using a Dictionary.\n A dictionary can be any arbitrary data segment (also called a prefix),\n or a buffer with specified information (see zdict.h).\n Note : This function loads the dictionary, resulting in significant startup delay.\n It's intended for a dictionary used only once.\n Note 2 : When `dict == NULL || dictSize < 8` no dictionary is used."] pub fn ZSTD_compress_usingDict( ctx: *mut ZSTD_CCtx, dst: *mut ::core::ffi::c_void, dstCapacity: usize, src: *const ::core::ffi::c_void, srcSize: usize, dict: *const ::core::ffi::c_void, dictSize: usize, compressionLevel: ::core::ffi::c_int, ) -> usize; } extern "C" { #[doc = " ZSTD_decompress_usingDict() :\n Decompression using a known Dictionary.\n Dictionary must be identical to the one used during compression.\n Note : This function loads the dictionary, resulting in significant startup delay.\n It's intended for a dictionary used only once.\n Note : When `dict == NULL || dictSize < 8` no dictionary is used."] pub fn ZSTD_decompress_usingDict( dctx: *mut ZSTD_DCtx, dst: *mut ::core::ffi::c_void, dstCapacity: usize, src: *const
::core::ffi::c_void, srcSize: usize, dict: *const ::core::ffi::c_void, dictSize: usize, ) -> usize; } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct ZSTD_CDict_s { _unused: [u8; 0], } #[doc = " Bulk processing dictionary API"] pub type ZSTD_CDict = ZSTD_CDict_s; extern "C" { #[doc = " ZSTD_createCDict() :\n When compressing multiple messages or blocks using the same dictionary,\n it's recommended to digest the dictionary only once, since it's a costly operation.\n ZSTD_createCDict() will create a state from digesting a dictionary.\n The resulting state can be used for future compression operations with very limited startup cost.\n ZSTD_CDict can be created once and shared by multiple threads concurrently, since its usage is read-only.\n @dictBuffer can be released after ZSTD_CDict creation, because its content is copied within CDict.\n Note 1 : Consider experimental function `ZSTD_createCDict_byReference()` if you prefer to not duplicate @dictBuffer content.\n Note 2 : A ZSTD_CDict can be created from an empty @dictBuffer,\n in which case the only thing that it transports is the @compressionLevel.\n This can be useful in a pipeline featuring ZSTD_compress_usingCDict() exclusively,\n expecting a ZSTD_CDict parameter with any data, including those without a known dictionary."] pub fn ZSTD_createCDict( dictBuffer: *const ::core::ffi::c_void, dictSize: usize, compressionLevel: ::core::ffi::c_int, ) -> *mut ZSTD_CDict; } extern "C" { #[doc = " ZSTD_freeCDict() :\n Function frees memory allocated by ZSTD_createCDict().\n If a NULL pointer is passed, no operation is performed."] pub fn ZSTD_freeCDict(CDict: *mut ZSTD_CDict) -> usize; } extern "C" { #[doc = " ZSTD_compress_usingCDict() :\n Compression using a digested Dictionary.\n Recommended when same dictionary is used multiple times.\n Note : compression level is _decided at dictionary creation time_,\n and frame parameters are hardcoded (dictID=yes, contentSize=yes, checksum=no)"] pub fn ZSTD_compress_usingCDict( 
cctx: *mut ZSTD_CCtx, dst: *mut ::core::ffi::c_void, dstCapacity: usize, src: *const ::core::ffi::c_void, srcSize: usize, cdict: *const ZSTD_CDict, ) -> usize; } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct ZSTD_DDict_s { _unused: [u8; 0], } pub type ZSTD_DDict = ZSTD_DDict_s; extern "C" { #[doc = " ZSTD_createDDict() :\n Create a digested dictionary, ready to start decompression operation without startup delay.\n dictBuffer can be released after DDict creation, as its content is copied inside DDict."] pub fn ZSTD_createDDict( dictBuffer: *const ::core::ffi::c_void, dictSize: usize, ) -> *mut ZSTD_DDict; } extern "C" { #[doc = " ZSTD_freeDDict() :\n Function frees memory allocated with ZSTD_createDDict()\n If a NULL pointer is passed, no operation is performed."] pub fn ZSTD_freeDDict(ddict: *mut ZSTD_DDict) -> usize; } extern "C" { #[doc = " ZSTD_decompress_usingDDict() :\n Decompression using a digested Dictionary.\n Recommended when same dictionary is used multiple times."] pub fn ZSTD_decompress_usingDDict( dctx: *mut ZSTD_DCtx, dst: *mut ::core::ffi::c_void, dstCapacity: usize, src: *const ::core::ffi::c_void, srcSize: usize, ddict: *const ZSTD_DDict, ) -> usize; } extern "C" { #[doc = " ZSTD_getDictID_fromDict() : Requires v1.4.0+\n Provides the dictID stored within dictionary.\n if @return == 0, the dictionary is not conformant with Zstandard specification.\n It can still be loaded, but as a content-only dictionary."] pub fn ZSTD_getDictID_fromDict( dict: *const ::core::ffi::c_void, dictSize: usize, ) -> ::core::ffi::c_uint; } extern "C" { #[doc = " ZSTD_getDictID_fromCDict() : Requires v1.5.0+\n Provides the dictID of the dictionary loaded into `cdict`.\n If @return == 0, the dictionary is not conformant to Zstandard specification, or empty.\n Non-conformant dictionaries can still be loaded, but as content-only dictionaries."] pub fn ZSTD_getDictID_fromCDict( cdict: *const ZSTD_CDict, ) -> ::core::ffi::c_uint; } extern "C" { #[doc = " 
ZSTD_getDictID_fromDDict() : Requires v1.4.0+\n Provides the dictID of the dictionary loaded into `ddict`.\n If @return == 0, the dictionary is not conformant to Zstandard specification, or empty.\n Non-conformant dictionaries can still be loaded, but as content-only dictionaries."] pub fn ZSTD_getDictID_fromDDict( ddict: *const ZSTD_DDict, ) -> ::core::ffi::c_uint; } extern "C" { #[doc = " ZSTD_getDictID_fromFrame() : Requires v1.4.0+\n Provides the dictID required to decompressed the frame stored within `src`.\n If @return == 0, the dictID could not be decoded.\n This could for one of the following reasons :\n - The frame does not require a dictionary to be decoded (most common case).\n - The frame was built with dictID intentionally removed. Whatever dictionary is necessary is a hidden piece of information.\n Note : this use case also happens when using a non-conformant dictionary.\n - `srcSize` is too small, and as a result, the frame header could not be decoded (only possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`).\n - This is not a Zstandard frame.\n When identifying the exact failure cause, it's possible to use ZSTD_getFrameHeader(), which will provide a more precise error code."] pub fn ZSTD_getDictID_fromFrame( src: *const ::core::ffi::c_void, srcSize: usize, ) -> ::core::ffi::c_uint; } extern "C" { #[doc = " ZSTD_CCtx_loadDictionary() : Requires v1.4.0+\n Create an internal CDict from `dict` buffer.\n Decompression will have to use same dictionary.\n @result : 0, or an error code (which can be tested with ZSTD_isError()).\n Special: Loading a NULL (or 0-size) dictionary invalidates previous dictionary,\n meaning \"return to no-dictionary mode\".\n Note 1 : Dictionary is sticky, it will be used for all future compressed frames,\n until parameters are reset, a new dictionary is loaded, or the dictionary\n is explicitly invalidated by loading a NULL dictionary.\n Note 2 : Loading a dictionary involves building tables.\n It's also a CPU consuming operation, 
with non-negligible impact on latency.\n Tables are dependent on compression parameters, and for this reason,\n compression parameters can no longer be changed after loading a dictionary.\n Note 3 :`dict` content will be copied internally.\n Use experimental ZSTD_CCtx_loadDictionary_byReference() to reference content instead.\n In such a case, dictionary buffer must outlive its users.\n Note 4 : Use ZSTD_CCtx_loadDictionary_advanced()\n to precisely select how dictionary content must be interpreted.\n Note 5 : This method does not benefit from LDM (long distance mode).\n If you want to employ LDM on some large dictionary content,\n prefer employing ZSTD_CCtx_refPrefix() described below."] pub fn ZSTD_CCtx_loadDictionary( cctx: *mut ZSTD_CCtx, dict: *const ::core::ffi::c_void, dictSize: usize, ) -> usize; } extern "C" { #[doc = " ZSTD_CCtx_refCDict() : Requires v1.4.0+\n Reference a prepared dictionary, to be used for all future compressed frames.\n Note that compression parameters are enforced from within CDict,\n and supersede any compression parameter previously set within CCtx.\n The parameters ignored are labelled as \"superseded-by-cdict\" in the ZSTD_cParameter enum docs.\n The ignored parameters will be used again if the CCtx is returned to no-dictionary mode.\n The dictionary will remain valid for future compressed frames using same CCtx.\n @result : 0, or an error code (which can be tested with ZSTD_isError()).\n Special : Referencing a NULL CDict means \"return to no-dictionary mode\".\n Note 1 : Currently, only one dictionary can be managed.\n Referencing a new dictionary effectively \"discards\" any previous one.\n Note 2 : CDict is just referenced, its lifetime must outlive its usage within CCtx."] pub fn ZSTD_CCtx_refCDict( cctx: *mut ZSTD_CCtx, cdict: *const ZSTD_CDict, ) -> usize; } extern "C" { #[doc = " ZSTD_CCtx_refPrefix() : Requires v1.4.0+\n Reference a prefix (single-usage dictionary) for next compressed frame.\n A prefix is **only used 
once**. Tables are discarded at end of frame (ZSTD_e_end).\n Decompression will need same prefix to properly regenerate data.\n Compressing with a prefix is similar in outcome as performing a diff and compressing it,\n but performs much faster, especially during decompression (compression speed is tunable with compression level).\n This method is compatible with LDM (long distance mode).\n @result : 0, or an error code (which can be tested with ZSTD_isError()).\n Special: Adding any prefix (including NULL) invalidates any previous prefix or dictionary\n Note 1 : Prefix buffer is referenced. It **must** outlive compression.\n Its content must remain unmodified during compression.\n Note 2 : If the intention is to diff some large src data blob with some prior version of itself,\n ensure that the window size is large enough to contain the entire source.\n See ZSTD_c_windowLog.\n Note 3 : Referencing a prefix involves building tables, which are dependent on compression parameters.\n It's a CPU consuming operation, with non-negligible impact on latency.\n If there is a need to use the same prefix multiple times, consider loadDictionary instead.\n Note 4 : By default, the prefix is interpreted as raw content (ZSTD_dct_rawContent).\n Use experimental ZSTD_CCtx_refPrefix_advanced() to alter dictionary interpretation."] pub fn ZSTD_CCtx_refPrefix( cctx: *mut ZSTD_CCtx, prefix: *const ::core::ffi::c_void, prefixSize: usize, ) -> usize; } extern "C" { #[doc = " ZSTD_DCtx_loadDictionary() : Requires v1.4.0+\n Create an internal DDict from dict buffer, to be used to decompress all future frames.\n The dictionary remains valid for all future frames, until explicitly invalidated, or\n a new dictionary is loaded.\n @result : 0, or an error code (which can be tested with ZSTD_isError()).\n Special : Adding a NULL (or 0-size) dictionary invalidates any previous dictionary,\n meaning \"return to no-dictionary mode\".\n Note 1 : Loading a dictionary involves building tables,\n which 
has a non-negligible impact on CPU usage and latency.\n It's recommended to \"load once, use many times\", to amortize the cost\n Note 2 :`dict` content will be copied internally, so `dict` can be released after loading.\n Use ZSTD_DCtx_loadDictionary_byReference() to reference dictionary content instead.\n Note 3 : Use ZSTD_DCtx_loadDictionary_advanced() to take control of\n how dictionary content is loaded and interpreted."] pub fn ZSTD_DCtx_loadDictionary( dctx: *mut ZSTD_DCtx, dict: *const ::core::ffi::c_void, dictSize: usize, ) -> usize; } extern "C" { #[doc = " ZSTD_DCtx_refDDict() : Requires v1.4.0+\n Reference a prepared dictionary, to be used to decompress next frames.\n The dictionary remains active for decompression of future frames using same DCtx.\n\n If called with ZSTD_d_refMultipleDDicts enabled, repeated calls of this function\n will store the DDict references in a table, and the DDict used for decompression\n will be determined at decompression time, as per the dict ID in the frame.\n The memory for the table is allocated on the first call to refDDict, and can be\n freed with ZSTD_freeDCtx().\n\n If called with ZSTD_d_refMultipleDDicts disabled (the default), only one dictionary\n will be managed, and referencing a dictionary effectively \"discards\" any previous one.\n\n @result : 0, or an error code (which can be tested with ZSTD_isError()).\n Special: referencing a NULL DDict means \"return to no-dictionary mode\".\n Note 2 : DDict is just referenced, its lifetime must outlive its usage from DCtx."] pub fn ZSTD_DCtx_refDDict( dctx: *mut ZSTD_DCtx, ddict: *const ZSTD_DDict, ) -> usize; } extern "C" { #[doc = " ZSTD_DCtx_refPrefix() : Requires v1.4.0+\n Reference a prefix (single-usage dictionary) to decompress next frame.\n This is the reverse operation of ZSTD_CCtx_refPrefix(),\n and must use the same prefix as the one used during compression.\n Prefix is **only used once**. 
Reference is discarded at end of frame.\n End of frame is reached when ZSTD_decompressStream() returns 0.\n @result : 0, or an error code (which can be tested with ZSTD_isError()).\n Note 1 : Adding any prefix (including NULL) invalidates any previously set prefix or dictionary\n Note 2 : Prefix buffer is referenced. It **must** outlive decompression.\n Prefix buffer must remain unmodified up to the end of frame,\n reached when ZSTD_decompressStream() returns 0.\n Note 3 : By default, the prefix is treated as raw content (ZSTD_dct_rawContent).\n Use ZSTD_CCtx_refPrefix_advanced() to alter dictMode (Experimental section)\n Note 4 : Referencing a raw content prefix has almost no cpu nor memory cost.\n A full dictionary is more costly, as it requires building tables."] pub fn ZSTD_DCtx_refPrefix( dctx: *mut ZSTD_DCtx, prefix: *const ::core::ffi::c_void, prefixSize: usize, ) -> usize; } extern "C" { #[doc = " ZSTD_sizeof_*() : Requires v1.4.0+\n These functions give the _current_ memory usage of selected object.\n Note that object memory usage can evolve (increase or decrease) over time."] pub fn ZSTD_sizeof_CCtx(cctx: *const ZSTD_CCtx) -> usize; } extern "C" { pub fn ZSTD_sizeof_DCtx(dctx: *const ZSTD_DCtx) -> usize; } extern "C" { pub fn ZSTD_sizeof_CStream(zcs: *const ZSTD_CStream) -> usize; } extern "C" { pub fn ZSTD_sizeof_DStream(zds: *const ZSTD_DStream) -> usize; } extern "C" { pub fn ZSTD_sizeof_CDict(cdict: *const ZSTD_CDict) -> usize; } extern "C" { pub fn ZSTD_sizeof_DDict(ddict: *const ZSTD_DDict) -> usize; } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct ZSTD_CCtx_params_s { _unused: [u8; 0], } pub type ZSTD_CCtx_params = ZSTD_CCtx_params_s; #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct ZSTD_Sequence { pub offset: ::core::ffi::c_uint, pub litLength: ::core::ffi::c_uint, pub matchLength: ::core::ffi::c_uint, pub rep: ::core::ffi::c_uint, } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct ZSTD_compressionParameters { #[doc = "< largest match 
distance : larger == more compression, more memory needed during decompression"] pub windowLog: ::core::ffi::c_uint, #[doc = "< fully searched segment : larger == more compression, slower, more memory (useless for fast)"] pub chainLog: ::core::ffi::c_uint, #[doc = "< dispatch table : larger == faster, more memory"] pub hashLog: ::core::ffi::c_uint, #[doc = "< nb of searches : larger == more compression, slower"] pub searchLog: ::core::ffi::c_uint, #[doc = "< match length searched : larger == faster decompression, sometimes less compression"] pub minMatch: ::core::ffi::c_uint, #[doc = "< acceptable match size for optimal parser (only) : larger == more compression, slower"] pub targetLength: ::core::ffi::c_uint, #[doc = "< see ZSTD_strategy definition above"] pub strategy: ZSTD_strategy, } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct ZSTD_frameParameters { #[doc = "< 1: content size will be in frame header (when known)"] pub contentSizeFlag: ::core::ffi::c_int, #[doc = "< 1: generate a 32-bits checksum using XXH64 algorithm at end of frame, for error detection"] pub checksumFlag: ::core::ffi::c_int, #[doc = "< 1: no dictID will be saved into frame header (dictID is only useful for dictionary compression)"] pub noDictIDFlag: ::core::ffi::c_int, } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct ZSTD_parameters { pub cParams: ZSTD_compressionParameters, pub fParams: ZSTD_frameParameters, } #[repr(u32)] #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] pub enum ZSTD_dictContentType_e { ZSTD_dct_auto = 0, ZSTD_dct_rawContent = 1, ZSTD_dct_fullDict = 2, } #[repr(u32)] #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] pub enum ZSTD_dictLoadMethod_e { #[doc = "< Copy dictionary content internally"] ZSTD_dlm_byCopy = 0, #[doc = "< Reference dictionary content -- the dictionary buffer must outlive its users."] ZSTD_dlm_byRef = 1, } #[repr(u32)] #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] pub enum ZSTD_format_e { ZSTD_f_zstd1 = 0, ZSTD_f_zstd1_magicless = 1, 
} #[repr(u32)] #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] pub enum ZSTD_forceIgnoreChecksum_e { ZSTD_d_validateChecksum = 0, ZSTD_d_ignoreChecksum = 1, } #[repr(u32)] #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] pub enum ZSTD_refMultipleDDicts_e { ZSTD_rmd_refSingleDDict = 0, ZSTD_rmd_refMultipleDDicts = 1, } #[repr(u32)] #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] pub enum ZSTD_dictAttachPref_e { ZSTD_dictDefaultAttach = 0, ZSTD_dictForceAttach = 1, ZSTD_dictForceCopy = 2, ZSTD_dictForceLoad = 3, } #[repr(u32)] #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] pub enum ZSTD_literalCompressionMode_e { #[doc = "< Automatically determine the compression mode based on the compression level.\n Negative compression levels will be uncompressed, and positive compression\n levels will be compressed."] ZSTD_lcm_auto = 0, #[doc = "< Always attempt Huffman compression. Uncompressed literals will still be\n emitted if Huffman compression is not profitable."] ZSTD_lcm_huffman = 1, #[doc = "< Always emit uncompressed literals."] ZSTD_lcm_uncompressed = 2, } #[repr(u32)] #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] pub enum ZSTD_paramSwitch_e { ZSTD_ps_auto = 0, ZSTD_ps_enable = 1, ZSTD_ps_disable = 2, } extern "C" { #[doc = " ZSTD_findDecompressedSize() :\n `src` should point to the start of a series of ZSTD encoded and/or skippable frames\n `srcSize` must be the _exact_ size of this series\n (i.e. 
there should be a frame boundary at `src + srcSize`)\n @return : - decompressed size of all data in all successive frames\n - if the decompressed size cannot be determined: ZSTD_CONTENTSIZE_UNKNOWN\n - if an error occurred: ZSTD_CONTENTSIZE_ERROR\n\n note 1 : decompressed size is an optional field, that may not be present, especially in streaming mode.\n When `return==ZSTD_CONTENTSIZE_UNKNOWN`, data to decompress could be any size.\n In which case, it's necessary to use streaming mode to decompress data.\n note 2 : decompressed size is always present when compression is done with ZSTD_compress()\n note 3 : decompressed size can be very large (64-bits value),\n potentially larger than what local system can handle as a single memory segment.\n In which case, it's necessary to use streaming mode to decompress data.\n note 4 : If source is untrusted, decompressed size could be wrong or intentionally modified.\n Always ensure result fits within application's authorized limits.\n Each application can set its own limits.\n note 5 : ZSTD_findDecompressedSize handles multiple frames, and so it must traverse the input to\n read each contained frame header. This is fast as most of the data is skipped,\n however it does mean that all frame data must be present and valid."] pub fn ZSTD_findDecompressedSize( src: *const ::core::ffi::c_void, srcSize: usize, ) -> ::core::ffi::c_ulonglong; } extern "C" { #[doc = " ZSTD_decompressBound() :\n `src` should point to the start of a series of ZSTD encoded and/or skippable frames\n `srcSize` must be the _exact_ size of this series\n (i.e. 
there should be a frame boundary at `src + srcSize`)\n @return : - upper-bound for the decompressed size of all data in all successive frames\n - if an error occurred: ZSTD_CONTENTSIZE_ERROR\n\n note 1 : an error can occur if `src` contains an invalid or incorrectly formatted frame.\n note 2 : the upper-bound is exact when the decompressed size field is available in every ZSTD encoded frame of `src`.\n in this case, `ZSTD_findDecompressedSize` and `ZSTD_decompressBound` return the same value.\n note 3 : when the decompressed size field isn't available, the upper-bound for that frame is calculated by:\n upper-bound = # blocks * min(128 KB, Window_Size)"] pub fn ZSTD_decompressBound( src: *const ::core::ffi::c_void, srcSize: usize, ) -> ::core::ffi::c_ulonglong; } extern "C" { #[doc = " ZSTD_frameHeaderSize() :\n srcSize must be >= ZSTD_FRAMEHEADERSIZE_PREFIX.\n @return : size of the Frame Header,\n or an error code (if srcSize is too small)"] pub fn ZSTD_frameHeaderSize( src: *const ::core::ffi::c_void, srcSize: usize, ) -> usize; } #[repr(u32)] #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] pub enum ZSTD_frameType_e { ZSTD_frame = 0, ZSTD_skippableFrame = 1, } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct ZSTD_frameHeader { pub frameContentSize: ::core::ffi::c_ulonglong, pub windowSize: ::core::ffi::c_ulonglong, pub blockSizeMax: ::core::ffi::c_uint, pub frameType: ZSTD_frameType_e, pub headerSize: ::core::ffi::c_uint, pub dictID: ::core::ffi::c_uint, pub checksumFlag: ::core::ffi::c_uint, pub _reserved1: ::core::ffi::c_uint, pub _reserved2: ::core::ffi::c_uint, } extern "C" { #[doc = " ZSTD_getFrameHeader() :\n decode Frame Header, or requires larger `srcSize`.\n @return : 0, `zfhPtr` is correctly filled,\n >0, `srcSize` is too small, value is wanted `srcSize` amount,\n or an error code, which can be tested using ZSTD_isError()"] pub fn ZSTD_getFrameHeader( zfhPtr: *mut ZSTD_frameHeader, src: *const ::core::ffi::c_void, srcSize: usize, ) -> usize; } 
// NOTE(review): machine-generated FFI bindings (bindgen-style `#[doc]` / `::core::ffi` output)
// for zstd's advanced frame-inspection and sequence-producer API. Signatures mirror zstd.h's
// experimental section — regenerate from the header rather than hand-editing these declarations.
extern "C" { #[doc = " ZSTD_getFrameHeader_advanced() :\n same as ZSTD_getFrameHeader(),\n with added capability to select a format (like ZSTD_f_zstd1_magicless)"] pub fn ZSTD_getFrameHeader_advanced( zfhPtr: *mut ZSTD_frameHeader, src: *const ::core::ffi::c_void, srcSize: usize, format: ZSTD_format_e, ) -> usize; } extern "C" { #[doc = " ZSTD_decompressionMargin() :\n Zstd supports in-place decompression, where the input and output buffers overlap.\n In this case, the output buffer must be at least (Margin + Output_Size) bytes large,\n and the input buffer must be at the end of the output buffer.\n\n _______________________ Output Buffer ________________________\n | |\n | ____ Input Buffer ____|\n | | |\n v v v\n |---------------------------------------|-----------|----------|\n ^ ^ ^\n |___________________ Output_Size ___________________|_ Margin _|\n\n NOTE: See also ZSTD_DECOMPRESSION_MARGIN().\n NOTE: This applies only to single-pass decompression through ZSTD_decompress() or\n ZSTD_decompressDCtx().\n NOTE: This function supports multi-frame input.\n\n @param src The compressed frame(s)\n @param srcSize The size of the compressed frame(s)\n @returns The decompression margin or an error that can be checked with ZSTD_isError()."] pub fn ZSTD_decompressionMargin( src: *const ::core::ffi::c_void, srcSize: usize, ) -> usize; } #[repr(u32)] #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] pub enum ZSTD_sequenceFormat_e { ZSTD_sf_noBlockDelimiters = 0, ZSTD_sf_explicitBlockDelimiters = 1, } extern "C" { #[doc = " ZSTD_sequenceBound() :\n `srcSize` : size of the input buffer\n @return : upper-bound for the number of sequences that can be generated\n from a buffer of srcSize bytes\n\n note : returns number of sequences - to get bytes, multiply by sizeof(ZSTD_Sequence)."] pub fn ZSTD_sequenceBound(srcSize: usize) -> usize; } extern "C" { #[doc = " ZSTD_generateSequences() :\n Generate sequences using ZSTD_compress2(), given a source buffer.\n\n Each block will end 
with a dummy sequence\n with offset == 0, matchLength == 0, and litLength == length of last literals.\n litLength may be == 0, and if so, then the sequence of (of: 0 ml: 0 ll: 0)\n simply acts as a block delimiter.\n\n @zc can be used to insert custom compression params.\n This function invokes ZSTD_compress2().\n\n The output of this function can be fed into ZSTD_compressSequences() with CCtx\n setting of ZSTD_c_blockDelimiters as ZSTD_sf_explicitBlockDelimiters\n @return : number of sequences generated"] pub fn ZSTD_generateSequences( zc: *mut ZSTD_CCtx, outSeqs: *mut ZSTD_Sequence, outSeqsSize: usize, src: *const ::core::ffi::c_void, srcSize: usize, ) -> usize; } extern "C" { #[doc = " ZSTD_mergeBlockDelimiters() :\n Given an array of ZSTD_Sequence, remove all sequences that represent block delimiters/last literals\n by merging them into the literals of the next sequence.\n\n As such, the final generated result has no explicit representation of block boundaries,\n and the final last literals segment is not represented in the sequences.\n\n The output of this function can be fed into ZSTD_compressSequences() with CCtx\n setting of ZSTD_c_blockDelimiters as ZSTD_sf_noBlockDelimiters\n @return : number of sequences left after merging"] pub fn ZSTD_mergeBlockDelimiters( sequences: *mut ZSTD_Sequence, seqsSize: usize, ) -> usize; } extern "C" { #[doc = " ZSTD_compressSequences() :\n Compress an array of ZSTD_Sequence, associated with @src buffer, into dst.\n @src contains the entire input (not just the literals).\n If @srcSize > sum(sequence.length), the remaining bytes are considered all literals\n If a dictionary is included, then the cctx should reference the dict. (see: ZSTD_CCtx_refCDict(), ZSTD_CCtx_loadDictionary(), etc.)\n The entire source is compressed into a single frame.\n\n The compression behavior changes based on cctx params. 
In particular:\n If ZSTD_c_blockDelimiters == ZSTD_sf_noBlockDelimiters, the array of ZSTD_Sequence is expected to contain\n no block delimiters (defined in ZSTD_Sequence). Block boundaries are roughly determined based on\n the block size derived from the cctx, and sequences may be split. This is the default setting.\n\n If ZSTD_c_blockDelimiters == ZSTD_sf_explicitBlockDelimiters, the array of ZSTD_Sequence is expected to contain\n block delimiters (defined in ZSTD_Sequence). Behavior is undefined if no block delimiters are provided.\n\n If ZSTD_c_validateSequences == 0, this function will blindly accept the sequences provided. Invalid sequences cause undefined\n behavior. If ZSTD_c_validateSequences == 1, then if sequence is invalid (see doc/zstd_compression_format.md for\n specifics regarding offset/matchlength requirements) then the function will bail out and return an error.\n\n In addition to the two adjustable experimental params, there are other important cctx params.\n - ZSTD_c_minMatch MUST be set as less than or equal to the smallest match generated by the match finder. It has a minimum value of ZSTD_MINMATCH_MIN.\n - ZSTD_c_compressionLevel accordingly adjusts the strength of the entropy coder, as it would in typical compression.\n - ZSTD_c_windowLog affects offset validation: this function will return an error at higher debug levels if a provided offset\n is larger than what the spec allows for a given window log and dictionary (if present). 
See: doc/zstd_compression_format.md\n\n Note: Repcodes are, as of now, always re-calculated within this function, so ZSTD_Sequence::rep is unused.\n Note 2: Once we integrate ability to ingest repcodes, the explicit block delims mode must respect those repcodes exactly,\n and cannot emit an RLE block that disagrees with the repcode history\n @return : final compressed size, or a ZSTD error code."] pub fn ZSTD_compressSequences( cctx: *mut ZSTD_CCtx, dst: *mut ::core::ffi::c_void, dstSize: usize, inSeqs: *const ZSTD_Sequence, inSeqsSize: usize, src: *const ::core::ffi::c_void, srcSize: usize, ) -> usize; } extern "C" { #[doc = " ZSTD_writeSkippableFrame() :\n Generates a zstd skippable frame containing data given by src, and writes it to dst buffer.\n\n Skippable frames begin with a 4-byte magic number. There are 16 possible choices of magic number,\n ranging from ZSTD_MAGIC_SKIPPABLE_START to ZSTD_MAGIC_SKIPPABLE_START+15.\n As such, the parameter magicVariant controls the exact skippable frame magic number variant used, so\n the magic number used will be ZSTD_MAGIC_SKIPPABLE_START + magicVariant.\n\n Returns an error if destination buffer is not large enough, if the source size is not representable\n with a 4-byte unsigned int, or if the parameter magicVariant is greater than 15 (and therefore invalid).\n\n @return : number of bytes written or a ZSTD error."] pub fn ZSTD_writeSkippableFrame( dst: *mut ::core::ffi::c_void, dstCapacity: usize, src: *const ::core::ffi::c_void, srcSize: usize, magicVariant: ::core::ffi::c_uint, ) -> usize; } extern "C" { #[doc = " ZSTD_readSkippableFrame() :\n Retrieves a zstd skippable frame containing data given by src, and writes it to dst buffer.\n\n The parameter magicVariant will receive the magicVariant that was supplied when the frame was written,\n i.e. magicNumber - ZSTD_MAGIC_SKIPPABLE_START. 
This can be NULL if the caller is not interested\n in the magicVariant.\n\n Returns an error if destination buffer is not large enough, or if the frame is not skippable.\n\n @return : number of bytes written or a ZSTD error."] pub fn ZSTD_readSkippableFrame( dst: *mut ::core::ffi::c_void, dstCapacity: usize, magicVariant: *mut ::core::ffi::c_uint, src: *const ::core::ffi::c_void, srcSize: usize, ) -> usize; } extern "C" { #[doc = " ZSTD_isSkippableFrame() :\n Tells if the content of `buffer` starts with a valid Frame Identifier for a skippable frame."] pub fn ZSTD_isSkippableFrame( buffer: *const ::core::ffi::c_void, size: usize, ) -> ::core::ffi::c_uint; } extern "C" { #[doc = " ZSTD_estimate*() :\n These functions make it possible to estimate memory usage\n of a future {D,C}Ctx, before its creation.\n\n ZSTD_estimateCCtxSize() will provide a memory budget large enough\n for any compression level up to selected one.\n Note : Unlike ZSTD_estimateCStreamSize*(), this estimate\n does not include space for a window buffer.\n Therefore, the estimation is only guaranteed for single-shot compressions, not streaming.\n The estimate will assume the input may be arbitrarily large,\n which is the worst case.\n\n When srcSize can be bound by a known and rather \"small\" value,\n this fact can be used to provide a tighter estimation\n because the CCtx compression context will need less memory.\n This tighter estimation can be provided by more advanced functions\n ZSTD_estimateCCtxSize_usingCParams(), which can be used in tandem with ZSTD_getCParams(),\n and ZSTD_estimateCCtxSize_usingCCtxParams(), which can be used in tandem with ZSTD_CCtxParams_setParameter().\n Both can be used to estimate memory using custom compression parameters and arbitrary srcSize limits.\n\n Note : only single-threaded compression is supported.\n ZSTD_estimateCCtxSize_usingCCtxParams() will return an error code if ZSTD_c_nbWorkers is >= 1.\n\n Note 2 : ZSTD_estimateCCtxSize* functions are not 
compatible with the Block-Level Sequence Producer API at this time.\n Size estimates assume that no external sequence producer is registered."] pub fn ZSTD_estimateCCtxSize( compressionLevel: ::core::ffi::c_int, ) -> usize; } extern "C" { pub fn ZSTD_estimateCCtxSize_usingCParams( cParams: ZSTD_compressionParameters, ) -> usize; } extern "C" { pub fn ZSTD_estimateCCtxSize_usingCCtxParams( params: *const ZSTD_CCtx_params, ) -> usize; } extern "C" { pub fn ZSTD_estimateDCtxSize() -> usize; } extern "C" { #[doc = " ZSTD_estimateCStreamSize() :\n ZSTD_estimateCStreamSize() will provide a budget large enough for any compression level up to selected one.\n It will also consider src size to be arbitrarily \"large\", which is worst case.\n If srcSize is known to always be small, ZSTD_estimateCStreamSize_usingCParams() can provide a tighter estimation.\n ZSTD_estimateCStreamSize_usingCParams() can be used in tandem with ZSTD_getCParams() to create cParams from compressionLevel.\n ZSTD_estimateCStreamSize_usingCCtxParams() can be used in tandem with ZSTD_CCtxParams_setParameter(). Only single-threaded compression is supported. 
This function will return an error code if ZSTD_c_nbWorkers is >= 1.\n Note : CStream size estimation is only correct for single-threaded compression.\n ZSTD_DStream memory budget depends on window Size.\n This information can be passed manually, using ZSTD_estimateDStreamSize,\n or deducted from a valid frame Header, using ZSTD_estimateDStreamSize_fromFrame();\n Note : if streaming is init with function ZSTD_init?Stream_usingDict(),\n an internal ?Dict will be created, which additional size is not estimated here.\n In this case, get total size by adding ZSTD_estimate?DictSize\n Note 2 : only single-threaded compression is supported.\n ZSTD_estimateCStreamSize_usingCCtxParams() will return an error code if ZSTD_c_nbWorkers is >= 1.\n Note 3 : ZSTD_estimateCStreamSize* functions are not compatible with the Block-Level Sequence Producer API at this time.\n Size estimates assume that no external sequence producer is registered."] pub fn ZSTD_estimateCStreamSize( compressionLevel: ::core::ffi::c_int, ) -> usize; } extern "C" { pub fn ZSTD_estimateCStreamSize_usingCParams( cParams: ZSTD_compressionParameters, ) -> usize; } extern "C" { pub fn ZSTD_estimateCStreamSize_usingCCtxParams( params: *const ZSTD_CCtx_params, ) -> usize; } extern "C" { pub fn ZSTD_estimateDStreamSize(windowSize: usize) -> usize; } extern "C" { pub fn ZSTD_estimateDStreamSize_fromFrame( src: *const ::core::ffi::c_void, srcSize: usize, ) -> usize; } extern "C" { #[doc = " ZSTD_estimate?DictSize() :\n ZSTD_estimateCDictSize() will bet that src size is relatively \"small\", and content is copied, like ZSTD_createCDict().\n ZSTD_estimateCDictSize_advanced() makes it possible to control compression parameters precisely, like ZSTD_createCDict_advanced().\n Note : dictionaries created by reference (`ZSTD_dlm_byRef`) are logically smaller."] pub fn ZSTD_estimateCDictSize( dictSize: usize, compressionLevel: ::core::ffi::c_int, ) -> usize; } extern "C" { pub fn ZSTD_estimateCDictSize_advanced( dictSize: 
// NOTE(review): generated bindings — this line continues the ZSTD_estimateCDictSize_advanced
// parameter list opened on the previous source line; declarations below cover the
// ZSTD_initStatic*() workspace initializers and the custom-memory-allocation API.
usize, cParams: ZSTD_compressionParameters, dictLoadMethod: ZSTD_dictLoadMethod_e, ) -> usize; } extern "C" { pub fn ZSTD_estimateDDictSize( dictSize: usize, dictLoadMethod: ZSTD_dictLoadMethod_e, ) -> usize; } extern "C" { #[doc = " ZSTD_initStatic*() :\n Initialize an object using a pre-allocated fixed-size buffer.\n workspace: The memory area to emplace the object into.\n Provided pointer *must be 8-bytes aligned*.\n Buffer must outlive object.\n workspaceSize: Use ZSTD_estimate*Size() to determine\n how large workspace must be to support target scenario.\n @return : pointer to object (same address as workspace, just different type),\n or NULL if error (size too small, incorrect alignment, etc.)\n Note : zstd will never resize nor malloc() when using a static buffer.\n If the object requires more memory than available,\n zstd will just error out (typically ZSTD_error_memory_allocation).\n Note 2 : there is no corresponding \"free\" function.\n Since workspace is allocated externally, it must be freed externally too.\n Note 3 : cParams : use ZSTD_getCParams() to convert a compression level\n into its associated cParams.\n Limitation 1 : currently not compatible with internal dictionary creation, triggered by\n ZSTD_CCtx_loadDictionary(), ZSTD_initCStream_usingDict() or ZSTD_initDStream_usingDict().\n Limitation 2 : static cctx currently not compatible with multi-threading.\n Limitation 3 : static dctx is incompatible with legacy support."] pub fn ZSTD_initStaticCCtx( workspace: *mut ::core::ffi::c_void, workspaceSize: usize, ) -> *mut ZSTD_CCtx; } extern "C" { pub fn ZSTD_initStaticCStream( workspace: *mut ::core::ffi::c_void, workspaceSize: usize, ) -> *mut ZSTD_CStream; } extern "C" { pub fn ZSTD_initStaticDCtx( workspace: *mut ::core::ffi::c_void, workspaceSize: usize, ) -> *mut ZSTD_DCtx; } extern "C" { pub fn ZSTD_initStaticDStream( workspace: *mut ::core::ffi::c_void, workspaceSize: usize, ) -> *mut ZSTD_DStream; } extern "C" { pub fn ZSTD_initStaticCDict( 
// ZSTD_initStaticCDict's parameter list continues below; per the doc text above, the
// ZSTD_initStatic*() family returns a pointer aliasing the caller-provided workspace and
// has no corresponding free function (the workspace is freed externally).
workspace: *mut ::core::ffi::c_void, workspaceSize: usize, dict: *const ::core::ffi::c_void, dictSize: usize, dictLoadMethod: ZSTD_dictLoadMethod_e, dictContentType: ZSTD_dictContentType_e, cParams: ZSTD_compressionParameters, ) -> *const ZSTD_CDict; } extern "C" { pub fn ZSTD_initStaticDDict( workspace: *mut ::core::ffi::c_void, workspaceSize: usize, dict: *const ::core::ffi::c_void, dictSize: usize, dictLoadMethod: ZSTD_dictLoadMethod_e, dictContentType: ZSTD_dictContentType_e, ) -> *const ZSTD_DDict; } #[doc = " Custom memory allocation :\n These prototypes make it possible to pass your own allocation/free functions.\n ZSTD_customMem is provided at creation time, using ZSTD_create*_advanced() variants listed below.\n All allocation/free operations will be completed using these custom variants instead of regular <stdlib.h> ones."] pub type ZSTD_allocFunction = ::core::option::Option< unsafe extern "C" fn( opaque: *mut ::core::ffi::c_void, size: usize, ) -> *mut ::core::ffi::c_void, >; pub type ZSTD_freeFunction = ::core::option::Option< unsafe extern "C" fn( opaque: *mut ::core::ffi::c_void, address: *mut ::core::ffi::c_void, ), >; #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct ZSTD_customMem { pub customAlloc: ZSTD_allocFunction, pub customFree: ZSTD_freeFunction, pub opaque: *mut ::core::ffi::c_void, } extern "C" { #[doc = "< this constant defers to stdlib's functions"] pub static ZSTD_defaultCMem: ZSTD_customMem; } extern "C" { pub fn ZSTD_createCCtx_advanced( customMem: ZSTD_customMem, ) -> *mut ZSTD_CCtx; } extern "C" { pub fn ZSTD_createCStream_advanced( customMem: ZSTD_customMem, ) -> *mut ZSTD_CStream; } extern "C" { pub fn ZSTD_createDCtx_advanced( customMem: ZSTD_customMem, ) -> *mut ZSTD_DCtx; } extern "C" { pub fn ZSTD_createDStream_advanced( customMem: ZSTD_customMem, ) -> *mut ZSTD_DStream; } extern "C" { pub fn ZSTD_createCDict_advanced( dict: *const ::core::ffi::c_void, dictSize: usize, dictLoadMethod: ZSTD_dictLoadMethod_e, dictContentType: 
// ZSTD_createCDict_advanced's parameter list continues below; the *_advanced constructors
// above all thread a caller-supplied ZSTD_customMem allocator through object creation.
ZSTD_dictContentType_e, cParams: ZSTD_compressionParameters, customMem: ZSTD_customMem, ) -> *mut ZSTD_CDict; } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct POOL_ctx_s { _unused: [u8; 0], } #[doc = " Thread pool :\n These prototypes make it possible to share a thread pool among multiple compression contexts.\n This can limit resources for applications with multiple threads where each one uses\n a threaded compression mode (via ZSTD_c_nbWorkers parameter).\n ZSTD_createThreadPool creates a new thread pool with a given number of threads.\n Note that the lifetime of such pool must exist while being used.\n ZSTD_CCtx_refThreadPool assigns a thread pool to a context (use NULL argument value\n to use an internal thread pool).\n ZSTD_freeThreadPool frees a thread pool, accepts NULL pointer."] pub type ZSTD_threadPool = POOL_ctx_s; extern "C" { pub fn ZSTD_createThreadPool(numThreads: usize) -> *mut ZSTD_threadPool; } extern "C" { pub fn ZSTD_freeThreadPool(pool: *mut ZSTD_threadPool); } extern "C" { pub fn ZSTD_CCtx_refThreadPool( cctx: *mut ZSTD_CCtx, pool: *mut ZSTD_threadPool, ) -> usize; } extern "C" { pub fn ZSTD_createCDict_advanced2( dict: *const ::core::ffi::c_void, dictSize: usize, dictLoadMethod: ZSTD_dictLoadMethod_e, dictContentType: ZSTD_dictContentType_e, cctxParams: *const ZSTD_CCtx_params, customMem: ZSTD_customMem, ) -> *mut ZSTD_CDict; } extern "C" { pub fn ZSTD_createDDict_advanced( dict: *const ::core::ffi::c_void, dictSize: usize, dictLoadMethod: ZSTD_dictLoadMethod_e, dictContentType: ZSTD_dictContentType_e, customMem: ZSTD_customMem, ) -> *mut ZSTD_DDict; } extern "C" { #[doc = " ZSTD_createCDict_byReference() :\n Create a digested dictionary for compression\n Dictionary content is just referenced, not duplicated.\n As a consequence, `dictBuffer` **must** outlive CDict,\n and its content must remain unmodified throughout the lifetime of CDict.\n note: equivalent to ZSTD_createCDict_advanced(), with dictLoadMethod==ZSTD_dlm_byRef"] pub fn 
// ZSTD_createCDict_byReference: per the doc text above, the dict buffer is only referenced
// (not copied), so it must outlive — and stay unmodified for — the lifetime of the CDict.
ZSTD_createCDict_byReference( dictBuffer: *const ::core::ffi::c_void, dictSize: usize, compressionLevel: ::core::ffi::c_int, ) -> *mut ZSTD_CDict; } extern "C" { #[doc = " ZSTD_getCParams() :\n @return ZSTD_compressionParameters structure for a selected compression level and estimated srcSize.\n `estimatedSrcSize` value is optional, select 0 if not known"] pub fn ZSTD_getCParams( compressionLevel: ::core::ffi::c_int, estimatedSrcSize: ::core::ffi::c_ulonglong, dictSize: usize, ) -> ZSTD_compressionParameters; } extern "C" { #[doc = " ZSTD_getParams() :\n same as ZSTD_getCParams(), but @return a full `ZSTD_parameters` object instead of sub-component `ZSTD_compressionParameters`.\n All fields of `ZSTD_frameParameters` are set to default : contentSize=1, checksum=0, noDictID=0"] pub fn ZSTD_getParams( compressionLevel: ::core::ffi::c_int, estimatedSrcSize: ::core::ffi::c_ulonglong, dictSize: usize, ) -> ZSTD_parameters; } extern "C" { #[doc = " ZSTD_checkCParams() :\n Ensure param values remain within authorized range.\n @return 0 on success, or an error code (can be checked with ZSTD_isError())"] pub fn ZSTD_checkCParams(params: ZSTD_compressionParameters) -> usize; } extern "C" { #[doc = " ZSTD_adjustCParams() :\n optimize params for a given `srcSize` and `dictSize`.\n `srcSize` can be unknown, in which case use ZSTD_CONTENTSIZE_UNKNOWN.\n `dictSize` must be `0` when there is no dictionary.\n cPar can be invalid : all parameters will be clamped within valid range in the @return struct.\n This function never fails (wide contract)"] pub fn ZSTD_adjustCParams( cPar: ZSTD_compressionParameters, srcSize: ::core::ffi::c_ulonglong, dictSize: usize, ) -> ZSTD_compressionParameters; } extern "C" { #[doc = " ZSTD_CCtx_setCParams() :\n Set all parameters provided within @p cparams into the working @p cctx.\n Note : if modifying parameters during compression (MT mode only),\n note that changes to the .windowLog parameter will be ignored.\n @return 0 on success, or an error code 
(can be checked with ZSTD_isError()).\n On failure, no parameters are updated."] pub fn ZSTD_CCtx_setCParams( cctx: *mut ZSTD_CCtx, cparams: ZSTD_compressionParameters, ) -> usize; } extern "C" { #[doc = " ZSTD_CCtx_setFParams() :\n Set all parameters provided within @p fparams into the working @p cctx.\n @return 0 on success, or an error code (can be checked with ZSTD_isError())."] pub fn ZSTD_CCtx_setFParams( cctx: *mut ZSTD_CCtx, fparams: ZSTD_frameParameters, ) -> usize; } extern "C" { #[doc = " ZSTD_CCtx_setParams() :\n Set all parameters provided within @p params into the working @p cctx.\n @return 0 on success, or an error code (can be checked with ZSTD_isError())."] pub fn ZSTD_CCtx_setParams( cctx: *mut ZSTD_CCtx, params: ZSTD_parameters, ) -> usize; } extern "C" { #[doc = " ZSTD_compress_advanced() :\n Note : this function is now DEPRECATED.\n It can be replaced by ZSTD_compress2(), in combination with ZSTD_CCtx_setParameter() and other parameter setters.\n This prototype will generate compilation warnings."] pub fn ZSTD_compress_advanced( cctx: *mut ZSTD_CCtx, dst: *mut ::core::ffi::c_void, dstCapacity: usize, src: *const ::core::ffi::c_void, srcSize: usize, dict: *const ::core::ffi::c_void, dictSize: usize, params: ZSTD_parameters, ) -> usize; } extern "C" { #[doc = " ZSTD_compress_usingCDict_advanced() :\n Note : this function is now DEPRECATED.\n It can be replaced by ZSTD_compress2(), in combination with ZSTD_CCtx_loadDictionary() and other parameter setters.\n This prototype will generate compilation warnings."] pub fn ZSTD_compress_usingCDict_advanced( cctx: *mut ZSTD_CCtx, dst: *mut ::core::ffi::c_void, dstCapacity: usize, src: *const ::core::ffi::c_void, srcSize: usize, cdict: *const ZSTD_CDict, fParams: ZSTD_frameParameters, ) -> usize; } extern "C" { #[doc = " ZSTD_CCtx_loadDictionary_byReference() :\n Same as ZSTD_CCtx_loadDictionary(), but dictionary content is referenced, instead of being copied into CCtx.\n It saves some memory, but also 
requires that `dict` outlives its usage within `cctx`"] pub fn ZSTD_CCtx_loadDictionary_byReference( cctx: *mut ZSTD_CCtx, dict: *const ::core::ffi::c_void, dictSize: usize, ) -> usize; } extern "C" { #[doc = " ZSTD_CCtx_loadDictionary_advanced() :\n Same as ZSTD_CCtx_loadDictionary(), but gives finer control over\n how to load the dictionary (by copy ? by reference ?)\n and how to interpret it (automatic ? force raw mode ? full mode only ?)"] pub fn ZSTD_CCtx_loadDictionary_advanced( cctx: *mut ZSTD_CCtx, dict: *const ::core::ffi::c_void, dictSize: usize, dictLoadMethod: ZSTD_dictLoadMethod_e, dictContentType: ZSTD_dictContentType_e, ) -> usize; } extern "C" { #[doc = " ZSTD_CCtx_refPrefix_advanced() :\n Same as ZSTD_CCtx_refPrefix(), but gives finer control over\n how to interpret prefix content (automatic ? force raw mode (default) ? full mode only ?)"] pub fn ZSTD_CCtx_refPrefix_advanced( cctx: *mut ZSTD_CCtx, prefix: *const ::core::ffi::c_void, prefixSize: usize, dictContentType: ZSTD_dictContentType_e, ) -> usize; } extern "C" { #[doc = " ZSTD_CCtx_getParameter() :\n Get the requested compression parameter value, selected by enum ZSTD_cParameter,\n and store it into int* value.\n @return : 0, or an error code (which can be tested with ZSTD_isError())."] pub fn ZSTD_CCtx_getParameter( cctx: *const ZSTD_CCtx, param: ZSTD_cParameter, value: *mut ::core::ffi::c_int, ) -> usize; } extern "C" { #[doc = " ZSTD_CCtx_params :\n Quick howto :\n - ZSTD_createCCtxParams() : Create a ZSTD_CCtx_params structure\n - ZSTD_CCtxParams_setParameter() : Push parameters one by one into\n an existing ZSTD_CCtx_params structure.\n This is similar to\n ZSTD_CCtx_setParameter().\n - ZSTD_CCtx_setParametersUsingCCtxParams() : Apply parameters to\n an existing CCtx.\n These parameters will be applied to\n all subsequent frames.\n - ZSTD_compressStream2() : Do compression using the CCtx.\n - ZSTD_freeCCtxParams() : Free the memory, accept NULL pointer.\n\n This can be used with 
ZSTD_estimateCCtxSize_advanced_usingCCtxParams()\n for static allocation of CCtx for single-threaded compression."] pub fn ZSTD_createCCtxParams() -> *mut ZSTD_CCtx_params; } extern "C" { pub fn ZSTD_freeCCtxParams(params: *mut ZSTD_CCtx_params) -> usize; } extern "C" { #[doc = " ZSTD_CCtxParams_reset() :\n Reset params to default values."] pub fn ZSTD_CCtxParams_reset(params: *mut ZSTD_CCtx_params) -> usize; } extern "C" { #[doc = " ZSTD_CCtxParams_init() :\n Initializes the compression parameters of cctxParams according to\n compression level. All other parameters are reset to their default values."] pub fn ZSTD_CCtxParams_init( cctxParams: *mut ZSTD_CCtx_params, compressionLevel: ::core::ffi::c_int, ) -> usize; } extern "C" { #[doc = " ZSTD_CCtxParams_init_advanced() :\n Initializes the compression and frame parameters of cctxParams according to\n params. All other parameters are reset to their default values."] pub fn ZSTD_CCtxParams_init_advanced( cctxParams: *mut ZSTD_CCtx_params, params: ZSTD_parameters, ) -> usize; } extern "C" { #[doc = " ZSTD_CCtxParams_setParameter() : Requires v1.4.0+\n Similar to ZSTD_CCtx_setParameter.\n Set one compression parameter, selected by enum ZSTD_cParameter.\n Parameters must be applied to a ZSTD_CCtx using\n ZSTD_CCtx_setParametersUsingCCtxParams().\n @result : a code representing success or failure (which can be tested with\n ZSTD_isError())."] pub fn ZSTD_CCtxParams_setParameter( params: *mut ZSTD_CCtx_params, param: ZSTD_cParameter, value: ::core::ffi::c_int, ) -> usize; } extern "C" { #[doc = " ZSTD_CCtxParams_getParameter() :\n Similar to ZSTD_CCtx_getParameter.\n Get the requested value of one compression parameter, selected by enum ZSTD_cParameter.\n @result : 0, or an error code (which can be tested with ZSTD_isError())."] pub fn ZSTD_CCtxParams_getParameter( params: *const ZSTD_CCtx_params, param: ZSTD_cParameter, value: *mut ::core::ffi::c_int, ) -> usize; } extern "C" { #[doc = " 
ZSTD_CCtx_setParametersUsingCCtxParams() :\n Apply a set of ZSTD_CCtx_params to the compression context.\n This can be done even after compression is started,\n if nbWorkers==0, this will have no impact until a new compression is started.\n if nbWorkers>=1, new parameters will be picked up at next job,\n with a few restrictions (windowLog, pledgedSrcSize, nbWorkers, jobSize, and overlapLog are not updated)."] pub fn ZSTD_CCtx_setParametersUsingCCtxParams( cctx: *mut ZSTD_CCtx, params: *const ZSTD_CCtx_params, ) -> usize; } extern "C" { #[doc = " ZSTD_compressStream2_simpleArgs() :\n Same as ZSTD_compressStream2(),\n but using only integral types as arguments.\n This variant might be helpful for binders from dynamic languages\n which have troubles handling structures containing memory pointers."] pub fn ZSTD_compressStream2_simpleArgs( cctx: *mut ZSTD_CCtx, dst: *mut ::core::ffi::c_void, dstCapacity: usize, dstPos: *mut usize, src: *const ::core::ffi::c_void, srcSize: usize, srcPos: *mut usize, endOp: ZSTD_EndDirective, ) -> usize; } extern "C" { #[doc = " ZSTD_isFrame() :\n Tells if the content of `buffer` starts with a valid Frame Identifier.\n Note : Frame Identifier is 4 bytes. 
If `size < 4`, @return will always be 0.\n Note 2 : Legacy Frame Identifiers are considered valid only if Legacy Support is enabled.\n Note 3 : Skippable Frame Identifiers are considered valid."] pub fn ZSTD_isFrame( buffer: *const ::core::ffi::c_void, size: usize, ) -> ::core::ffi::c_uint; } extern "C" { #[doc = " ZSTD_createDDict_byReference() :\n Create a digested dictionary, ready to start decompression operation without startup delay.\n Dictionary content is referenced, and therefore stays in dictBuffer.\n It is important that dictBuffer outlives DDict,\n it must remain read accessible throughout the lifetime of DDict"] pub fn ZSTD_createDDict_byReference( dictBuffer: *const ::core::ffi::c_void, dictSize: usize, ) -> *mut ZSTD_DDict; } extern "C" { #[doc = " ZSTD_DCtx_loadDictionary_byReference() :\n Same as ZSTD_DCtx_loadDictionary(),\n but references `dict` content instead of copying it into `dctx`.\n This saves memory if `dict` remains around.,\n However, it's imperative that `dict` remains accessible (and unmodified) while being used, so it must outlive decompression."] pub fn ZSTD_DCtx_loadDictionary_byReference( dctx: *mut ZSTD_DCtx, dict: *const ::core::ffi::c_void, dictSize: usize, ) -> usize; } extern "C" { #[doc = " ZSTD_DCtx_loadDictionary_advanced() :\n Same as ZSTD_DCtx_loadDictionary(),\n but gives direct control over\n how to load the dictionary (by copy ? by reference ?)\n and how to interpret it (automatic ? force raw mode ? full mode only ?)."] pub fn ZSTD_DCtx_loadDictionary_advanced( dctx: *mut ZSTD_DCtx, dict: *const ::core::ffi::c_void, dictSize: usize, dictLoadMethod: ZSTD_dictLoadMethod_e, dictContentType: ZSTD_dictContentType_e, ) -> usize; } extern "C" { #[doc = " ZSTD_DCtx_refPrefix_advanced() :\n Same as ZSTD_DCtx_refPrefix(), but gives finer control over\n how to interpret prefix content (automatic ? force raw mode (default) ? 
full mode only ?)"] pub fn ZSTD_DCtx_refPrefix_advanced( dctx: *mut ZSTD_DCtx, prefix: *const ::core::ffi::c_void, prefixSize: usize, dictContentType: ZSTD_dictContentType_e, ) -> usize; } extern "C" { #[doc = " ZSTD_DCtx_setMaxWindowSize() :\n Refuses allocating internal buffers for frames requiring a window size larger than provided limit.\n This protects a decoder context from reserving too much memory for itself (potential attack scenario).\n This parameter is only useful in streaming mode, since no internal buffer is allocated in single-pass mode.\n By default, a decompression context accepts all window sizes <= (1 << ZSTD_WINDOWLOG_LIMIT_DEFAULT)\n @return : 0, or an error code (which can be tested using ZSTD_isError())."] pub fn ZSTD_DCtx_setMaxWindowSize( dctx: *mut ZSTD_DCtx, maxWindowSize: usize, ) -> usize; } extern "C" { #[doc = " ZSTD_DCtx_getParameter() :\n Get the requested decompression parameter value, selected by enum ZSTD_dParameter,\n and store it into int* value.\n @return : 0, or an error code (which can be tested with ZSTD_isError())."] pub fn ZSTD_DCtx_getParameter( dctx: *mut ZSTD_DCtx, param: ZSTD_dParameter, value: *mut ::core::ffi::c_int, ) -> usize; } extern "C" { #[doc = " ZSTD_DCtx_setFormat() :\n This function is REDUNDANT. 
Prefer ZSTD_DCtx_setParameter().\n Instruct the decoder context about what kind of data to decode next.\n This instruction is mandatory to decode data without a fully-formed header,\n such ZSTD_f_zstd1_magicless for example.\n @return : 0, or an error code (which can be tested using ZSTD_isError())."] pub fn ZSTD_DCtx_setFormat( dctx: *mut ZSTD_DCtx, format: ZSTD_format_e, ) -> usize; } extern "C" { #[doc = " ZSTD_decompressStream_simpleArgs() :\n Same as ZSTD_decompressStream(),\n but using only integral types as arguments.\n This can be helpful for binders from dynamic languages\n which have troubles handling structures containing memory pointers."] pub fn ZSTD_decompressStream_simpleArgs( dctx: *mut ZSTD_DCtx, dst: *mut ::core::ffi::c_void, dstCapacity: usize, dstPos: *mut usize, src: *const ::core::ffi::c_void, srcSize: usize, srcPos: *mut usize, ) -> usize; } extern "C" { #[doc = " ZSTD_initCStream_srcSize() :\n This function is DEPRECATED, and equivalent to:\n ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);\n ZSTD_CCtx_refCDict(zcs, NULL); // clear the dictionary (if any)\n ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel);\n ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);\n\n pledgedSrcSize must be correct. If it is not known at init time, use\n ZSTD_CONTENTSIZE_UNKNOWN. Note that, for compatibility with older programs,\n \"0\" also disables frame content size field. 
It may be enabled in the future.\n This prototype will generate compilation warnings."] pub fn ZSTD_initCStream_srcSize( zcs: *mut ZSTD_CStream, compressionLevel: ::core::ffi::c_int, pledgedSrcSize: ::core::ffi::c_ulonglong, ) -> usize; } extern "C" { #[doc = " ZSTD_initCStream_usingDict() :\n This function is DEPRECATED, and is equivalent to:\n ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);\n ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel);\n ZSTD_CCtx_loadDictionary(zcs, dict, dictSize);\n\n Creates of an internal CDict (incompatible with static CCtx), except if\n dict == NULL or dictSize < 8, in which case no dict is used.\n Note: dict is loaded with ZSTD_dct_auto (treated as a full zstd dictionary if\n it begins with ZSTD_MAGIC_DICTIONARY, else as raw content) and ZSTD_dlm_byCopy.\n This prototype will generate compilation warnings."] pub fn ZSTD_initCStream_usingDict( zcs: *mut ZSTD_CStream, dict: *const ::core::ffi::c_void, dictSize: usize, compressionLevel: ::core::ffi::c_int, ) -> usize; } extern "C" { #[doc = " ZSTD_initCStream_advanced() :\n This function is DEPRECATED, and is equivalent to:\n ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);\n ZSTD_CCtx_setParams(zcs, params);\n ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);\n ZSTD_CCtx_loadDictionary(zcs, dict, dictSize);\n\n dict is loaded with ZSTD_dct_auto and ZSTD_dlm_byCopy.\n pledgedSrcSize must be correct.\n If srcSize is not known at init time, use value ZSTD_CONTENTSIZE_UNKNOWN.\n This prototype will generate compilation warnings."] pub fn ZSTD_initCStream_advanced( zcs: *mut ZSTD_CStream, dict: *const ::core::ffi::c_void, dictSize: usize, params: ZSTD_parameters, pledgedSrcSize: ::core::ffi::c_ulonglong, ) -> usize; } extern "C" { #[doc = " ZSTD_initCStream_usingCDict() :\n This function is DEPRECATED, and equivalent to:\n ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);\n ZSTD_CCtx_refCDict(zcs, cdict);\n\n note : cdict will just be referenced, and must outlive 
compression session\n This prototype will generate compilation warnings."] pub fn ZSTD_initCStream_usingCDict( zcs: *mut ZSTD_CStream, cdict: *const ZSTD_CDict, ) -> usize; } extern "C" { #[doc = " ZSTD_initCStream_usingCDict_advanced() :\n This function is DEPRECATED, and is equivalent to:\n ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);\n ZSTD_CCtx_setFParams(zcs, fParams);\n ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);\n ZSTD_CCtx_refCDict(zcs, cdict);\n\n same as ZSTD_initCStream_usingCDict(), with control over frame parameters.\n pledgedSrcSize must be correct. If srcSize is not known at init time, use\n value ZSTD_CONTENTSIZE_UNKNOWN.\n This prototype will generate compilation warnings."] pub fn ZSTD_initCStream_usingCDict_advanced( zcs: *mut ZSTD_CStream, cdict: *const ZSTD_CDict, fParams: ZSTD_frameParameters, pledgedSrcSize: ::core::ffi::c_ulonglong, ) -> usize; } extern "C" { #[doc = " ZSTD_resetCStream() :\n This function is DEPRECATED, and is equivalent to:\n ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);\n ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);\n Note: ZSTD_resetCStream() interprets pledgedSrcSize == 0 as ZSTD_CONTENTSIZE_UNKNOWN, but\n ZSTD_CCtx_setPledgedSrcSize() does not do the same, so ZSTD_CONTENTSIZE_UNKNOWN must be\n explicitly specified.\n\n start a new frame, using same parameters from previous frame.\n This is typically useful to skip dictionary loading stage, since it will re-use it in-place.\n Note that zcs must be init at least once before using ZSTD_resetCStream().\n If pledgedSrcSize is not known at reset time, use macro ZSTD_CONTENTSIZE_UNKNOWN.\n If pledgedSrcSize > 0, its value must be correct, as it will be written in header, and controlled at the end.\n For the time being, pledgedSrcSize==0 is interpreted as \"srcSize unknown\" for compatibility with older programs,\n but it will change to mean \"empty\" in future version, so use macro ZSTD_CONTENTSIZE_UNKNOWN instead.\n @return : 0, or an error code (which can be 
tested using ZSTD_isError())\n This prototype will generate compilation warnings."] pub fn ZSTD_resetCStream( zcs: *mut ZSTD_CStream, pledgedSrcSize: ::core::ffi::c_ulonglong, ) -> usize; } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct ZSTD_frameProgression { pub ingested: ::core::ffi::c_ulonglong, pub consumed: ::core::ffi::c_ulonglong, pub produced: ::core::ffi::c_ulonglong, pub flushed: ::core::ffi::c_ulonglong, pub currentJobID: ::core::ffi::c_uint, pub nbActiveWorkers: ::core::ffi::c_uint, } extern "C" { pub fn ZSTD_getFrameProgression( cctx: *const ZSTD_CCtx, ) -> ZSTD_frameProgression; } extern "C" { #[doc = " ZSTD_toFlushNow() :\n Tell how many bytes are ready to be flushed immediately.\n Useful for multithreading scenarios (nbWorkers >= 1).\n Probe the oldest active job, defined as oldest job not yet entirely flushed,\n and check its output buffer.\n @return : amount of data stored in oldest job and ready to be flushed immediately.\n if @return == 0, it means either :\n + there is no active job (could be checked with ZSTD_frameProgression()), or\n + oldest job is still actively compressing data,\n but everything it has produced has also been flushed so far,\n therefore flush speed is limited by production speed of oldest job\n irrespective of the speed of concurrent (and newer) jobs."] pub fn ZSTD_toFlushNow(cctx: *mut ZSTD_CCtx) -> usize; } extern "C" { #[doc = " This function is deprecated, and is equivalent to:\n\n ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);\n ZSTD_DCtx_loadDictionary(zds, dict, dictSize);\n\n note: no dictionary will be used if dict == NULL or dictSize < 8"] pub fn ZSTD_initDStream_usingDict( zds: *mut ZSTD_DStream, dict: *const ::core::ffi::c_void, dictSize: usize, ) -> usize; } extern "C" { #[doc = " This function is deprecated, and is equivalent to:\n\n ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);\n ZSTD_DCtx_refDDict(zds, ddict);\n\n note : ddict is referenced, it must outlive decompression session"] pub fn 
ZSTD_initDStream_usingDDict( zds: *mut ZSTD_DStream, ddict: *const ZSTD_DDict, ) -> usize; } extern "C" { #[doc = " This function is deprecated, and is equivalent to:\n\n ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);\n\n re-use decompression parameters from previous init; saves dictionary loading"] pub fn ZSTD_resetDStream(zds: *mut ZSTD_DStream) -> usize; } pub type ZSTD_sequenceProducer_F = ::core::option::Option< unsafe extern "C" fn( sequenceProducerState: *mut ::core::ffi::c_void, outSeqs: *mut ZSTD_Sequence, outSeqsCapacity: usize, src: *const ::core::ffi::c_void, srcSize: usize, dict: *const ::core::ffi::c_void, dictSize: usize, compressionLevel: ::core::ffi::c_int, windowSize: usize, ) -> usize, >; extern "C" { #[doc = " ZSTD_registerSequenceProducer() :\n Instruct zstd to use a block-level external sequence producer function.\n\n The sequenceProducerState must be initialized by the caller, and the caller is\n responsible for managing its lifetime. This parameter is sticky across\n compressions. It will remain set until the user explicitly resets compression\n parameters.\n\n Sequence producer registration is considered to be an \"advanced parameter\",\n part of the \"advanced API\". This means it will only have an effect on compression\n APIs which respect advanced parameters, such as compress2() and compressStream2().\n Older compression APIs such as compressCCtx(), which predate the introduction of\n \"advanced parameters\", will ignore any external sequence producer setting.\n\n The sequence producer can be \"cleared\" by registering a NULL function pointer. 
This\n removes all limitations described above in the \"LIMITATIONS\" section of the API docs.\n\n The user is strongly encouraged to read the full API documentation (above) before\n calling this function."] pub fn ZSTD_registerSequenceProducer( cctx: *mut ZSTD_CCtx, sequenceProducerState: *mut ::core::ffi::c_void, sequenceProducer: ZSTD_sequenceProducer_F, ); } extern "C" { #[doc = "Buffer-less streaming compression (synchronous mode)\n\nA ZSTD_CCtx object is required to track streaming operations.\nUse ZSTD_createCCtx() / ZSTD_freeCCtx() to manage resource.\nZSTD_CCtx object can be re-used multiple times within successive compression operations.\n\nStart by initializing a context.\nUse ZSTD_compressBegin(), or ZSTD_compressBegin_usingDict() for dictionary compression.\n\nThen, consume your input using ZSTD_compressContinue().\nThere are some important considerations to keep in mind when using this advanced function :\n- ZSTD_compressContinue() has no internal buffer. It uses externally provided buffers only.\n- Interface is synchronous : input is consumed entirely and produces 1+ compressed blocks.\n- Caller must ensure there is enough space in `dst` to store compressed data under worst case scenario.\nWorst case evaluation is provided by ZSTD_compressBound().\nZSTD_compressContinue() doesn't guarantee recover after a failed compression.\n- ZSTD_compressContinue() presumes prior input ***is still accessible and unmodified*** (up to maximum distance size, see WindowLog).\nIt remembers all previous contiguous blocks, plus one separated memory segment (which can itself consists of multiple contiguous blocks)\n- ZSTD_compressContinue() detects that prior input has been overwritten when `src` buffer overlaps.\nIn which case, it will \"discard\" the relevant memory section from its history.\n\nFinish a frame with ZSTD_compressEnd(), which will write the last block(s) and optional checksum.\nIt's possible to use srcSize==0, in which case, it will write a final empty 
block to end the frame.\nWithout last block mark, frames are considered unfinished (hence corrupted) by compliant decoders.\n\n`ZSTD_CCtx` object can be re-used (ZSTD_compressBegin()) to compress again."] pub fn ZSTD_compressBegin( cctx: *mut ZSTD_CCtx, compressionLevel: ::core::ffi::c_int, ) -> usize; } extern "C" { pub fn ZSTD_compressBegin_usingDict( cctx: *mut ZSTD_CCtx, dict: *const ::core::ffi::c_void, dictSize: usize, compressionLevel: ::core::ffi::c_int, ) -> usize; } extern "C" { pub fn ZSTD_compressBegin_usingCDict( cctx: *mut ZSTD_CCtx, cdict: *const ZSTD_CDict, ) -> usize; } extern "C" { pub fn ZSTD_copyCCtx( cctx: *mut ZSTD_CCtx, preparedCCtx: *const ZSTD_CCtx, pledgedSrcSize: ::core::ffi::c_ulonglong, ) -> usize; } extern "C" { pub fn ZSTD_compressContinue( cctx: *mut ZSTD_CCtx, dst: *mut ::core::ffi::c_void, dstCapacity: usize, src: *const ::core::ffi::c_void, srcSize: usize, ) -> usize; } extern "C" { pub fn ZSTD_compressEnd( cctx: *mut ZSTD_CCtx, dst: *mut ::core::ffi::c_void, dstCapacity: usize, src: *const ::core::ffi::c_void, srcSize: usize, ) -> usize; } extern "C" { pub fn ZSTD_compressBegin_advanced( cctx: *mut ZSTD_CCtx, dict: *const ::core::ffi::c_void, dictSize: usize, params: ZSTD_parameters, pledgedSrcSize: ::core::ffi::c_ulonglong, ) -> usize; } extern "C" { pub fn ZSTD_compressBegin_usingCDict_advanced( cctx: *mut ZSTD_CCtx, cdict: *const ZSTD_CDict, fParams: ZSTD_frameParameters, pledgedSrcSize: ::core::ffi::c_ulonglong, ) -> usize; } extern "C" { #[doc = "Buffer-less streaming decompression (synchronous mode)\n\nA ZSTD_DCtx object is required to track streaming operations.\nUse ZSTD_createDCtx() / ZSTD_freeDCtx() to manage it.\nA ZSTD_DCtx object can be re-used multiple times.\n\nFirst typical operation is to retrieve frame parameters, using ZSTD_getFrameHeader().\nFrame header is extracted from the beginning of compressed frame, so providing only the frame's beginning is enough.\nData fragment must be large enough to ensure 
successful decoding.\n`ZSTD_frameHeaderSize_max` bytes is guaranteed to always be large enough.\nresult : 0 : successful decoding, the `ZSTD_frameHeader` structure is correctly filled.\n>0 : `srcSize` is too small, please provide at least result bytes on next attempt.\nerrorCode, which can be tested using ZSTD_isError().\n\nIt fills a ZSTD_frameHeader structure with important information to correctly decode the frame,\nsuch as the dictionary ID, content size, or maximum back-reference distance (`windowSize`).\nNote that these values could be wrong, either because of data corruption, or because a 3rd party deliberately spoofs false information.\nAs a consequence, check that values remain within valid application range.\nFor example, do not allocate memory blindly, check that `windowSize` is within expectation.\nEach application can set its own limits, depending on local restrictions.\nFor extended interoperability, it is recommended to support `windowSize` of at least 8 MB.\n\nZSTD_decompressContinue() needs previous data blocks during decompression, up to `windowSize` bytes.\nZSTD_decompressContinue() is very sensitive to contiguity,\nif 2 blocks don't follow each other, make sure that either the compressor breaks contiguity at the same place,\nor that previous contiguous segment is large enough to properly handle maximum back-reference distance.\nThere are multiple ways to guarantee this condition.\n\nThe most memory efficient way is to use a round buffer of sufficient size.\nSufficient size is determined by invoking ZSTD_decodingBufferSize_min(),\nwhich can return an error code if required value is too large for current system (in 32-bits mode).\nIn a round buffer methodology, ZSTD_decompressContinue() decompresses each block next to previous one,\nup to the moment there is not enough room left in the buffer to guarantee decoding another full block,\nwhich maximum size is provided in `ZSTD_frameHeader` structure, field `blockSizeMax`.\nAt which point, decoding 
can resume from the beginning of the buffer.\nNote that already decoded data stored in the buffer should be flushed before being overwritten.\n\nThere are alternatives possible, for example using two or more buffers of size `windowSize` each, though they consume more memory.\n\nFinally, if you control the compression process, you can also ignore all buffer size rules,\nas long as the encoder and decoder progress in \"lock-step\",\naka use exactly the same buffer sizes, break contiguity at the same place, etc.\n\nOnce buffers are setup, start decompression, with ZSTD_decompressBegin().\nIf decompression requires a dictionary, use ZSTD_decompressBegin_usingDict() or ZSTD_decompressBegin_usingDDict().\n\nThen use ZSTD_nextSrcSizeToDecompress() and ZSTD_decompressContinue() alternatively.\nZSTD_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTD_decompressContinue().\nZSTD_decompressContinue() requires this _exact_ amount of bytes, or it will fail.\n\nresult of ZSTD_decompressContinue() is the number of bytes regenerated within 'dst' (necessarily <= dstCapacity).\nIt can be zero : it just means ZSTD_decompressContinue() has decoded some metadata item.\nIt can also be an error code, which can be tested with ZSTD_isError().\n\nA frame is fully decoded when ZSTD_nextSrcSizeToDecompress() returns zero.\nContext can then be reset to start a new decompression.\n\nNote : it's possible to know if next input to present is a header or a block, using ZSTD_nextInputType().\nThis information is not required to properly decode a frame.\n\n== Special case : skippable frames ==\n\nSkippable frames allow integration of user-defined data into a flow of concatenated frames.\nSkippable frames will be ignored (skipped) by decompressor.\nThe format of skippable frames is as follows :\na) Skippable frame ID - 4 Bytes, Little endian format, any value from 0x184D2A50 to 0x184D2A5F\nb) Frame Size - 4 Bytes, Little endian format, unsigned 32-bits\nc) Frame Content - 
any content (User Data) of length equal to Frame Size\nFor skippable frames ZSTD_getFrameHeader() returns zfhPtr->frameType==ZSTD_skippableFrame.\nFor skippable frames ZSTD_decompressContinue() always returns 0 : it only skips the content."] pub fn ZSTD_decodingBufferSize_min( windowSize: ::core::ffi::c_ulonglong, frameContentSize: ::core::ffi::c_ulonglong, ) -> usize; } extern "C" { pub fn ZSTD_decompressBegin(dctx: *mut ZSTD_DCtx) -> usize; } extern "C" { pub fn ZSTD_decompressBegin_usingDict( dctx: *mut ZSTD_DCtx, dict: *const ::core::ffi::c_void, dictSize: usize, ) -> usize; } extern "C" { pub fn ZSTD_decompressBegin_usingDDict( dctx: *mut ZSTD_DCtx, ddict: *const ZSTD_DDict, ) -> usize; } extern "C" { pub fn ZSTD_nextSrcSizeToDecompress(dctx: *mut ZSTD_DCtx) -> usize; } extern "C" { pub fn ZSTD_decompressContinue( dctx: *mut ZSTD_DCtx, dst: *mut ::core::ffi::c_void, dstCapacity: usize, src: *const ::core::ffi::c_void, srcSize: usize, ) -> usize; } extern "C" { pub fn ZSTD_copyDCtx(dctx: *mut ZSTD_DCtx, preparedDCtx: *const ZSTD_DCtx); } #[repr(u32)] #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] pub enum ZSTD_nextInputType_e { ZSTDnit_frameHeader = 0, ZSTDnit_blockHeader = 1, ZSTDnit_block = 2, ZSTDnit_lastBlock = 3, ZSTDnit_checksum = 4, ZSTDnit_skippableFrame = 5, } extern "C" { pub fn ZSTD_nextInputType(dctx: *mut ZSTD_DCtx) -> ZSTD_nextInputType_e; } extern "C" { #[doc = "This API is deprecated in favor of the regular compression API.\nYou can get the frame header down to 2 bytes by setting:\n- ZSTD_c_format = ZSTD_f_zstd1_magicless\n- ZSTD_c_contentSizeFlag = 0\n- ZSTD_c_checksumFlag = 0\n- ZSTD_c_dictIDFlag = 0\n\nThis API is not as well tested as our normal API, so we recommend not using it.\nWe will be removing it in a future version. 
If the normal API doesn't provide\nthe functionality you need, please open a GitHub issue.\n\nBlock functions produce and decode raw zstd blocks, without frame metadata.\nFrame metadata cost is typically ~12 bytes, which can be non-negligible for very small blocks (< 100 bytes).\nBut users will have to take in charge needed metadata to regenerate data, such as compressed and content sizes.\n\nA few rules to respect :\n- Compressing and decompressing require a context structure\n+ Use ZSTD_createCCtx() and ZSTD_createDCtx()\n- It is necessary to init context before starting\n+ compression : any ZSTD_compressBegin*() variant, including with dictionary\n+ decompression : any ZSTD_decompressBegin*() variant, including with dictionary\n- Block size is limited, it must be <= ZSTD_getBlockSize() <= ZSTD_BLOCKSIZE_MAX == 128 KB\n+ If input is larger than a block size, it's necessary to split input data into multiple blocks\n+ For inputs larger than a single block, consider using regular ZSTD_compress() instead.\nFrame metadata is not that costly, and quickly becomes negligible as source size grows larger than a block.\n- When a block is considered not compressible enough, ZSTD_compressBlock() result will be 0 (zero) !\n===> In which case, nothing is produced into `dst` !\n+ User __must__ test for such outcome and deal directly with uncompressed data\n+ A block cannot be declared incompressible if ZSTD_compressBlock() return value was != 0.\nDoing so would mess up with statistics history, leading to potential data corruption.\n+ ZSTD_decompressBlock() _doesn't accept uncompressed data as input_ !!\n+ In case of multiple successive blocks, should some of them be uncompressed,\ndecoder must be informed of their existence in order to follow proper history.\nUse ZSTD_insertBlock() for such a case."] pub fn ZSTD_getBlockSize(cctx: *const ZSTD_CCtx) -> usize; } extern "C" { pub fn ZSTD_compressBlock( cctx: *mut ZSTD_CCtx, dst: *mut ::core::ffi::c_void, dstCapacity: usize, src: 
*const ::core::ffi::c_void, srcSize: usize, ) -> usize; } extern "C" { pub fn ZSTD_decompressBlock( dctx: *mut ZSTD_DCtx, dst: *mut ::core::ffi::c_void, dstCapacity: usize, src: *const ::core::ffi::c_void, srcSize: usize, ) -> usize; } extern "C" { pub fn ZSTD_insertBlock( dctx: *mut ZSTD_DCtx, blockStart: *const ::core::ffi::c_void, blockSize: usize, ) -> usize; }
#![allow(non_upper_case_globals)] #![allow(non_camel_case_types)] #![allow(non_snake_case)] #![no_std] //! Low-level bindings to the [zstd] library. //! //! [zstd]: https://facebook.github.io/zstd/ #[cfg(target_arch = "wasm32")] extern crate alloc; #[cfg(target_arch = "wasm32")] mod wasm_shim; // If running bindgen, we'll end up with the correct bindings anyway. #[cfg(feature = "bindgen")] include!(concat!(env!("OUT_DIR"), "/bindings.rs")); // The bindings used depend on a few feature flags. #[cfg(all(not(feature = "experimental"), not(feature = "bindgen")))] include!("bindings_zstd.rs"); #[cfg(all( not(feature = "experimental"), feature = "zdict_builder", not(feature = "bindgen") ))] include!("bindings_zdict.rs"); #[cfg(all(feature = "experimental", not(feature = "bindgen")))] include!("bindings_zstd_experimental.rs"); #[cfg(all( feature = "experimental", feature = "zdict_builder", not(feature = "bindgen") ))] include!("bindings_zdict_experimental.rs"); #[cfg(all( feature = "seekable", not(feature = "bindgen") ))] include!("bindings_zstd_seekable.rs");
use alloc::alloc::{alloc, alloc_zeroed, dealloc, Layout};
use core::ffi::{c_int, c_void};

// Every allocation is prefixed with its total size so `free` can recover the
// `Layout`. The prefix is one usize, stored usize-aligned (malloc never
// receives an alignment request, so usize-alignment is always sufficient).
const USIZE_ALIGN: usize = core::mem::align_of::<usize>();
const USIZE_SIZE: usize = core::mem::size_of::<usize>();

/// C `qsort` replacement for the wasm target.
///
/// Only the element sizes zstd actually sorts with are supported; any other
/// size indicates a caller bug and panics.
#[no_mangle]
pub extern "C" fn rust_zstd_wasm_shim_qsort(
    base: *mut c_void,
    n_items: usize,
    size: usize,
    compar: extern "C" fn(*const c_void, *const c_void) -> c_int,
) {
    unsafe {
        match size {
            1 => qsort::<1>(base, n_items, compar),
            2 => qsort::<2>(base, n_items, compar),
            4 => qsort::<4>(base, n_items, compar),
            8 => qsort::<8>(base, n_items, compar),
            16 => qsort::<16>(base, n_items, compar),
            _ => panic!("Unsupported qsort item size"),
        }
    }
}

/// Sorts `n_items` elements of exactly `N` bytes each with a C comparator.
///
/// # Safety
/// `base` must point to `n_items` valid, exclusively-owned elements of `N`
/// bytes each.
unsafe fn qsort<const N: usize>(
    base: *mut c_void,
    n_items: usize,
    compar: extern "C" fn(*const c_void, *const c_void) -> c_int,
) {
    let base: &mut [[u8; N]] =
        core::slice::from_raw_parts_mut(base as *mut [u8; N], n_items);
    base.sort_unstable_by(|a, b| {
        // C comparators return <0 / 0 / >0.
        match compar(a.as_ptr() as *const c_void, b.as_ptr() as *const c_void) {
            ..=-1 => core::cmp::Ordering::Less,
            0 => core::cmp::Ordering::Equal,
            1.. => core::cmp::Ordering::Greater,
        }
    });
}

/// C `malloc` replacement. Returns NULL on allocation failure, like C.
#[no_mangle]
pub extern "C" fn rust_zstd_wasm_shim_malloc(size: usize) -> *mut c_void {
    wasm_shim_alloc::<false>(size)
}

/// C `memcmp` replacement: lexicographic byte comparison, returns -1/0/1.
#[no_mangle]
pub extern "C" fn rust_zstd_wasm_shim_memcmp(
    str1: *const c_void,
    str2: *const c_void,
    n: usize,
) -> i32 {
    // Safety: function contract requires str1 and str2 at least `n`-long.
    unsafe {
        let str1: &[u8] = core::slice::from_raw_parts(str1 as *const u8, n);
        let str2: &[u8] = core::slice::from_raw_parts(str2 as *const u8, n);
        match str1.cmp(str2) {
            core::cmp::Ordering::Less => -1,
            core::cmp::Ordering::Equal => 0,
            core::cmp::Ordering::Greater => 1,
        }
    }
}

/// C `calloc` replacement: zeroed allocation of `nmemb * size` bytes.
///
/// Returns NULL if the multiplication overflows (required by C `calloc`)
/// or if the allocation fails.
#[no_mangle]
pub extern "C" fn rust_zstd_wasm_shim_calloc(
    nmemb: usize,
    size: usize,
) -> *mut c_void {
    // calloc must detect multiplication overflow rather than allocate a
    // too-small buffer.
    match nmemb.checked_mul(size) {
        Some(total) => wasm_shim_alloc::<true>(total),
        None => core::ptr::null_mut(),
    }
}

/// Allocates `size` usable bytes, optionally zeroed, with a hidden size
/// prefix so `free` can reconstruct the layout.
///
/// Memory layout: `[full size: usize] [allocation: size bytes]`.
/// Returns NULL on overflow or allocation failure.
#[inline]
fn wasm_shim_alloc<const ZEROED: bool>(size: usize) -> *mut c_void {
    // Guard the prefix addition: a pathological `size` near usize::MAX must
    // fail cleanly instead of wrapping to a tiny allocation.
    let full_alloc_size = match size.checked_add(USIZE_SIZE) {
        Some(total) => total,
        None => return core::ptr::null_mut(),
    };

    unsafe {
        let layout =
            Layout::from_size_align_unchecked(full_alloc_size, USIZE_ALIGN);

        let ptr = if ZEROED {
            alloc_zeroed(layout)
        } else {
            alloc(layout)
        };

        // Allocation failure must surface as NULL (C malloc semantics), not
        // as a write through a null pointer.
        if ptr.is_null() {
            return core::ptr::null_mut();
        }

        // SAFETY: ptr is non-null, usize-aligned, and at least
        // `full_alloc_size` bytes long.
        ptr.cast::<usize>().write(full_alloc_size);
        ptr.add(USIZE_SIZE).cast()
    }
}

/// C `free` replacement. `free(NULL)` is a no-op, as mandated by C.
#[no_mangle]
pub unsafe extern "C" fn rust_zstd_wasm_shim_free(ptr: *mut c_void) {
    // C allows freeing NULL; reading a size prefix below NULL would be UB.
    if ptr.is_null() {
        return;
    }

    // The layout for the allocation needs to be recovered for dealloc:
    // - the size is stored directly below the allocation
    // - the alignment is always USIZE_ALIGN
    let alloc_ptr = ptr.sub(USIZE_SIZE);
    // SAFETY: the allocation routines uphold having a valid usize below the
    // provided pointer.
    let full_alloc_size = alloc_ptr.cast::<usize>().read();

    let layout = Layout::from_size_align_unchecked(full_alloc_size, USIZE_ALIGN);
    dealloc(alloc_ptr.cast(), layout);
}

/// C `memcpy` replacement (regions must not overlap).
#[no_mangle]
pub unsafe extern "C" fn rust_zstd_wasm_shim_memcpy(
    dest: *mut c_void,
    src: *const c_void,
    n: usize,
) -> *mut c_void {
    core::ptr::copy_nonoverlapping(src as *const u8, dest as *mut u8, n);
    dest
}

/// C `memmove` replacement (regions may overlap).
#[no_mangle]
pub unsafe extern "C" fn rust_zstd_wasm_shim_memmove(
    dest: *mut c_void,
    src: *const c_void,
    n: usize,
) -> *mut c_void {
    core::ptr::copy(src as *const u8, dest as *mut u8, n);
    dest
}

/// C `memset` replacement: fills `n` bytes of `dest` with `c as u8`.
#[no_mangle]
pub unsafe extern "C" fn rust_zstd_wasm_shim_memset(
    dest: *mut c_void,
    c: c_int,
    n: usize,
) -> *mut c_void {
    core::ptr::write_bytes(dest as *mut u8, c as u8, n);
    dest
}
fn main() { let some_content = "Something"; let compression_level = 3; // Compress some text let compressed = zstd::encode_all(some_content.as_bytes(), compression_level).unwrap(); // Now uncompress it let decoded: Vec<u8> = zstd::decode_all(compressed.as_slice()).unwrap(); // Convert it to text let decoded_text = std::str::from_utf8(&decoded).unwrap(); assert_eq!(some_content, decoded_text); }
use clap::Parser; use humansize::{format_size, DECIMAL}; use std::io::Read; use std::path::PathBuf; #[derive(Parser, Debug)] #[command(author, version, about, long_about=None)] struct Args { /// Directory containing the data to compress. /// To use the silesia corpus, run the following commands: /// /// ``` /// wget http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip /// unzip silesia.zip -d silesia/ /// cargo run --example benchmark -- silesia/", /// ``` dir: PathBuf, /// First compression level to test. #[arg(short, long)] begin: i32, /// Last compression level to test. #[arg(short, long)] end: i32, } fn main() { let args = Args::parse(); // Step 1: load data in memory let files: Vec<Vec<u8>> = std::fs::read_dir(args.dir) .unwrap() .map(|file| { let file = file.unwrap(); let mut content = Vec::new(); std::fs::File::open(file.path()) .unwrap() .read_to_end(&mut content) .unwrap(); content }) .collect(); let total_size: usize = files.iter().map(|data| data.len()).sum(); // Step 3: compress data // Print tsv headers println!( "{}\t{}\t{}\t{}", "Compression level", "Compression ratio", "Compression speed", "Decompression speed" ); for level in args.begin..args.end { // Compress each sample sequentially. 
let start = std::time::Instant::now(); let compressed: Vec<Vec<u8>> = files .iter() .map(|data| zstd::encode_all(&data[..], level).unwrap()) .collect(); let mid = std::time::Instant::now(); let uncompressed: Vec<Vec<u8>> = compressed .iter() .map(|data| zstd::decode_all(&data[..]).unwrap()) .collect(); let end = std::time::Instant::now(); for (original, processed) in files.iter().zip(uncompressed.iter()) { assert_eq!(&original[..], &processed[..]); } let compress_time = mid - start; let decompress_time = end - mid; let compress_seconds = compress_time.as_secs() as f64 + compress_time.subsec_nanos() as f64 * 1e-9; let decompress_seconds = decompress_time.as_secs() as f64 + decompress_time.subsec_nanos() as f64 * 1e-9; let compressed_size: usize = compressed.iter().map(Vec::len).sum(); let speed = (total_size as f64 / compress_seconds) as usize; let speed = format_size(speed, DECIMAL); let d_speed = (total_size as f64 / decompress_seconds) as usize; let d_speed = format_size(d_speed, DECIMAL); let ratio = compressed_size as f64 / total_size as f64; println!("{}\t{:.3}\t{}/s\t{}/s", level, 1.0 / ratio, speed, d_speed); } }
use std::env;
use std::io::{self, Write};
use std::str::FromStr;

/// Entry point: `stream -d` decompresses stdin to stdout,
/// `stream -N` compresses stdin at level N.
fn main() {
    match env::args().nth(1) {
        Some(ref option) if option == "-d" => decompress(),
        Some(ref option) if option.starts_with('-') => {
            // Everything after the leading dash is the compression level.
            let level = i32::from_str(&option[1..]).unwrap_or_else(|e| {
                panic!("Error parsing compression level: {}", e)
            });
            compress(level);
        }
        // No argument, or an argument not starting with '-': print usage.
        _ => {
            writeln!(
                &mut io::stderr(),
                "Invalid option. Usage: `stream [-d|-1..-22]`"
            )
            .unwrap();
        }
    }
}

/// Streams stdin to stdout, compressing at the given level.
fn compress(level: i32) {
    zstd::stream::copy_encode(io::stdin(), io::stdout(), level).unwrap();
}

/// Streams stdin to stdout, decompressing.
fn decompress() {
    zstd::stream::copy_decode(io::stdin(), io::stdout()).unwrap();
}
use clap::Parser; use std::io; use std::path::PathBuf; #[derive(Parser, Debug)] #[command(author, version, about, long_about=None)] /// This program trains a dictionary from one or more files, /// to make future compression of similar small files more efficient. /// /// The dictionary will need to be present during decompression, /// but if you need to compress many small files individually, /// it may be worth the trouble. struct Args { /// Maximum dictionary size in bytes. #[arg(short, long)] max_size: usize, /// Files to use as input. files: Vec<PathBuf>, } fn main() { let args = Args::parse(); let dict = zstd::dict::from_files(&args.files, args.max_size).unwrap(); let mut dict_reader: &[u8] = &dict; io::copy(&mut dict_reader, &mut io::stdout()).unwrap(); }
use zstd;

use std::env;
use std::fs;
use std::io;

/// Extension appended to compressed files.
const SUFFIX: &'static str = ".zst";

fn main() {
    // For each CLI argument: decompress files ending in `.zst`,
    // compress everything else.
    for arg in env::args().skip(1) {
        if arg.ends_with(SUFFIX) {
            match decompress(&arg) {
                Ok(()) => println!("Decompressed {}", arg),
                Err(e) => println!("Error decompressing {}: {}", arg, e),
            }
        } else {
            match compress(&arg) {
                Ok(()) => println!("Compressed {}", arg),
                Err(e) => println!("Error compressing {}: {}", arg, e),
            }
        }
    }
}

/// Compresses `source` into `source.zst` at level 1.
fn compress(source: &str) -> io::Result<()> {
    let mut file = fs::File::open(source)?;
    let mut encoder = {
        let target = fs::File::create(source.to_string() + SUFFIX)?;
        zstd::Encoder::new(target, 1)?
    };

    io::copy(&mut file, &mut encoder)?;
    // finish() writes the final frame; without it the output is truncated.
    encoder.finish()?;

    Ok(())
}

/// Decompresses `source` (expected to end in `.zst`) next to itself.
fn decompress(source: &str) -> io::Result<()> {
    let mut decoder = {
        let file = fs::File::open(source)?;
        zstd::Decoder::new(file)?
    };

    // Strip exactly one `.zst` suffix. The previous `trim_end_matches` call
    // stripped *repeated* suffixes, so `a.zst.zst` would wrongly decompress
    // onto `a` instead of `a.zst`.
    let target_name = source.strip_suffix(SUFFIX).unwrap_or(source);
    let mut target = fs::File::create(target_name)?;

    io::copy(&mut decoder, &mut target)?;

    Ok(())
}
use clap::Parser; use std::fs; use std::io; #[derive(Parser, Debug)] #[command(author, version, about, long_about=None)] struct Args { /// Files to decompress. With no file, or when given -, read standard input. file: Vec<String>, } fn main() { // This will be a simple application: // takes a single (repeatable and optional) argument. let args = Args::parse(); // If nothing was given, act as if `-` was there. if args.file.is_empty() { decompress_file("-").unwrap(); } else { for file in &args.file { decompress_file(file).unwrap(); } } } // Dispatch the source reader depending on the filename fn decompress_file(file: &str) -> io::Result<()> { match file { "-" => decompress_from(io::stdin()), other => decompress_from(io::BufReader::new(fs::File::open(other)?)), } } // Decompress from a `Reader` into stdout fn decompress_from<R: io::Read>(r: R) -> io::Result<()> { let mut decoder = zstd::Decoder::new(r)?; io::copy(&mut decoder, &mut io::stdout())?; Ok(()) }
use crate::map_error_code;

use std::io;
use zstd_safe;

/// Allows to compress independently multiple chunks of data.
///
/// Each job will be processed entirely in-memory without streaming, so this
/// is most fitting for many small jobs. To compress larger volume that don't
/// easily fit in memory, a streaming compression may be more appropriate.
///
/// It is more efficient than a streaming compressor for 2 reasons:
/// * It re-uses the zstd context between jobs to avoid re-allocations
/// * It avoids copying data from a `Read` into a temporary buffer before compression.
#[derive(Default)]
pub struct Compressor<'a> {
    // The zstd compression context, re-used across `compress` calls.
    context: zstd_safe::CCtx<'a>,
}

impl Compressor<'static> {
    /// Creates a new zstd compressor
    pub fn new(level: i32) -> io::Result<Self> {
        // Equivalent to using an empty dictionary.
        Self::with_dictionary(level, &[])
    }

    /// Creates a new zstd compressor, using the given dictionary.
    ///
    /// Note that using a dictionary means that decompression will need to use
    /// the same dictionary.
    pub fn with_dictionary(level: i32, dictionary: &[u8]) -> io::Result<Self> {
        let mut compressor = Self::default();

        compressor.set_dictionary(level, dictionary)?;

        Ok(compressor)
    }
}

impl<'a> Compressor<'a> {
    /// Creates a new compressor using an existing `EncoderDictionary`.
    ///
    /// The compression level will be the one specified when creating the dictionary.
    ///
    /// Note that using a dictionary means that decompression will need to use
    /// the same dictionary.
    pub fn with_prepared_dictionary<'b>(
        dictionary: &'a crate::dict::EncoderDictionary<'b>,
    ) -> io::Result<Self>
    where
        'b: 'a,
    {
        let mut compressor = Self::default();

        compressor.set_prepared_dictionary(dictionary)?;

        Ok(compressor)
    }

    /// Changes the compression level used by this compressor.
    ///
    /// *This will clear any dictionary previously registered.*
    ///
    /// If you want to keep the existing dictionary, you will need to pass it again to
    /// `Self::set_dictionary` instead of using this method.
    pub fn set_compression_level(&mut self, level: i32) -> io::Result<()> {
        // Setting an empty dictionary clears any previously loaded one.
        self.set_dictionary(level, &[])
    }

    /// Changes the dictionary and compression level used by this compressor.
    ///
    /// Will affect future compression jobs.
    ///
    /// Note that using a dictionary means that decompression will need to use
    /// the same dictionary.
    pub fn set_dictionary(
        &mut self,
        level: i32,
        dictionary: &[u8],
    ) -> io::Result<()> {
        self.context
            .set_parameter(zstd_safe::CParameter::CompressionLevel(level))
            .map_err(map_error_code)?;

        self.context
            .load_dictionary(dictionary)
            .map_err(map_error_code)?;

        Ok(())
    }

    /// Changes the dictionary used by this compressor.
    ///
    /// The compression level used when preparing the dictionary will be used.
    ///
    /// Note that using a dictionary means that decompression will need to use
    /// the same dictionary.
    pub fn set_prepared_dictionary<'b>(
        &mut self,
        dictionary: &'a crate::dict::EncoderDictionary<'b>,
    ) -> io::Result<()>
    where
        'b: 'a,
    {
        // References the prepared CDict rather than copying it.
        self.context
            .ref_cdict(dictionary.as_cdict())
            .map_err(map_error_code)?;

        Ok(())
    }

    /// Compress a single block of data to the given destination buffer.
    ///
    /// Returns the number of bytes written, or an error if something happened
    /// (for instance if the destination buffer was too small).
    ///
    /// A level of `0` uses zstd's default (currently `3`).
    pub fn compress_to_buffer<C: zstd_safe::WriteBuf + ?Sized>(
        &mut self,
        source: &[u8],
        destination: &mut C,
    ) -> io::Result<usize> {
        self.context
            .compress2(destination, source)
            .map_err(map_error_code)
    }

    /// Compresses a block of data and returns the compressed result.
    ///
    /// A level of `0` uses zstd's default (currently `3`).
    pub fn compress(&mut self, data: &[u8]) -> io::Result<Vec<u8>> {
        // We allocate a big buffer, slightly larger than the input data.
        // `compress_bound` is the worst-case compressed size for this input.
        let buffer_len = zstd_safe::compress_bound(data.len());
        let mut buffer = Vec::with_capacity(buffer_len);

        self.compress_to_buffer(data, &mut buffer)?;

        // Should we shrink the vec? Meh, let the user do it if he wants.
        Ok(buffer)
    }

    /// Gives mutable access to the internal context.
    pub fn context_mut(&mut self) -> &mut zstd_safe::CCtx<'a> {
        &mut self.context
    }

    /// Sets a compression parameter for this compressor.
    pub fn set_parameter(
        &mut self,
        parameter: zstd_safe::CParameter,
    ) -> io::Result<()> {
        self.context
            .set_parameter(parameter)
            .map_err(map_error_code)?;
        Ok(())
    }

    // Pulls in the shared encoder parameter setters (checksum, threads, etc.)
    // defined by the `encoder_parameters!` macro in `stream/mod.rs`.
    crate::encoder_parameters!();
}

// Compile-time check that `Compressor` is `Send`.
fn _assert_traits() {
    fn _assert_send<T: Send>(_: T) {}

    _assert_send(Compressor::new(0));
}
use crate::map_error_code;

#[cfg(feature = "experimental")]
use std::convert::TryInto;
use std::io;
use zstd_safe;

/// Allows to decompress independently multiple blocks of data.
///
/// This reduces memory usage compared to calling `decompress` multiple times.
#[derive(Default)]
pub struct Decompressor<'a> {
    // The zstd decompression context, re-used across `decompress` calls.
    context: zstd_safe::DCtx<'a>,
}

impl Decompressor<'static> {
    /// Creates a new zstd decompressor.
    pub fn new() -> io::Result<Self> {
        // Equivalent to using an empty dictionary.
        Self::with_dictionary(&[])
    }

    /// Creates a new zstd decompressor, using the given dictionary.
    pub fn with_dictionary(dictionary: &[u8]) -> io::Result<Self> {
        let mut decompressor = Self::default();

        decompressor.set_dictionary(dictionary)?;

        Ok(decompressor)
    }
}

impl<'a> Decompressor<'a> {
    /// Creates a new decompressor using an existing `DecoderDictionary`.
    ///
    /// Note that using a dictionary means that compression will need to use
    /// the same dictionary.
    pub fn with_prepared_dictionary<'b>(
        dictionary: &'a crate::dict::DecoderDictionary<'b>,
    ) -> io::Result<Self>
    where
        'b: 'a,
    {
        let mut decompressor = Self::default();

        decompressor.set_prepared_dictionary(dictionary)?;

        Ok(decompressor)
    }

    /// Changes the dictionary used by this decompressor.
    ///
    /// Will affect future compression jobs.
    ///
    /// Note that using a dictionary means that compression will need to use
    /// the same dictionary.
    pub fn set_dictionary(&mut self, dictionary: &[u8]) -> io::Result<()> {
        self.context
            .load_dictionary(dictionary)
            .map_err(map_error_code)?;

        Ok(())
    }

    /// Changes the dictionary used by this decompressor.
    ///
    /// Note that using a dictionary means that compression will need to use
    /// the same dictionary.
    pub fn set_prepared_dictionary<'b>(
        &mut self,
        dictionary: &'a crate::dict::DecoderDictionary<'b>,
    ) -> io::Result<()>
    where
        'b: 'a,
    {
        // References the prepared DDict rather than copying it.
        self.context
            .ref_ddict(dictionary.as_ddict())
            .map_err(map_error_code)?;

        Ok(())
    }

    /// Decompress a single block of data to the given destination buffer.
    ///
    /// Returns the number of bytes written, or an error if something happened
    /// (for instance if the destination buffer was too small).
    pub fn decompress_to_buffer<C: zstd_safe::WriteBuf + ?Sized>(
        &mut self,
        source: &[u8],
        destination: &mut C,
    ) -> io::Result<usize> {
        self.context
            .decompress(destination, source)
            .map_err(map_error_code)
    }

    /// Decompress a block of data, and return the result in a `Vec<u8>`.
    ///
    /// The decompressed data should be at most `capacity` bytes,
    /// or an error will be returned.
    pub fn decompress(
        &mut self,
        data: &[u8],
        capacity: usize,
    ) -> io::Result<Vec<u8>> {
        // Never allocate more than the caller's `capacity`, but use a
        // tighter bound when the frame header provides one.
        let capacity =
            Self::upper_bound(data).unwrap_or(capacity).min(capacity);
        let mut buffer = Vec::with_capacity(capacity);
        self.decompress_to_buffer(data, &mut buffer)?;
        Ok(buffer)
    }

    /// Sets a decompression parameter for this decompressor.
    pub fn set_parameter(
        &mut self,
        parameter: zstd_safe::DParameter,
    ) -> io::Result<()> {
        self.context
            .set_parameter(parameter)
            .map_err(map_error_code)?;
        Ok(())
    }

    // Pulls in the shared decoder parameter setters (window_log_max, etc.)
    // defined by the `decoder_parameters!` macro in `stream/mod.rs`.
    crate::decoder_parameters!();

    /// Get an upper bound on the decompressed size of data, if available
    ///
    /// This can be used to pre-allocate enough capacity for `decompress_to_buffer`
    /// and is used by `decompress` to ensure that it does not over-allocate if
    /// you supply a large `capacity`.
    ///
    /// Will return `None` if the upper bound cannot be determined or is larger than `usize::MAX`
    ///
    /// Note that unless the `experimental` feature is enabled, this will always return `None`.
    pub fn upper_bound(_data: &[u8]) -> Option<usize> {
        #[cfg(feature = "experimental")]
        {
            // `decompress_bound` returns a u64; conversion fails on 32-bit
            // targets when the bound exceeds usize::MAX.
            let bound = zstd_safe::decompress_bound(_data).ok()?;
            bound.try_into().ok()
        }
        #[cfg(not(feature = "experimental"))]
        {
            None
        }
    }
}

// Compile-time check that `Decompressor` is `Send`.
fn _assert_traits() {
    fn _assert_send<T: Send>(_: T) {}

    _assert_send(Decompressor::new());
}
//! Compress and decompress data in bulk. //! //! These methods process all the input data at once. //! It is therefore best used with relatively small blocks //! (like small network packets). mod compressor; mod decompressor; #[cfg(test)] mod tests; pub use self::compressor::Compressor; pub use self::decompressor::Decompressor; use std::io; /// Compresses a single block of data to the given destination buffer. /// /// Returns the number of bytes written, or an error if something happened /// (for instance if the destination buffer was too small). /// /// A level of `0` uses zstd's default (currently `3`). pub fn compress_to_buffer( source: &[u8], destination: &mut [u8], level: i32, ) -> io::Result<usize> { Compressor::new(level)?.compress_to_buffer(source, destination) } /// Compresses a block of data and returns the compressed result. /// /// A level of `0` uses zstd's default (currently `3`). pub fn compress(data: &[u8], level: i32) -> io::Result<Vec<u8>> { Compressor::new(level)?.compress(data) } /// Deompress a single block of data to the given destination buffer. /// /// Returns the number of bytes written, or an error if something happened /// (for instance if the destination buffer was too small). pub fn decompress_to_buffer( source: &[u8], destination: &mut [u8], ) -> io::Result<usize> { Decompressor::new()?.decompress_to_buffer(source, destination) } /// Decompresses a block of data and returns the decompressed result. /// /// The decompressed data should be at most `capacity` bytes, /// or an error will be returned. pub fn decompress(data: &[u8], capacity: usize) -> io::Result<Vec<u8>> { Decompressor::new()?.decompress(data, capacity) }
use super::{compress, decompress};

// Shared fixture: a real text file gives the compressor something
// representative to work with.
const TEXT: &str = include_str!("../../assets/example.txt");

#[test]
fn test_direct() {
    // Can we include_str!("assets/example.txt")?
    // It's excluded from the packaging step, so maybe not.
    // Round-trip: bulk-compress then bulk-decompress must reproduce the input.
    crate::test_cycle_unwrap(
        TEXT.as_bytes(),
        |data| compress(data, 1),
        |data| decompress(data, TEXT.len()),
    );
}

#[test]
fn test_stream_compat() {
    // We can bulk-compress and stream-decode
    crate::test_cycle_unwrap(
        TEXT.as_bytes(),
        |data| compress(data, 1),
        |data| crate::decode_all(data),
    );

    // We can stream-encode and bulk-decompress
    crate::test_cycle_unwrap(
        TEXT.as_bytes(),
        |data| crate::encode_all(data, 1),
        |data| decompress(data, TEXT.len()),
    );
}

#[test]
fn has_content_size() {
    let compressed = compress(TEXT.as_bytes(), 1).unwrap();

    // Bulk functions by default include the content size.
    assert_eq!(
        zstd_safe::get_frame_content_size(&compressed).unwrap(),
        Some(TEXT.len() as u64)
    );
}
//! Train a dictionary from various sources.
//!
//! A dictionary can help improve the compression of small files.
//! The dictionary must be present during decompression,
//! but can be shared across multiple "similar" files.
//!
//! Creating a dictionary using the `zstd` C library,
//! using the `zstd` command-line interface, using this library,
//! or using the `train` binary provided, should give the same result,
//! and are therefore completely compatible.
//!
//! To use, see [`Encoder::with_dictionary`] or [`Decoder::with_dictionary`].
//!
//! [`Encoder::with_dictionary`]: ../struct.Encoder.html#method.with_dictionary
//! [`Decoder::with_dictionary`]: ../struct.Decoder.html#method.with_dictionary

#[cfg(feature = "zdict_builder")]
use std::io::{self, Read};

pub use zstd_safe::{CDict, DDict};

/// Prepared dictionary for compression
///
/// A dictionary can include its own copy of the data (if it is `'static`), or it can merely point
/// to a separate buffer (if it has another lifetime).
pub struct EncoderDictionary<'a> {
    cdict: CDict<'a>,
}

impl EncoderDictionary<'static> {
    /// Creates a prepared dictionary for compression.
    ///
    /// This will copy the dictionary internally.
    pub fn copy(dictionary: &[u8], level: i32) -> Self {
        Self {
            cdict: zstd_safe::create_cdict(dictionary, level),
        }
    }
}

impl<'a> EncoderDictionary<'a> {
    #[cfg(feature = "experimental")]
    #[cfg_attr(feature = "doc-cfg", doc(cfg(feature = "experimental")))]
    /// Create prepared dictionary for compression
    ///
    /// A level of `0` uses zstd's default (currently `3`).
    ///
    /// Only available with the `experimental` feature. Use `EncoderDictionary::copy` otherwise.
    pub fn new(dictionary: &'a [u8], level: i32) -> Self {
        Self {
            // Borrows the caller's buffer instead of copying it.
            cdict: zstd_safe::CDict::create_by_reference(dictionary, level),
        }
    }

    /// Returns reference to `CDict` inner object
    pub fn as_cdict(&self) -> &CDict<'a> {
        &self.cdict
    }
}

/// Prepared dictionary for decompression
pub struct DecoderDictionary<'a> {
    ddict: DDict<'a>,
}

impl DecoderDictionary<'static> {
    /// Create a prepared dictionary for decompression.
    ///
    /// This will copy the dictionary internally.
    pub fn copy(dictionary: &[u8]) -> Self {
        Self {
            ddict: zstd_safe::DDict::create(dictionary),
        }
    }
}

impl<'a> DecoderDictionary<'a> {
    #[cfg(feature = "experimental")]
    #[cfg_attr(feature = "doc-cfg", doc(cfg(feature = "experimental")))]
    /// Create prepared dictionary for decompression
    ///
    /// Only available with the `experimental` feature. Use `DecoderDictionary::copy` otherwise.
    pub fn new(dict: &'a [u8]) -> Self {
        Self {
            // Borrows the caller's buffer instead of copying it.
            ddict: zstd_safe::DDict::create_by_reference(dict),
        }
    }

    /// Returns reference to `DDict` inner object
    pub fn as_ddict(&self) -> &DDict<'a> {
        &self.ddict
    }
}

/// Train a dictionary from a big continuous chunk of data, with all samples
/// contiguous in memory.
///
/// This is the most efficient way to train a dictionary,
/// since this is directly fed into `zstd`.
///
/// * `sample_data` is the concatenation of all sample data.
/// * `sample_sizes` is the size of each sample in `sample_data`.
///     The sum of all `sample_sizes` should equal the length of `sample_data`.
/// * `max_size` is the maximum size of the dictionary to generate.
///
/// The result is the dictionary data. You can, for example, feed it to [`CDict::create`].
#[cfg(feature = "zdict_builder")]
#[cfg_attr(feature = "doc-cfg", doc(cfg(feature = "zdict_builder")))]
pub fn from_continuous(
    sample_data: &[u8],
    sample_sizes: &[usize],
    max_size: usize,
) -> io::Result<Vec<u8>> {
    use crate::map_error_code;

    // Complain if the lengths don't add up to the entire data.
    if sample_sizes.iter().sum::<usize>() != sample_data.len() {
        return Err(io::Error::new(
            io::ErrorKind::Other,
            "sample sizes don't add up".to_string(),
        ));
    }

    let mut result = Vec::with_capacity(max_size);
    zstd_safe::train_from_buffer(&mut result, sample_data, sample_sizes)
        .map_err(map_error_code)?;
    Ok(result)
}

/// Train a dictionary from multiple samples.
///
/// The samples will internally be copied to a single continuous buffer,
/// so make sure you have enough memory available.
///
/// If you need to stretch your system's limits,
/// [`from_continuous`] directly uses the given slice.
///
/// [`from_continuous`]: ./fn.from_continuous.html
///
/// * `samples` is a list of individual samples to train on.
/// * `max_size` is the maximum size of the dictionary to generate.
///
/// The result is the dictionary data. You can, for example, feed it to [`CDict::create`].
#[cfg(feature = "zdict_builder")]
#[cfg_attr(feature = "doc-cfg", doc(cfg(feature = "zdict_builder")))]
pub fn from_samples<S: AsRef<[u8]>>(
    samples: &[S],
    max_size: usize,
) -> io::Result<Vec<u8>> {
    // Pre-allocate the entire required size.
    let total_length: usize =
        samples.iter().map(|sample| sample.as_ref().len()).sum();

    let mut data = Vec::with_capacity(total_length);

    // Copy every sample to a big chunk of memory
    data.extend(samples.iter().flat_map(|s| s.as_ref()).cloned());

    let sizes: Vec<_> = samples.iter().map(|s| s.as_ref().len()).collect();

    from_continuous(&data, &sizes, max_size)
}

/// Train a dictionary from multiple samples.
///
/// Unlike [`from_samples`], this does not require having a list of all samples.
/// It also allows running into an error when iterating through the samples.
///
/// They will still be copied to a continuous array and fed to [`from_continuous`].
///
/// * `samples` is an iterator of individual samples to train on.
/// * `max_size` is the maximum size of the dictionary to generate.
///
/// The result is the dictionary data.
///
/// # Examples
///
/// ```rust,no_run
/// // Train from a couple of json files.
/// let dict_buffer = zstd::dict::from_sample_iterator(
///     ["file_a.json", "file_b.json"]
///         .into_iter()
///         .map(|filename| std::fs::File::open(filename)),
///     10_000,  // 10kB dictionary
/// ).unwrap();
/// ```
///
/// ```rust,no_run
/// use std::io::BufRead as _;
/// // Treat each line from stdin as a separate sample.
/// let dict_buffer = zstd::dict::from_sample_iterator(
///     std::io::stdin().lock().lines().map(|line: std::io::Result<String>| {
///         // Transform each line into a `Cursor<Vec<u8>>` so they implement Read.
///         line.map(String::into_bytes)
///             .map(std::io::Cursor::new)
///     }),
///     10_000,  // 10kB dictionary
/// ).unwrap();
/// ```
#[cfg(feature = "zdict_builder")]
#[cfg_attr(feature = "doc-cfg", doc(cfg(feature = "zdict_builder")))]
pub fn from_sample_iterator<I, R>(
    samples: I,
    max_size: usize,
) -> io::Result<Vec<u8>>
where
    I: IntoIterator<Item = io::Result<R>>,
    R: Read,
{
    let mut data = Vec::new();
    let mut sizes = Vec::new();

    // Concatenate every sample into `data`, remembering each sample's length
    // so `from_continuous` can split them back apart.
    for sample in samples {
        let mut sample = sample?;
        let len = sample.read_to_end(&mut data)?;
        sizes.push(len);
    }

    from_continuous(&data, &sizes, max_size)
}

/// Train a dict from a list of files.
///
/// * `filenames` is an iterator of files to load. Each file will be treated as an individual
///     sample.
/// * `max_size` is the maximum size of the dictionary to generate.
///
/// The result is the dictionary data. You can, for example, feed it to [`CDict::create`].
#[cfg(feature = "zdict_builder")]
#[cfg_attr(feature = "doc-cfg", doc(cfg(feature = "zdict_builder")))]
pub fn from_files<I, P>(filenames: I, max_size: usize) -> io::Result<Vec<u8>>
where
    P: AsRef<std::path::Path>,
    I: IntoIterator<Item = P>,
{
    // Each file becomes one training sample; any open error is surfaced
    // through the iterator of `io::Result`s.
    from_sample_iterator(
        filenames
            .into_iter()
            .map(|filename| std::fs::File::open(filename)),
        max_size,
    )
}

#[cfg(test)]
#[cfg(feature = "zdict_builder")]
mod tests {
    use std::fs;
    use std::io;
    use std::io::Read;

    use walkdir;

    #[test]
    fn test_dict_training() {
        // Train a dictionary from this crate's own `.rs` sources.
        let paths: Vec<_> = walkdir::WalkDir::new("src")
            .into_iter()
            .map(|entry| entry.unwrap())
            .map(|entry| entry.into_path())
            .filter(|path| path.to_str().unwrap().ends_with(".rs"))
            .collect();

        let dict = super::from_files(&paths, 4000).unwrap();

        // Every training file must round-trip through a dictionary-backed
        // encoder/decoder pair.
        for path in paths {
            let mut buffer = Vec::new();
            let mut file = fs::File::open(path).unwrap();
            let mut content = Vec::new();
            file.read_to_end(&mut content).unwrap();
            io::copy(
                &mut &content[..],
                &mut crate::stream::Encoder::with_dictionary(
                    &mut buffer,
                    1,
                    &dict,
                )
                .unwrap()
                .auto_finish(),
            )
            .unwrap();

            let mut result = Vec::new();
            io::copy(
                &mut crate::stream::Decoder::with_dictionary(
                    &buffer[..],
                    &dict[..],
                )
                .unwrap(),
                &mut result,
            )
            .unwrap();

            assert_eq!(&content, &result);
        }
    }
}
//! Rust binding to the [zstd library][zstd]. //! //! This crate provides: //! //! * An [encoder](stream/write/struct.Encoder.html) to compress data using zstd //! and send the output to another write. //! * A [decoder](stream/read/struct.Decoder.html) to read input data from a `Read` //! and decompress it. //! * Convenient functions for common tasks. //! //! # Example //! //! ```no_run //! use std::io; //! //! // Uncompress input and print the result. //! zstd::stream::copy_decode(io::stdin(), io::stdout()).unwrap(); //! ``` //! //! [zstd]: https://github.com/facebook/zstd #![deny(missing_docs)] #![cfg_attr(feature = "doc-cfg", feature(doc_cfg))] // Re-export the zstd-safe crate. pub use zstd_safe; pub mod bulk; pub mod dict; #[macro_use] pub mod stream; use std::io; /// Default compression level. pub use zstd_safe::CLEVEL_DEFAULT as DEFAULT_COMPRESSION_LEVEL; /// The accepted range of compression levels. pub fn compression_level_range( ) -> std::ops::RangeInclusive<zstd_safe::CompressionLevel> { zstd_safe::min_c_level()..=zstd_safe::max_c_level() } #[doc(no_inline)] pub use crate::stream::{decode_all, encode_all, Decoder, Encoder}; /// Returns the error message as io::Error based on error_code. fn map_error_code(code: usize) -> io::Error { let msg = zstd_safe::get_error_name(code); io::Error::new(io::ErrorKind::Other, msg.to_string()) } // Some helper functions to write full-cycle tests. #[cfg(test)] fn test_cycle<F, G>(data: &[u8], f: F, g: G) where F: Fn(&[u8]) -> Vec<u8>, G: Fn(&[u8]) -> Vec<u8>, { let mid = f(data); let end = g(&mid); assert_eq!(data, &end[..]); } #[cfg(test)] fn test_cycle_unwrap<F, G>(data: &[u8], f: F, g: G) where F: Fn(&[u8]) -> io::Result<Vec<u8>>, G: Fn(&[u8]) -> io::Result<Vec<u8>>, { test_cycle(data, |data| f(data).unwrap(), |data| g(data).unwrap()) } #[test] fn default_compression_level_in_range() { assert!(compression_level_range().contains(&DEFAULT_COMPRESSION_LEVEL)); }
use std::io; use super::{Decoder, Encoder}; /// Decompress from the given source as if using a `Decoder`. /// /// The input data must be in the zstd frame format. pub fn decode_all<R: io::Read>(source: R) -> io::Result<Vec<u8>> { let mut result = Vec::new(); copy_decode(source, &mut result)?; Ok(result) } /// Decompress from the given source as if using a `Decoder`. /// /// Decompressed data will be appended to `destination`. pub fn copy_decode<R, W>(source: R, mut destination: W) -> io::Result<()> where R: io::Read, W: io::Write, { let mut decoder = Decoder::new(source)?; io::copy(&mut decoder, &mut destination)?; Ok(()) } /// Compress all data from the given source as if using an `Encoder`. /// /// Result will be in the zstd frame format. /// /// A level of `0` uses zstd's default (currently `3`). pub fn encode_all<R: io::Read>(source: R, level: i32) -> io::Result<Vec<u8>> { let mut result = Vec::<u8>::new(); copy_encode(source, &mut result, level)?; Ok(result) } /// Compress all data from the given source as if using an `Encoder`. /// /// Compressed data will be appended to `destination`. /// /// A level of `0` uses zstd's default (currently `3`). pub fn copy_encode<R, W>( mut source: R, destination: W, level: i32, ) -> io::Result<()> where R: io::Read, W: io::Write, { let mut encoder = Encoder::new(destination, level)?; io::copy(&mut source, &mut encoder)?; encoder.finish()?; Ok(()) } #[cfg(test)] mod tests {}
//! Compress and decompress Zstd streams.
//!
//! Zstd streams are the main way to compress and decompress data.
//! They are compatible with the `zstd` command-line tool.
//!
//! This module provides both `Read` and `Write` interfaces to compressing and
//! decompressing.

pub mod read;
pub mod write;

mod functions;
pub mod zio;

#[cfg(test)]
mod tests;

pub mod raw;

pub use self::functions::{copy_decode, copy_encode, decode_all, encode_all};
pub use self::read::Decoder;
pub use self::write::{AutoFinishEncoder, Encoder};

// The macros below generate the parameter-setter methods shared between the
// read-mode and write-mode encoders/decoders (and the `bulk` module). They
// expand inside an `impl` block and rely on a `set_parameter` method being
// in scope there.

#[doc(hidden)]
#[macro_export]
/// Common functions for the decoder, both in read and write mode.
macro_rules! decoder_parameters {
    () => {
        /// Sets the maximum back-reference distance.
        ///
        /// The actual maximum distance is going to be `2^log_distance`.
        ///
        /// This will need to at least match the value set when compressing.
        pub fn window_log_max(&mut self, log_distance: u32) -> io::Result<()> {
            self.set_parameter(zstd_safe::DParameter::WindowLogMax(
                log_distance,
            ))
        }

        #[cfg(feature = "experimental")]
        #[cfg_attr(feature = "doc-cfg", doc(cfg(feature = "experimental")))]
        /// Enables or disables expecting the 4-byte magic header
        ///
        /// Only available with the `experimental` feature.
        ///
        /// This will need to match the settings used when compressing.
        pub fn include_magicbytes(
            &mut self,
            include_magicbytes: bool,
        ) -> io::Result<()> {
            self.set_parameter(zstd_safe::DParameter::Format(
                if include_magicbytes {
                    zstd_safe::FrameFormat::One
                } else {
                    zstd_safe::FrameFormat::Magicless
                },
            ))
        }
    };
}

#[doc(hidden)]
#[macro_export]
/// Common functions for the decoder, both in read and write mode.
macro_rules! decoder_common {
    ($readwrite:ident) => {
        /// Sets a decompression parameter on the decompression stream.
        pub fn set_parameter(
            &mut self,
            parameter: zstd_safe::DParameter,
        ) -> io::Result<()> {
            self.$readwrite.operation_mut().set_parameter(parameter)
        }

        $crate::decoder_parameters!();
    };
}

#[doc(hidden)]
#[macro_export]
/// Parameter-setters for the encoder. Relies on a `set_parameter` method.
macro_rules! encoder_parameters {
    () => {
        /// Controls whether zstd should include a content checksum at the end
        /// of each frame.
        pub fn include_checksum(
            &mut self,
            include_checksum: bool,
        ) -> io::Result<()> {
            self.set_parameter(zstd_safe::CParameter::ChecksumFlag(
                include_checksum,
            ))
        }

        /// Enables multithreaded compression
        ///
        /// * If `n_workers == 0` (default), then multithreaded will be
        ///   disabled.
        /// * If `n_workers >= 1`, then compression will be done in separate
        ///   threads.
        ///
        /// So even `n_workers = 1` may increase performance by separating
        /// IO and compression.
        ///
        /// Note: This is only available if the `zstdmt` cargo feature is activated.
        #[cfg(feature = "zstdmt")]
        #[cfg_attr(feature = "doc-cfg", doc(cfg(feature = "zstdmt")))]
        pub fn multithread(&mut self, n_workers: u32) -> io::Result<()> {
            self.set_parameter(zstd_safe::CParameter::NbWorkers(n_workers))
        }

        /// Enables or disables storing of the dict id.
        ///
        /// Defaults to true. If false, the behaviour of decoding with a wrong
        /// dictionary is undefined.
        pub fn include_dictid(
            &mut self,
            include_dictid: bool,
        ) -> io::Result<()> {
            self.set_parameter(zstd_safe::CParameter::DictIdFlag(
                include_dictid,
            ))
        }

        /// Enables or disables storing of the contentsize.
        ///
        /// Note that this only has an effect if the size is given with `set_pledged_src_size`.
        pub fn include_contentsize(
            &mut self,
            include_contentsize: bool,
        ) -> io::Result<()> {
            self.set_parameter(zstd_safe::CParameter::ContentSizeFlag(
                include_contentsize,
            ))
        }

        /// Enables or disables long-distance matching
        pub fn long_distance_matching(
            &mut self,
            long_distance_matching: bool,
        ) -> io::Result<()> {
            self.set_parameter(
                zstd_safe::CParameter::EnableLongDistanceMatching(
                    long_distance_matching,
                ),
            )
        }

        /// Sets the target size for compressed blocks.
        ///
        /// A lower block size may result in slightly lower speed (~2%) and compression ratio
        /// (~0.1%), but may decrease end-to-end latency in low-bandwidth environments (time to
        /// first decompressed byte).
        ///
        /// No value, or a value of zero, results in no constraint for the block sizes.
        pub fn set_target_cblock_size(
            &mut self,
            target_size: Option<u32>,
        ) -> io::Result<()> {
            self.set_parameter(zstd_safe::CParameter::TargetCBlockSize(
                target_size.unwrap_or(0),
            ))
        }

        /// Sets the maximum back-reference distance.
        ///
        /// The actual maximum distance is going to be `2^log_distance`.
        ///
        /// Note that decompression will need to use at least the same setting.
        pub fn window_log(&mut self, log_distance: u32) -> io::Result<()> {
            self.set_parameter(zstd_safe::CParameter::WindowLog(log_distance))
        }

        #[cfg(feature = "experimental")]
        #[cfg_attr(feature = "doc-cfg", doc(cfg(feature = "experimental")))]
        /// Enables or disables the magic bytes at the beginning of each frame.
        ///
        /// If disabled, include_magicbytes must also be called on the decoder.
        ///
        /// Only available with the `experimental` feature.
        ///
        /// Note that decompression will need to use the same setting.
        pub fn include_magicbytes(
            &mut self,
            include_magicbytes: bool,
        ) -> io::Result<()> {
            self.set_parameter(zstd_safe::CParameter::Format(
                if include_magicbytes {
                    zstd_safe::FrameFormat::One
                } else {
                    zstd_safe::FrameFormat::Magicless
                },
            ))
        }
    };
}

#[doc(hidden)]
#[macro_export]
/// Common functions for the encoder, both in read and write mode.
macro_rules! encoder_common {
    ($readwrite:ident) => {
        /// Sets the given zstd compression parameter.
        pub fn set_parameter(
            &mut self,
            parameter: zstd_safe::CParameter,
        ) -> io::Result<()> {
            self.$readwrite.operation_mut().set_parameter(parameter)
        }

        /// Sets the expected size of the input.
        ///
        /// This affects the compression effectiveness.
        ///
        /// It is an error to give an incorrect size (an error will be returned when closing the
        /// stream if the size does not match what was pledged).
        ///
        /// Giving a `None` size means the size is unknown (this is the default).
        pub fn set_pledged_src_size(
            &mut self,
            size: Option<u64>,
        ) -> io::Result<()> {
            self.$readwrite.operation_mut().set_pledged_src_size(size)
        }

        $crate::encoder_parameters!();
    };
}
//! Raw in-memory stream compression/decompression.
//!
//! This module defines a `Decoder` and an `Encoder` to decode/encode streams
//! of data using buffers.
//!
//! They are mostly thin wrappers around `zstd_safe::{DCtx, CCtx}`.
use std::io;

pub use zstd_safe::{CParameter, DParameter, InBuffer, OutBuffer, WriteBuf};

use crate::dict::{DecoderDictionary, EncoderDictionary};
use crate::map_error_code;

/// Represents an abstract compression/decompression operation.
///
/// This trait covers both `Encoder` and `Decoder`.
pub trait Operation {
    /// Performs a single step of this operation.
    ///
    /// Should return a hint for the next input size.
    ///
    /// If the result is `Ok(0)`, it may indicate that a frame was just
    /// finished.
    fn run<C: WriteBuf + ?Sized>(
        &mut self,
        input: &mut InBuffer<'_>,
        output: &mut OutBuffer<'_, C>,
    ) -> io::Result<usize>;

    /// Performs a single step of this operation.
    ///
    /// This is a convenience wrapper around `Operation::run` if you don't
    /// want to deal with `InBuffer`/`OutBuffer`.
    fn run_on_buffers(
        &mut self,
        input: &[u8],
        output: &mut [u8],
    ) -> io::Result<Status> {
        let mut input = InBuffer::around(input);
        let mut output = OutBuffer::around(output);

        let remaining = self.run(&mut input, &mut output)?;

        // The buffers' cursors tell us how much was actually consumed/produced.
        Ok(Status {
            remaining,
            bytes_read: input.pos(),
            bytes_written: output.pos(),
        })
    }

    /// Flushes any internal buffer, if any.
    ///
    /// Returns the number of bytes still in the buffer.
    /// To flush entirely, keep calling until it returns `Ok(0)`.
    fn flush<C: WriteBuf + ?Sized>(
        &mut self,
        output: &mut OutBuffer<'_, C>,
    ) -> io::Result<usize> {
        // Default: nothing buffered, nothing to flush.
        let _ = output;
        Ok(0)
    }

    /// Prepares the operation for a new frame.
    ///
    /// This is hopefully cheaper than creating a new operation.
    fn reinit(&mut self) -> io::Result<()> {
        Ok(())
    }

    /// Finishes the operation, writing any footer if necessary.
    ///
    /// Returns the number of bytes still to write.
    ///
    /// Keep calling this method until it returns `Ok(0)`,
    /// and then don't ever call this method.
    fn finish<C: WriteBuf + ?Sized>(
        &mut self,
        output: &mut OutBuffer<'_, C>,
        finished_frame: bool,
    ) -> io::Result<usize> {
        // Default: no footer to write.
        let _ = output;
        let _ = finished_frame;
        Ok(0)
    }
}

/// Dummy operation that just copies its input to the output.
pub struct NoOp;

impl Operation for NoOp {
    fn run<C: WriteBuf + ?Sized>(
        &mut self,
        input: &mut InBuffer<'_>,
        output: &mut OutBuffer<'_, C>,
    ) -> io::Result<usize> {
        // Skip the prelude
        let src = &input.src[input.pos..];
        // Safe because `output.pos() <= output.capacity()`.
        let output_pos = output.pos();
        let dst = unsafe { output.as_mut_ptr().add(output_pos) };

        // Ignore anything past the end
        let len = usize::min(src.len(), output.capacity() - output_pos);
        let src = &src[..len];

        // Safe because:
        // * `len` is less than either of the two lengths
        // * `src` and `dst` do not overlap because we have `&mut` to each.
        unsafe { std::ptr::copy_nonoverlapping(src.as_ptr(), dst, len) };

        // Advance both cursors past the copied bytes.
        input.set_pos(input.pos() + len);
        unsafe { output.set_pos(output_pos + len) };

        Ok(0)
    }
}

/// Describes the result of an operation.
pub struct Status {
    /// Number of bytes expected for next input.
    ///
    /// * If `remaining = 0`, then we are at the end of a frame.
    /// * If `remaining > 0`, then it's just a hint for how much there is still
    ///   to read.
    pub remaining: usize,

    /// Number of bytes read from the input.
    pub bytes_read: usize,

    /// Number of bytes written to the output.
    pub bytes_written: usize,
}

/// An in-memory decoder for streams of data.
pub struct Decoder<'a> {
    // Either owns its DCtx or borrows one from the caller.
    context: MaybeOwnedDCtx<'a>,
}

impl Decoder<'static> {
    /// Creates a new decoder.
    pub fn new() -> io::Result<Self> {
        // Equivalent to using an empty dictionary.
        Self::with_dictionary(&[])
    }

    /// Creates a new decoder initialized with the given dictionary.
pub fn with_dictionary(dictionary: &[u8]) -> io::Result<Self> { let mut context = zstd_safe::DCtx::create(); context.init().map_err(map_error_code)?; context .load_dictionary(dictionary) .map_err(map_error_code)?; Ok(Decoder { context: MaybeOwnedDCtx::Owned(context), }) } } impl<'a> Decoder<'a> { /// Creates a new decoder which employs the provided context for deserialization. pub fn with_context(context: &'a mut zstd_safe::DCtx<'static>) -> Self { Self { context: MaybeOwnedDCtx::Borrowed(context), } } /// Creates a new decoder, using an existing `DecoderDictionary`. pub fn with_prepared_dictionary<'b>( dictionary: &DecoderDictionary<'b>, ) -> io::Result<Self> where 'b: 'a, { let mut context = zstd_safe::DCtx::create(); context .ref_ddict(dictionary.as_ddict()) .map_err(map_error_code)?; Ok(Decoder { context: MaybeOwnedDCtx::Owned(context), }) } /// Creates a new decoder, using a ref prefix pub fn with_ref_prefix<'b>(ref_prefix: &'b [u8]) -> io::Result<Self> where 'b: 'a, { let mut context = zstd_safe::DCtx::create(); context.ref_prefix(ref_prefix).map_err(map_error_code)?; Ok(Decoder { context: MaybeOwnedDCtx::Owned(context), }) } /// Sets a decompression parameter for this decoder. pub fn set_parameter(&mut self, parameter: DParameter) -> io::Result<()> { match &mut self.context { MaybeOwnedDCtx::Owned(x) => x.set_parameter(parameter), MaybeOwnedDCtx::Borrowed(x) => x.set_parameter(parameter), } .map_err(map_error_code)?; Ok(()) } } impl Operation for Decoder<'_> { fn run<C: WriteBuf + ?Sized>( &mut self, input: &mut InBuffer<'_>, output: &mut OutBuffer<'_, C>, ) -> io::Result<usize> { match &mut self.context { MaybeOwnedDCtx::Owned(x) => x.decompress_stream(output, input), MaybeOwnedDCtx::Borrowed(x) => x.decompress_stream(output, input), } .map_err(map_error_code) } fn flush<C: WriteBuf + ?Sized>( &mut self, output: &mut OutBuffer<'_, C>, ) -> io::Result<usize> { // To flush, we just offer no additional input. 
self.run(&mut InBuffer::around(&[]), output)?; // We don't _know_ how much (decompressed data) there is still in buffer. if output.pos() < output.capacity() { // We only know when there's none (the output buffer is not full). Ok(0) } else { // Otherwise, pretend there's still "1 byte" remaining. Ok(1) } } fn reinit(&mut self) -> io::Result<()> { match &mut self.context { MaybeOwnedDCtx::Owned(x) => { x.reset(zstd_safe::ResetDirective::SessionOnly) } MaybeOwnedDCtx::Borrowed(x) => { x.reset(zstd_safe::ResetDirective::SessionOnly) } } .map_err(map_error_code)?; Ok(()) } fn finish<C: WriteBuf + ?Sized>( &mut self, _output: &mut OutBuffer<'_, C>, finished_frame: bool, ) -> io::Result<usize> { if finished_frame { Ok(0) } else { Err(io::Error::new( io::ErrorKind::UnexpectedEof, "incomplete frame", )) } } } /// An in-memory encoder for streams of data. pub struct Encoder<'a> { context: MaybeOwnedCCtx<'a>, } impl Encoder<'static> { /// Creates a new encoder. pub fn new(level: i32) -> io::Result<Self> { Self::with_dictionary(level, &[]) } /// Creates a new encoder initialized with the given dictionary. pub fn with_dictionary(level: i32, dictionary: &[u8]) -> io::Result<Self> { let mut context = zstd_safe::CCtx::create(); context .set_parameter(CParameter::CompressionLevel(level)) .map_err(map_error_code)?; context .load_dictionary(dictionary) .map_err(map_error_code)?; Ok(Encoder { context: MaybeOwnedCCtx::Owned(context), }) } } impl<'a> Encoder<'a> { /// Creates a new encoder that uses the provided context for serialization. pub fn with_context(context: &'a mut zstd_safe::CCtx<'static>) -> Self { Self { context: MaybeOwnedCCtx::Borrowed(context), } } /// Creates a new encoder using an existing `EncoderDictionary`. 
pub fn with_prepared_dictionary<'b>( dictionary: &EncoderDictionary<'b>, ) -> io::Result<Self> where 'b: 'a, { let mut context = zstd_safe::CCtx::create(); context .ref_cdict(dictionary.as_cdict()) .map_err(map_error_code)?; Ok(Encoder { context: MaybeOwnedCCtx::Owned(context), }) } /// Creates a new encoder initialized with the given ref prefix. pub fn with_ref_prefix<'b>( level: i32, ref_prefix: &'b [u8], ) -> io::Result<Self> where 'b: 'a, { let mut context = zstd_safe::CCtx::create(); context .set_parameter(CParameter::CompressionLevel(level)) .map_err(map_error_code)?; context.ref_prefix(ref_prefix).map_err(map_error_code)?; Ok(Encoder { context: MaybeOwnedCCtx::Owned(context), }) } /// Sets a compression parameter for this encoder. pub fn set_parameter(&mut self, parameter: CParameter) -> io::Result<()> { match &mut self.context { MaybeOwnedCCtx::Owned(x) => x.set_parameter(parameter), MaybeOwnedCCtx::Borrowed(x) => x.set_parameter(parameter), } .map_err(map_error_code)?; Ok(()) } /// Sets the size of the input expected by zstd. /// /// May affect compression ratio. /// /// It is an error to give an incorrect size (an error _will_ be returned when closing the /// stream). /// /// If `None` is given, it assume the size is not known (default behaviour). 
pub fn set_pledged_src_size( &mut self, pledged_src_size: Option<u64>, ) -> io::Result<()> { match &mut self.context { MaybeOwnedCCtx::Owned(x) => { x.set_pledged_src_size(pledged_src_size) } MaybeOwnedCCtx::Borrowed(x) => { x.set_pledged_src_size(pledged_src_size) } } .map_err(map_error_code)?; Ok(()) } } impl<'a> Operation for Encoder<'a> { fn run<C: WriteBuf + ?Sized>( &mut self, input: &mut InBuffer<'_>, output: &mut OutBuffer<'_, C>, ) -> io::Result<usize> { match &mut self.context { MaybeOwnedCCtx::Owned(x) => x.compress_stream(output, input), MaybeOwnedCCtx::Borrowed(x) => x.compress_stream(output, input), } .map_err(map_error_code) } fn flush<C: WriteBuf + ?Sized>( &mut self, output: &mut OutBuffer<'_, C>, ) -> io::Result<usize> { match &mut self.context { MaybeOwnedCCtx::Owned(x) => x.flush_stream(output), MaybeOwnedCCtx::Borrowed(x) => x.flush_stream(output), } .map_err(map_error_code) } fn finish<C: WriteBuf + ?Sized>( &mut self, output: &mut OutBuffer<'_, C>, _finished_frame: bool, ) -> io::Result<usize> { match &mut self.context { MaybeOwnedCCtx::Owned(x) => x.end_stream(output), MaybeOwnedCCtx::Borrowed(x) => x.end_stream(output), } .map_err(map_error_code) } fn reinit(&mut self) -> io::Result<()> { match &mut self.context { MaybeOwnedCCtx::Owned(x) => { x.reset(zstd_safe::ResetDirective::SessionOnly) } MaybeOwnedCCtx::Borrowed(x) => { x.reset(zstd_safe::ResetDirective::SessionOnly) } } .map_err(map_error_code)?; Ok(()) } } enum MaybeOwnedCCtx<'a> { Owned(zstd_safe::CCtx<'a>), Borrowed(&'a mut zstd_safe::CCtx<'static>), } enum MaybeOwnedDCtx<'a> { Owned(zstd_safe::DCtx<'a>), Borrowed(&'a mut zstd_safe::DCtx<'static>), } #[cfg(test)] mod tests { // This requires impl for [u8; N] which is currently behind a feature. 
#[cfg(feature = "arrays")] #[test] fn test_cycle() { use super::{Decoder, Encoder, InBuffer, Operation, OutBuffer}; let mut encoder = Encoder::new(1).unwrap(); let mut decoder = Decoder::new().unwrap(); // Step 1: compress let mut input = InBuffer::around(b"AbcdefAbcdefabcdef"); let mut output = [0u8; 128]; let mut output = OutBuffer::around(&mut output); loop { encoder.run(&mut input, &mut output).unwrap(); if input.pos == input.src.len() { break; } } encoder.finish(&mut output, true).unwrap(); let initial_data = input.src; // Step 2: decompress let mut input = InBuffer::around(output.as_slice()); let mut output = [0u8; 128]; let mut output = OutBuffer::around(&mut output); loop { decoder.run(&mut input, &mut output).unwrap(); if input.pos == input.src.len() { break; } } assert_eq!(initial_data, output.as_slice()); } }
//! Implement pull-based [`Read`] trait for both compressing and decompressing.
use std::io::{self, BufRead, BufReader, Read};

use crate::dict::{DecoderDictionary, EncoderDictionary};
use crate::stream::{raw, zio};
use zstd_safe;

#[cfg(test)]
mod tests;

/// A decoder that decompresses input data from another `Read`.
///
/// This allows reading a stream of compressed data
/// (good for files or heavy network streams).
pub struct Decoder<'a, R> {
    // Pull-based wrapper: decompression happens as the caller reads.
    reader: zio::Reader<R, raw::Decoder<'a>>,
}

/// An encoder that compresses input data from another `Read`.
pub struct Encoder<'a, R> {
    // Pull-based wrapper: compression happens as the caller reads.
    reader: zio::Reader<R, raw::Encoder<'a>>,
}

impl<R: Read> Decoder<'static, BufReader<R>> {
    /// Creates a new decoder.
    pub fn new(reader: R) -> io::Result<Self> {
        // Use zstd's recommended input size for the intermediate buffer.
        let buffer_size = zstd_safe::DCtx::in_size();

        Self::with_buffer(BufReader::with_capacity(buffer_size, reader))
    }
}

impl<R: BufRead> Decoder<'static, R> {
    /// Creates a new decoder around a `BufRead`.
    pub fn with_buffer(reader: R) -> io::Result<Self> {
        Self::with_dictionary(reader, &[])
    }

    /// Creates a new decoder, using an existing dictionary.
    ///
    /// The dictionary must be the same as the one used during compression.
    pub fn with_dictionary(reader: R, dictionary: &[u8]) -> io::Result<Self> {
        let decoder = raw::Decoder::with_dictionary(dictionary)?;
        let reader = zio::Reader::new(reader, decoder);

        Ok(Decoder { reader })
    }
}

impl<'a, R: BufRead> Decoder<'a, R> {
    /// Creates a new decoder which employs the provided context for deserialization.
    pub fn with_context(
        reader: R,
        context: &'a mut zstd_safe::DCtx<'static>,
    ) -> Self {
        Self {
            reader: zio::Reader::new(
                reader,
                raw::Decoder::with_context(context),
            ),
        }
    }

    /// Sets this `Decoder` to stop after the first frame.
    ///
    /// By default, it keeps concatenating frames until EOF is reached.
    #[must_use]
    pub fn single_frame(mut self) -> Self {
        self.reader.set_single_frame();
        self
    }

    /// Creates a new decoder, using an existing `DecoderDictionary`.
    ///
    /// The dictionary must be the same as the one used during compression.
    pub fn with_prepared_dictionary<'b>(
        reader: R,
        dictionary: &DecoderDictionary<'b>,
    ) -> io::Result<Self>
    where
        'b: 'a,
    {
        let decoder = raw::Decoder::with_prepared_dictionary(dictionary)?;
        let reader = zio::Reader::new(reader, decoder);

        Ok(Decoder { reader })
    }

    /// Creates a new decoder, using a ref prefix.
    ///
    /// The prefix must be the same as the one used during compression.
    pub fn with_ref_prefix<'b>(
        reader: R,
        ref_prefix: &'b [u8],
    ) -> io::Result<Self>
    where
        'b: 'a,
    {
        let decoder = raw::Decoder::with_ref_prefix(ref_prefix)?;
        let reader = zio::Reader::new(reader, decoder);

        Ok(Decoder { reader })
    }

    /// Recommendation for the size of the output buffer.
    pub fn recommended_output_size() -> usize {
        zstd_safe::DCtx::out_size()
    }

    /// Acquire a reference to the underlying reader.
    pub fn get_ref(&self) -> &R {
        self.reader.reader()
    }

    /// Acquire a mutable reference to the underlying reader.
    ///
    /// Note that mutation of the reader may result in surprising results if
    /// this decoder is continued to be used.
    pub fn get_mut(&mut self) -> &mut R {
        self.reader.reader_mut()
    }

    /// Return the inner `Read`.
    ///
    /// Calling `finish()` is not *required* after reading a stream -
    /// just use it if you need to get the `Read` back.
    pub fn finish(self) -> R {
        self.reader.into_inner()
    }

    crate::decoder_common!(reader);
}

impl<R: BufRead> Read for Decoder<'_, R> {
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        self.reader.read(buf)
    }
}

impl<R: Read> Encoder<'static, BufReader<R>> {
    /// Creates a new encoder.
    pub fn new(reader: R, level: i32) -> io::Result<Self> {
        // Use zstd's recommended input size for the intermediate buffer.
        let buffer_size = zstd_safe::CCtx::in_size();

        Self::with_buffer(BufReader::with_capacity(buffer_size, reader), level)
    }
}

impl<R: BufRead> Encoder<'static, R> {
    /// Creates a new encoder around a `BufRead`.
    pub fn with_buffer(reader: R, level: i32) -> io::Result<Self> {
        Self::with_dictionary(reader, level, &[])
    }

    /// Creates a new encoder, using an existing dictionary.
    ///
    /// The dictionary must be the same as the one used during compression.
    pub fn with_dictionary(
        reader: R,
        level: i32,
        dictionary: &[u8],
    ) -> io::Result<Self> {
        let encoder = raw::Encoder::with_dictionary(level, dictionary)?;
        let reader = zio::Reader::new(reader, encoder);

        Ok(Encoder { reader })
    }
}

impl<'a, R: BufRead> Encoder<'a, R> {
    /// Creates a new encoder, using an existing `EncoderDictionary`.
    ///
    /// The dictionary must be the same as the one used during compression.
    pub fn with_prepared_dictionary<'b>(
        reader: R,
        dictionary: &EncoderDictionary<'b>,
    ) -> io::Result<Self>
    where
        'b: 'a,
    {
        let encoder = raw::Encoder::with_prepared_dictionary(dictionary)?;
        let reader = zio::Reader::new(reader, encoder);

        Ok(Encoder { reader })
    }

    /// Recommendation for the size of the output buffer.
    pub fn recommended_output_size() -> usize {
        zstd_safe::CCtx::out_size()
    }

    /// Acquire a reference to the underlying reader.
    pub fn get_ref(&self) -> &R {
        self.reader.reader()
    }

    /// Acquire a mutable reference to the underlying reader.
    ///
    /// Note that mutation of the reader may result in surprising results if
    /// this encoder is continued to be used.
    pub fn get_mut(&mut self) -> &mut R {
        self.reader.reader_mut()
    }

    /// Flush any internal buffer.
    ///
    /// This ensures all input consumed so far is compressed.
    ///
    /// Since it prevents bundling currently buffered data with future input,
    /// it may affect compression ratio.
    ///
    /// * Returns the number of bytes written to `out`.
    /// * Returns `Ok(0)` when everything has been flushed.
    pub fn flush(&mut self, out: &mut [u8]) -> io::Result<usize> {
        self.reader.flush(out)
    }

    /// Return the inner `Read`.
    ///
    /// Calling `finish()` is not *required* after reading a stream -
    /// just use it if you need to get the `Read` back.
    pub fn finish(self) -> R {
        self.reader.into_inner()
    }

    crate::encoder_common!(reader);
}

impl<R: BufRead> Read for Encoder<'_, R> {
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        self.reader.read(buf)
    }
}

// Compile-time check that both wrappers remain `Send` (never called at runtime).
fn _assert_traits() {
    use std::io::Cursor;

    fn _assert_send<T: Send>(_: T) {}

    _assert_send(Decoder::new(Cursor::new(Vec::new())));
    _assert_send(Encoder::new(Cursor::new(Vec::new()), 1));
}
use crate::stream::read::{Decoder, Encoder};
use std::io::Read;

/// Decoding arbitrary non-zstd bytes must fail with an error,
/// not panic or silently succeed.
#[test]
fn test_error_handling() {
    // Plain text is not a valid zstd frame (no magic number).
    let invalid_input = b"Abcdefghabcdefgh";

    let mut decoder = Decoder::new(&invalid_input[..]).unwrap();

    // `read_to_end` drives the decoder; it should report the bad frame.
    let result = decoder.read_to_end(&mut Vec::new());
    assert!(result.is_err(), "decoding invalid data should fail");
}

/// Compressing then decompressing must round-trip back to the input.
#[test]
fn test_cycle() {
    let input = b"Abcdefghabcdefgh";

    // Compress by pulling from the reader-based encoder.
    let mut encoder = Encoder::new(&input[..], 1).unwrap();
    let mut buffer = Vec::new();
    encoder.read_to_end(&mut buffer).unwrap();

    // Decompress the compressed bytes the same way.
    let mut decoder = Decoder::new(&buffer[..]).unwrap();
    let mut buffer = Vec::new();
    decoder.read_to_end(&mut buffer).unwrap();

    assert_eq!(input, &buffer[..]);
}
use super::{copy_encode, decode_all, encode_all};
use super::{Decoder, Encoder};
use partial_io::{PartialOp, PartialWrite};
use std::io;
use std::iter;

#[test]
fn test_end_of_frame() {
    use std::io::{Read, Write};

    let mut enc = Encoder::new(Vec::new(), 1).unwrap();
    enc.write_all(b"foo").unwrap();
    let mut compressed = enc.finish().unwrap();

    // Add footer/whatever to underlying storage.
    compressed.push(0);

    // Drain zstd stream until end-of-frame.
    let mut dec = Decoder::new(&compressed[..]).unwrap().single_frame();
    let mut buf = Vec::new();
    dec.read_to_end(&mut buf).unwrap();
    assert_eq!(&buf, b"foo", "Error decoding a single frame.");
}

#[test]
fn test_concatenated_frames() {
    // Three separately-compressed frames back to back should decode as one
    // concatenated payload.
    let mut buffer = Vec::new();
    copy_encode(&b"foo"[..], &mut buffer, 1).unwrap();
    copy_encode(&b"bar"[..], &mut buffer, 2).unwrap();
    copy_encode(&b"baz"[..], &mut buffer, 3).unwrap();

    assert_eq!(
        &decode_all(&buffer[..]).unwrap(),
        b"foobarbaz",
        "Error decoding concatenated frames."
    );
}

#[test]
fn test_flush() {
    use std::io::Write;

    let buf = Vec::new();
    let mut z = Encoder::new(buf, 19).unwrap();

    z.write_all(b"hello").unwrap();

    z.flush().unwrap(); // Might corrupt stream
    let buf = z.finish().unwrap();

    let s = decode_all(&buf[..]).unwrap();
    assert_eq!(s, b"hello", "Error decoding after flush.");
}

#[test]
fn test_try_finish() {
    use std::io::Write;
    // setup_try_finish leaves the encoder with a failed try_finish behind it.
    let mut z = setup_try_finish();

    z.get_mut().set_ops(iter::repeat(PartialOp::Unlimited));

    // flush() should continue to work even though write() doesn't.
    z.flush().unwrap();

    let buf = match z.try_finish() {
        Ok(buf) => buf.into_inner(),
        Err((_z, e)) => panic!("try_finish failed with {:?}", e),
    };

    // Make sure the multiple try_finish calls didn't screw up the internal
    // buffer and continued to produce valid compressed data.
    assert_eq!(&decode_all(&buf[..]).unwrap(), b"hello", "Error decoding");
}

#[test]
#[should_panic]
fn test_write_after_try_finish() {
    use std::io::Write;
    // Writing after try_finish (even a failed one) is documented to panic.
    let mut z = setup_try_finish();
    z.write_all(b"hello world").unwrap();
}

// Builds an encoder whose first try_finish fails with WouldBlock, and returns
// it (with the error already observed) for further testing.
fn setup_try_finish() -> Encoder<'static, PartialWrite<Vec<u8>>> {
    use std::io::Write;

    let buf =
        PartialWrite::new(Vec::new(), iter::repeat(PartialOp::Unlimited));
    let mut z = Encoder::new(buf, 19).unwrap();

    z.write_all(b"hello").unwrap();

    // Make the inner writer reject all further writes.
    z.get_mut()
        .set_ops(iter::repeat(PartialOp::Err(io::ErrorKind::WouldBlock)));

    let (z, err) = z.try_finish().unwrap_err();
    assert_eq!(
        err.kind(),
        io::ErrorKind::WouldBlock,
        "expected WouldBlock error"
    );

    z
}

#[test]
fn test_failing_write() {
    use std::io::Write;

    let buf = PartialWrite::new(
        Vec::new(),
        iter::repeat(PartialOp::Err(io::ErrorKind::WouldBlock)),
    );
    let mut z = Encoder::new(buf, 1).unwrap();

    // Fill in enough data to make sure the buffer gets written out.
    let input = vec![b'b'; 128 * 1024];
    // This should work even though the inner writer rejects writes.
    assert_eq!(
        z.write(&input).unwrap(),
        128 * 1024,
        "did not write all input buffer"
    );

    // The next write would fail (the buffer still has some data in it).
    assert_eq!(
        z.write(b"abc").unwrap_err().kind(),
        io::ErrorKind::WouldBlock,
        "expected WouldBlock error"
    );

    z.get_mut().set_ops(iter::repeat(PartialOp::Unlimited));

    // This shouldn't have led to any corruption.
    let buf = z.finish().unwrap().into_inner();
    assert_eq!(
        &decode_all(&buf[..]).unwrap(),
        &input,
        "WouldBlock errors should not corrupt stream"
    );
}

#[test]
fn test_invalid_frame() {
    use std::io::Read;

    // I really hope this data is invalid...
    let data = &[1u8, 2u8, 3u8, 4u8, 5u8];
    let mut dec = Decoder::new(&data[..]).unwrap();
    assert_eq!(
        dec.read_to_end(&mut Vec::new()).err().map(|e| e.kind()),
        Some(io::ErrorKind::Other),
        "did not encounter expected 'invalid frame' error"
    );
}

#[test]
fn test_incomplete_frame() {
    use std::io::{Read, Write};

    let mut enc = Encoder::new(Vec::new(), 1).unwrap();
    enc.write_all(b"This is a regular string").unwrap();
    let mut compressed = enc.finish().unwrap();

    // Chop off the last couple of bytes so the frame is truncated.
    let half_size = compressed.len() - 2;
    compressed.truncate(half_size);

    let mut dec = Decoder::new(&compressed[..]).unwrap();
    assert_eq!(
        dec.read_to_end(&mut Vec::new()).err().map(|e| e.kind()),
        Some(io::ErrorKind::UnexpectedEof),
        "did not encounter expected EOF error"
    );
}

#[test]
fn test_cli_compatibility() {
    // Data compressed ahead of time with the reference zstd CLI.
    let input = include_bytes!("../../assets/example.txt.zst");

    let output = decode_all(&input[..]).unwrap();

    let expected = include_bytes!("../../assets/example.txt");

    assert_eq!(
        &output[..],
        &expected[..],
        "error decoding cli-compressed data"
    );
}

#[cfg(feature = "legacy")]
#[test]
fn test_legacy() {
    use std::fs;
    use std::io::Read;

    // Read the content from that file
    let expected = include_bytes!("../../assets/example.txt");

    for version in &[5, 6, 7, 8] {
        let filename = format!("assets/example.txt.v{}.zst", version);
        let file = fs::File::open(filename).unwrap();
        let mut decoder = Decoder::new(file).unwrap();

        let mut buffer = Vec::new();
        decoder.read_to_end(&mut buffer).unwrap();

        assert_eq!(
            &expected[..],
            &buffer[..],
            "error decompressing legacy version {}",
            version
        );
    }
}

// Check that compressing+decompressing some data gives back the original
fn test_full_cycle(input: &[u8], level: i32) {
    crate::test_cycle_unwrap(
        input,
        |data| encode_all(data, level),
        |data| decode_all(data),
    );
}

#[test]
fn test_empty() {
    // Test compressing empty data
    for level in 1..19 {
        test_full_cycle(b"", level);
    }
}

#[test]
fn test_ll_source() {
    // Where could I find some long text?...
    let data = include_bytes!("../../zstd-safe/zstd-sys/src/bindings_zstd.rs");
    // Test a few compression levels.
    // TODO: check them all?
    for level in 1..5 {
        // Test compressing actual data
        test_full_cycle(data, level);
    }
}

#[test]
fn reader_to_writer() {
    use std::io::{Read, Write};

    let clear = include_bytes!("../../assets/example.txt");

    // Compress using reader
    let mut encoder = super::read::Encoder::new(&clear[..], 1).unwrap();
    let mut compressed_buffer = Vec::new();
    encoder.read_to_end(&mut compressed_buffer).unwrap();
    // eprintln!("Compressed Buffer: {:?}", compressed_buffer);

    // Decompress using writer
    let mut decompressed_buffer = Vec::new();
    let mut decoder =
        super::write::Decoder::new(&mut decompressed_buffer).unwrap();
    decoder.write_all(&compressed_buffer[..]).unwrap();
    decoder.flush().unwrap();

    // eprintln!("{:?}", decompressed_buffer);

    assert_eq!(clear, &decompressed_buffer[..]);
}

#[test]
fn test_finish_empty_encoder() {
    use std::io::Write;

    let mut enc = Encoder::new(Vec::new(), 0).unwrap();

    // Finishing an encoder that received no data must still succeed.
    enc.do_finish().unwrap();

    // Writing after do_finish must fail...
    enc.write_all(b"this should not work").unwrap_err();

    // ...but finishing again is harmless.
    enc.finish().unwrap();
}
//! Implement push-based [`Write`] trait for both compressing and decompressing. use std::io::{self, Write}; use zstd_safe; use crate::dict::{DecoderDictionary, EncoderDictionary}; use crate::stream::{raw, zio}; #[cfg(test)] mod tests; /// An encoder that compress and forward data to another writer. /// /// This allows to compress a stream of data /// (good for files or heavy network stream). /// /// Don't forget to call [`finish()`] before dropping it! /// /// Alternatively, you can call [`auto_finish()`] to use an /// [`AutoFinishEncoder`] that will finish on drop. /// /// Note: The zstd library has its own internal input buffer (~128kb). /// /// [`finish()`]: #method.finish /// [`auto_finish()`]: #method.auto_finish /// [`AutoFinishEncoder`]: AutoFinishEncoder pub struct Encoder<'a, W: Write> { // output writer (compressed data) writer: zio::Writer<W, raw::Encoder<'a>>, } /// A decoder that decompress and forward data to another writer. /// /// Note that you probably want to `flush()` after writing your stream content. /// You can use [`auto_flush()`] to automatically flush the writer on drop. /// /// [`auto_flush()`]: Decoder::auto_flush pub struct Decoder<'a, W: Write> { // output writer (decompressed data) writer: zio::Writer<W, raw::Decoder<'a>>, } /// A wrapper around an `Encoder<W>` that finishes the stream on drop. /// /// This can be created by the [`auto_finish()`] method on the [`Encoder`]. /// /// [`auto_finish()`]: Encoder::auto_finish /// [`Encoder`]: Encoder pub struct AutoFinishEncoder< 'a, W: Write, F: FnMut(io::Result<W>) = Box<dyn Send + FnMut(io::Result<W>)>, > { // We wrap this in an option to take it during drop. encoder: Option<Encoder<'a, W>>, on_finish: Option<F>, } /// A wrapper around a `Decoder<W>` that flushes the stream on drop. /// /// This can be created by the [`auto_flush()`] method on the [`Decoder`]. 
/// /// [`auto_flush()`]: Decoder::auto_flush /// [`Decoder`]: Decoder pub struct AutoFlushDecoder< 'a, W: Write, F: FnMut(io::Result<()>) = Box<dyn Send + FnMut(io::Result<()>)>, > { // We wrap this in an option to take it during drop. decoder: Option<Decoder<'a, W>>, on_flush: Option<F>, } impl<'a, W: Write, F: FnMut(io::Result<()>)> AutoFlushDecoder<'a, W, F> { fn new(decoder: Decoder<'a, W>, on_flush: F) -> Self { AutoFlushDecoder { decoder: Some(decoder), on_flush: Some(on_flush), } } /// Acquires a reference to the underlying writer. pub fn get_ref(&self) -> &W { self.decoder.as_ref().unwrap().get_ref() } /// Acquires a mutable reference to the underlying writer. /// /// Note that mutation of the writer may result in surprising results if /// this decoder is continued to be used. /// /// Mostly used for testing purposes. pub fn get_mut(&mut self) -> &mut W { self.decoder.as_mut().unwrap().get_mut() } } impl<W, F> Drop for AutoFlushDecoder<'_, W, F> where W: Write, F: FnMut(io::Result<()>), { fn drop(&mut self) { let mut decoder = self.decoder.take().unwrap(); let result = decoder.flush(); if let Some(mut on_finish) = self.on_flush.take() { on_finish(result); } } } impl<W: Write, F: FnMut(io::Result<()>)> Write for AutoFlushDecoder<'_, W, F> { fn write(&mut self, buf: &[u8]) -> io::Result<usize> { self.decoder.as_mut().unwrap().write(buf) } fn flush(&mut self) -> io::Result<()> { self.decoder.as_mut().unwrap().flush() } } impl<'a, W: Write, F: FnMut(io::Result<W>)> AutoFinishEncoder<'a, W, F> { fn new(encoder: Encoder<'a, W>, on_finish: F) -> Self { AutoFinishEncoder { encoder: Some(encoder), on_finish: Some(on_finish), } } /// Acquires a reference to the underlying writer. pub fn get_ref(&self) -> &W { self.encoder.as_ref().unwrap().get_ref() } /// Acquires a mutable reference to the underlying writer. /// /// Note that mutation of the writer may result in surprising results if /// this encoder is continued to be used. 
/// /// Mostly used for testing purposes. pub fn get_mut(&mut self) -> &mut W { self.encoder.as_mut().unwrap().get_mut() } } impl<W: Write, F: FnMut(io::Result<W>)> Drop for AutoFinishEncoder<'_, W, F> { fn drop(&mut self) { let result = self.encoder.take().unwrap().finish(); if let Some(mut on_finish) = self.on_finish.take() { on_finish(result); } } } impl<W: Write, F: FnMut(io::Result<W>)> Write for AutoFinishEncoder<'_, W, F> { fn write(&mut self, buf: &[u8]) -> io::Result<usize> { self.encoder.as_mut().unwrap().write(buf) } fn flush(&mut self) -> io::Result<()> { self.encoder.as_mut().unwrap().flush() } } impl<W: Write> Encoder<'static, W> { /// Creates a new encoder. /// /// `level`: compression level (1-22). /// /// A level of `0` uses zstd's default (currently `3`). pub fn new(writer: W, level: i32) -> io::Result<Self> { Self::with_dictionary(writer, level, &[]) } /// Creates a new encoder, using an existing dictionary. /// /// (Provides better compression ratio for small files, /// but requires the dictionary to be present during decompression.) /// /// A level of `0` uses zstd's default (currently `3`). pub fn with_dictionary( writer: W, level: i32, dictionary: &[u8], ) -> io::Result<Self> { let encoder = raw::Encoder::with_dictionary(level, dictionary)?; Ok(Self::with_encoder(writer, encoder)) } } impl<'a, W: Write> Encoder<'a, W> { /// Creates a new encoder from a prepared zio writer. pub fn with_writer(writer: zio::Writer<W, raw::Encoder<'a>>) -> Self { Self { writer } } /// Creates a new encoder from the given `Write` and raw encoder. pub fn with_encoder(writer: W, encoder: raw::Encoder<'a>) -> Self { let writer = zio::Writer::new(writer, encoder); Self::with_writer(writer) } /// Creates an encoder that uses the provided context to compress a stream. 
pub fn with_context( writer: W, context: &'a mut zstd_safe::CCtx<'static>, ) -> Self { let encoder = raw::Encoder::with_context(context); Self::with_encoder(writer, encoder) } /// Creates a new encoder, using an existing prepared `EncoderDictionary`. /// /// (Provides better compression ratio for small files, /// but requires the dictionary to be present during decompression.) pub fn with_prepared_dictionary<'b>( writer: W, dictionary: &EncoderDictionary<'b>, ) -> io::Result<Self> where 'b: 'a, { let encoder = raw::Encoder::with_prepared_dictionary(dictionary)?; Ok(Self::with_encoder(writer, encoder)) } /// Creates a new encoder, using a ref prefix pub fn with_ref_prefix<'b>( writer: W, level: i32, ref_prefix: &'b [u8], ) -> io::Result<Self> where 'b: 'a, { let encoder = raw::Encoder::with_ref_prefix(level, ref_prefix)?; Ok(Self::with_encoder(writer, encoder)) } /// Returns a wrapper around `self` that will finish the stream on drop. pub fn auto_finish(self) -> AutoFinishEncoder<'a, W> { AutoFinishEncoder { encoder: Some(self), on_finish: None, } } /// Returns an encoder that will finish the stream on drop. /// /// Calls the given callback with the result from `finish()`. This runs during drop so it's /// important that the provided callback doesn't panic. pub fn on_finish<F: FnMut(io::Result<W>)>( self, f: F, ) -> AutoFinishEncoder<'a, W, F> { AutoFinishEncoder::new(self, f) } /// Acquires a reference to the underlying writer. pub fn get_ref(&self) -> &W { self.writer.writer() } /// Acquires a mutable reference to the underlying writer. /// /// Note that mutation of the writer may result in surprising results if /// this encoder is continued to be used. pub fn get_mut(&mut self) -> &mut W { self.writer.writer_mut() } /// **Required**: Finishes the stream. /// /// You *need* to finish the stream when you're done writing, either with /// this method or with [`try_finish(self)`](#method.try_finish). /// /// This returns the inner writer in case you need it. 
///
/// To get back `self` in case an error happened, use `try_finish`.
///
/// **Note**: If you don't want (or can't) call `finish()` manually after
/// writing your data, consider using `auto_finish()` to get an
/// `AutoFinishEncoder`.
pub fn finish(self) -> io::Result<W> {
    // Drops `self` on error; callers that want it back use `try_finish`.
    self.try_finish().map_err(|(_, err)| err)
}

/// **Required**: Attempts to finish the stream.
///
/// You *need* to finish the stream when you're done writing, either with
/// this method or with [`finish(self)`](#method.finish).
///
/// This returns the inner writer if the finish was successful, or the
/// object plus an error if it wasn't.
///
/// `write` on this object will panic after `try_finish` has been called,
/// even if it fails.
pub fn try_finish(mut self) -> Result<W, (Self, io::Error)> {
    match self.writer.finish() {
        // Return the writer, because why not
        Ok(()) => Ok(self.writer.into_inner().0),
        Err(e) => Err((self, e)),
    }
}

/// Attempts to finish the stream.
///
/// You *need* to finish the stream when you're done writing, either with
/// this method or with [`finish(self)`](#method.finish).
pub fn do_finish(&mut self) -> io::Result<()> {
    self.writer.finish()
}

/// Return a recommendation for the size of data to write at once.
pub fn recommended_input_size() -> usize {
    zstd_safe::CCtx::in_size()
}

// Macro defined elsewhere in the crate; adds methods common to all
// encoders, operating on the `writer` field. See its definition for
// the exact set of generated methods.
crate::encoder_common!(writer);
}

// Forward `Write` to the internal compressing writer.
impl<'a, W: Write> Write for Encoder<'a, W> {
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        self.writer.write(buf)
    }

    fn flush(&mut self) -> io::Result<()> {
        self.writer.flush()
    }
}

impl<W: Write> Decoder<'static, W> {
    /// Creates a new decoder.
    pub fn new(writer: W) -> io::Result<Self> {
        // An empty dictionary means "no dictionary".
        Self::with_dictionary(writer, &[])
    }

    /// Creates a new decoder, using an existing dictionary.
    ///
    /// (Provides better compression ratio for small files,
    /// but requires the dictionary to be present during decompression.)
pub fn with_dictionary(writer: W, dictionary: &[u8]) -> io::Result<Self> {
    let decoder = raw::Decoder::with_dictionary(dictionary)?;
    Ok(Self::with_decoder(writer, decoder))
}
}

impl<'a, W: Write> Decoder<'a, W> {
    /// Creates a new decoder around the given prepared zio writer.
    ///
    /// # Examples
    ///
    /// ```rust
    /// fn wrap<W: std::io::Write>(writer: W) -> zstd::stream::write::Decoder<'static, W> {
    ///     let decoder = zstd::stream::raw::Decoder::new().unwrap();
    ///     let writer = zstd::stream::zio::Writer::new(writer, decoder);
    ///     zstd::stream::write::Decoder::with_writer(writer)
    /// }
    /// ```
    pub fn with_writer(writer: zio::Writer<W, raw::Decoder<'a>>) -> Self {
        Decoder { writer }
    }

    /// Creates a new decoder around the given `Write` and raw decoder.
    pub fn with_decoder(writer: W, decoder: raw::Decoder<'a>) -> Self {
        let writer = zio::Writer::new(writer, decoder);
        Decoder { writer }
    }

    /// Creates a new decoder, using an existing prepared `DecoderDictionary`.
    ///
    /// (Provides better compression ratio for small files,
    /// but requires the dictionary to be present during decompression.)
    pub fn with_prepared_dictionary<'b>(
        writer: W,
        dictionary: &DecoderDictionary<'b>,
    ) -> io::Result<Self>
    // The borrowed dictionary must outlive the decoder.
    where
        'b: 'a,
    {
        let decoder = raw::Decoder::with_prepared_dictionary(dictionary)?;
        Ok(Self::with_decoder(writer, decoder))
    }

    /// Acquires a reference to the underlying writer.
    pub fn get_ref(&self) -> &W {
        self.writer.writer()
    }

    /// Acquires a mutable reference to the underlying writer.
    ///
    /// Note that mutation of the writer may result in surprising results if
    /// this decoder is continued to be used.
    pub fn get_mut(&mut self) -> &mut W {
        self.writer.writer_mut()
    }

    /// Returns the inner `Write`.
    pub fn into_inner(self) -> W {
        self.writer.into_inner().0
    }

    /// Return a recommendation for the size of data to write at once.
    pub fn recommended_input_size() -> usize {
        zstd_safe::DCtx::in_size()
    }

    /// Returns a wrapper around `self` that will flush the stream on drop.
pub fn auto_flush(self) -> AutoFlushDecoder<'a, W> {
    AutoFlushDecoder {
        decoder: Some(self),
        // No callback: the result of `flush()` is silently dropped.
        on_flush: None,
    }
}

/// Returns a decoder that will flush the stream on drop.
///
/// Calls the given callback with the result from `flush()`. This runs during drop so it's
/// important that the provided callback doesn't panic.
pub fn on_flush<F: FnMut(io::Result<()>)>(
    self,
    f: F,
) -> AutoFlushDecoder<'a, W, F> {
    AutoFlushDecoder::new(self, f)
}

// Macro defined elsewhere in the crate; adds methods common to all
// decoders, operating on the `writer` field. See its definition for
// the exact set of generated methods.
crate::decoder_common!(writer);
}

// Forward `Write` to the internal decompressing writer.
impl<W: Write> Write for Decoder<'_, W> {
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        self.writer.write(buf)
    }

    fn flush(&mut self) -> io::Result<()> {
        self.writer.flush()
    }
}

// Compile-time check that the public wrappers are `Send`; never called.
fn _assert_traits() {
    fn _assert_send<T: Send>(_: T) {}

    _assert_send(Decoder::new(Vec::new()));
    _assert_send(Encoder::new(Vec::new(), 1));
    _assert_send(Decoder::new(Vec::new()).unwrap().auto_flush());
    _assert_send(Encoder::new(Vec::new(), 1).unwrap().auto_finish());
}
use std::io::{Cursor, Write}; use std::iter; use partial_io::{PartialOp, PartialWrite}; use crate::stream::decode_all; use crate::stream::write::{Decoder, Encoder}; #[test] fn test_cycle() { let input = b"Abcdefghabcdefgh"; let buffer = Cursor::new(Vec::new()); let mut encoder = Encoder::new(buffer, 1).unwrap(); encoder.write_all(input).unwrap(); let encoded = encoder.finish().unwrap().into_inner(); // println!("Encoded: {:?}", encoded); let buffer = Cursor::new(Vec::new()); let mut decoder = Decoder::new(buffer).unwrap(); decoder.write_all(&encoded).unwrap(); decoder.flush().unwrap(); let decoded = decoder.into_inner().into_inner(); assert_eq!(input, &decoded[..]); } /// Test that flush after a partial write works successfully without /// corrupting the frame. This test is in this module because it checks /// internal implementation details. #[test] fn test_partial_write_flush() { let input = vec![b'b'; 128 * 1024]; let mut z = setup_partial_write(&input); // flush shouldn't corrupt the stream z.flush().unwrap(); let buf = z.finish().unwrap().into_inner(); assert_eq!(&decode_all(&buf[..]).unwrap(), &input); } /// Test that finish after a partial write works successfully without /// corrupting the frame. This test is in this module because it checks /// internal implementation details. #[test] fn test_partial_write_finish() { let input = vec![b'b'; 128 * 1024]; let z = setup_partial_write(&input); // finish shouldn't corrupt the stream let buf = z.finish().unwrap().into_inner(); assert_eq!(&decode_all(&buf[..]).unwrap(), &input); } fn setup_partial_write(input_data: &[u8]) -> Encoder<PartialWrite<Vec<u8>>> { let buf = PartialWrite::new(Vec::new(), iter::repeat(PartialOp::Limited(1))); let mut z = Encoder::new(buf, 1).unwrap(); // Fill in enough data to make sure the buffer gets written out. z.write(input_data).unwrap(); { let inner = &mut z.writer; // At this point, the internal buffer in z should have some data. 
assert_ne!(inner.offset(), inner.buffer().len()); } z }
//! Wrappers around raw operations implementing `std::io::{Read, Write}`. mod reader; mod writer; pub use self::reader::Reader; pub use self::writer::Writer;