Comparing: v0.2.2 → 7c74207e5cff878951f9fd3648d293a19291a607
Diff stats: 49 files changed, 3288 insertions(+), 1474 deletions(-)
| Metric | Total | Delta |
|---|---|---|
| Unsafe blocks (production) | 223 | +36 🆕 |
| Unsafe blocks (tests) | 11 | +2 |
| Files with unsafe code | 18 | - |
| Security-relevant patterns | 185 | +53 |
| Dependency changes | 0 | - |
| Files changed | 49 | +3288/-1474 |
Assuming v0.2.2 was already audited. Showing only new changes to review.
| | Finding |
|---|---|
| 🔴 | 36 new unsafe blocks to review - significant effort |
| ℹ️ | 2 new unsafe blocks in tests (lower priority) |
| 🔴 | +6 new transmute usages - careful review needed |
| | +47 new raw pointer operations |
| ℹ️ | 187 existing unsafe blocks (already audited in v0.2.2) |
Suggested audit criteria:
- ub-risk-2 or ub-risk-3: Significant unsafe code present
- safe-to-run: Filesystem/network access present; review scope
- does-not-implement-crypto: No crypto implementations found
These unsafe blocks were added in this diff and require careful review:
jxl/src/image/raw.rs:89 (unsafe_block)
let row = &mut unsafe { self.data.row_mut(row + offset.1) }[offset.0..end];
// SAFETY: MaybeUninit<u8> and u8 have the same size and layout, and our safety invariant
// guarantees the data is initialized.
unsafe { std::slice::from_raw_parts_mut(row.as_mut_ptr().cast::<u8>(), row.len()) }
}
#[inline(always)]
pub fn row(&self, row: usize) -> &[u8] {
jxl/src/image/raw.rs:100 (unsafe_block)
let row = &unsafe { self.data.row(row + offset.1) }[offset.0..end];
// SAFETY: MaybeUninit<u8> and u8 have the same size and layout, and our safety invariant
// guarantees the data is initialized.
unsafe { std::slice::from_raw_parts(row.as_ptr().cast::<u8>(), row.len()) }
}
pub fn byte_size(&self) -> (usize, usize) {
let size = self.data.byte_size();
jxl/src/image/raw.rs:152 (unsafe_block)
let row = unsafe { self.data.row(row) };
// SAFETY: MaybeUninit<u8> and u8 have the same size and layout, and our safety invariant
// guarantees the data is initialized.
unsafe { std::slice::from_raw_parts(row.as_ptr().cast::<u8>(), row.len()) }
}
pub fn rect(&self, rect: Rect) -> RawImageRect<'a> {
Self {
jxl/src/image/raw.rs:184 (unsafe_block)
let row = unsafe { self.data.row_mut(row) };
// SAFETY: MaybeUninit<u8> and u8 have the same size and layout, and our safety invariant
// guarantees the data is initialized.
unsafe { std::slice::from_raw_parts_mut(row.as_mut_ptr().cast::<u8>(), row.len()) }
}
pub fn rect_mut(&'_ mut self, rect: Rect) -> RawImageRectMut<'_> {
Self {
jxl_simd/src/aarch64/neon.rs:281 (unsafe_block)
assert!(src.len() >= 2 * Self::LEN);
// SAFETY: we just checked that `src` has enough space, and neon is available
// from the safety invariant on `d`.
let float32x4x2_t(a, b) = unsafe { vld2q_f32(src.as_ptr()) };
(Self(a, d), Self(b, d))
}
#[inline(always)]
jxl_simd/src/aarch64/neon.rs:290 (unsafe_block)
assert!(src.len() >= 3 * Self::LEN);
// SAFETY: we just checked that `src` has enough space, and neon is available
// from the safety invariant on `d`.
let float32x4x3_t(a, b, c) = unsafe { vld3q_f32(src.as_ptr()) };
(Self(a, d), Self(b, d), Self(c, d))
}
#[inline(always)]
jxl_simd/src/aarch64/neon.rs:299 (unsafe_block)
assert!(src.len() >= 4 * Self::LEN);
// SAFETY: we just checked that `src` has enough space, and neon is available
// from the safety invariant on `d`.
let float32x4x4_t(a, b, c, e) = unsafe { vld4q_f32(src.as_ptr()) };
(Self(a, d), Self(b, d), Self(c, d), Self(e, d))
}
#[inline(always)]
jxl_simd/src/aarch64/neon.rs:455 (unsafe_block)
// BF16 is the high 16 bits of f32
// SAFETY: neon is available from target_feature
let (table_lo, table_hi) =
unsafe { (vld1q_f32(table.as_ptr()), vld1q_f32(table.as_ptr().add(4))) };
// Reinterpret as u32 to extract high 16 bits
let table_lo_u32 = vreinterpretq_u32_f32(table_lo);
let table_hi_u32 = vreinterpretq_u32_f32(table_hi);
jxl_simd/src/aarch64/neon.rs:470 (unsafe_block)
vreinterpretq_u8_u16(bf16_table_u16)
}
// SAFETY: neon is available from the safety invariant on the descriptor
Bf16Table8Neon(unsafe { prepare_impl(table) })
}
#[inline(always)]
fn table_lookup_bf16_8(d: NeonDescriptor, table: Bf16Table8Neon, indices: I32VecNeon) -> Self {
jxl_simd/src/aarch64/neon.rs:500 (unsafe_block)
vreinterpretq_f32_u8(result)
}
// SAFETY: neon is available from the safety invariant on the descriptor
F32VecNeon(unsafe { lookup_impl(table.0, indices.0) }, d)
}
}
impl Add<F32VecNeon> for F32VecNeon {
jxl_simd/src/x86_64/avx.rs:405 (unsafe_block)
// Input: [a0, b0, a1, b1, a2, b2, a3, b3, a4, b4, a5, b5, a6, b6, a7, b7]
// Output: a = [a0, a1, a2, a3, a4, a5, a6, a7], b = [b0, b1, b2, b3, b4, b5, b6, b7]
// SAFETY: we just checked that src has enough space.
let (in0, in1) = unsafe {
(
_mm256_loadu_ps(src.as_ptr()), // [a0,b0,a1,b1, a2,b2,a3,b3]
_mm256_loadu_ps(src.as_ptr().add(8)), // [a4,b4,a5,b5, a6,b6,a7,b7]
        )
jxl_simd/src/x86_64/avx.rs:425 (unsafe_block)
}
// SAFETY: avx2 is available from the safety invariant on the descriptor.
let (a, b) = unsafe { load_deinterleaved_2_impl(src) };
(Self(a, d), Self(b, d))
}
#[inline(always)]
jxl_simd/src/x86_64/avx.rs:442 (unsafe_block)
// Output: a = [a0..a7], b = [b0..b7], c = [c0..c7]
// SAFETY: we just checked that src has enough space.
let (in0, in1, in2) = unsafe {
(
_mm256_loadu_ps(src.as_ptr()),
_mm256_loadu_ps(src.as_ptr().add(8)),
            _mm256_loadu_ps(src.as_ptr().add(16)),
jxl_simd/src/x86_64/avx.rs:487 (unsafe_block)
}
// SAFETY: avx2 is available from the safety invariant on the descriptor.
let (a, b, c) = unsafe { load_deinterleaved_3_impl(src) };
(Self(a, d), Self(b, d), Self(c, d))
}
#[inline(always)]
jxl_simd/src/x86_64/avx.rs:500 (unsafe_block)
// Input: [a0,b0,c0,d0, a1,b1,c1,d1, a2,b2,c2,d2, a3,b3,c3,d3, ...]
// Output: a = [a0..a7], b = [b0..b7], c = [c0..c7], d = [d0..d7]
// SAFETY: we just checked that src has enough space.
let (in0, in1, in2, in3) = unsafe {
(
_mm256_loadu_ps(src.as_ptr()), // [a0,b0,c0,d0, a1,b1,c1,d1]
_mm256_loadu_ps(src.as_ptr().add(8)), // [a2,b2,c2,d2, a3,b3,c3,d3]
            _mm256_loadu_ps(src.as_ptr().add(16)), // [a4,b4,c4,d4, a5,b5,c5,d5]
jxl_simd/src/x86_64/avx.rs:533 (unsafe_block)
}
// SAFETY: avx2 is available from the safety invariant on the descriptor.
let (a, b, c, dv) = unsafe { load_deinterleaved_4_impl(src) };
(Self(a, d), Self(b, d), Self(c, d), Self(dv, d))
}
fn_avx!(this: F32VecAvx, fn mul_add(mul: F32VecAvx, add: F32VecAvx) -> F32VecAvx {
jxl_simd/src/x86_64/avx.rs:608 (unsafe_block)
fn prepare_table_bf16_8(_d: AvxDescriptor, table: &[f32; 8]) -> Bf16Table8Avx {
// For AVX2, vpermps is exact and fast, so we just load the table as-is
// SAFETY: avx2 is available from the safety invariant on the descriptor
Bf16Table8Avx(unsafe { _mm256_loadu_ps(table.as_ptr()) })
}
#[inline(always)]
fn table_lookup_bf16_8(d: AvxDescriptor, table: Bf16Table8Avx, indices: I32VecAvx) -> Self {
jxl_simd/src/x86_64/avx.rs:614 (unsafe_block)
#[inline(always)]
fn table_lookup_bf16_8(d: AvxDescriptor, table: Bf16Table8Avx, indices: I32VecAvx) -> Self {
// SAFETY: avx2 is available from the safety invariant on the descriptor
F32VecAvx(unsafe { _mm256_permutevar8x32_ps(table.0, indices.0) }, d)
}
#[inline(always)]
fn round_store_u8(self, dest: &mut [u8]) {
jxl_simd/src/x86_64/avx512.rs:458 (unsafe_block)
// Input: [a0,b0,a1,b1,...,a15,b15]
// Output: a = [a0..a15], b = [b0..b15]
// SAFETY: we just checked that src has enough space.
let (in0, in1) = unsafe {
(
_mm512_loadu_ps(src.as_ptr()),
_mm512_loadu_ps(src.as_ptr().add(16)),
        )
jxl_simd/src/x86_64/avx512.rs:478 (unsafe_block)
}
// SAFETY: avx512f is available from the safety invariant on the descriptor.
let (a, b) = unsafe { load_deinterleaved_2_impl(src) };
(Self(a, d), Self(b, d))
}
#[inline(always)]
jxl_simd/src/x86_64/avx512.rs:495 (unsafe_block)
// Output: a = [a0..a15], b = [b0..b15], c = [c0..c15]
// SAFETY: we just checked that src has enough space.
let (in0, in1, in2) = unsafe {
(
_mm512_loadu_ps(src.as_ptr()),
_mm512_loadu_ps(src.as_ptr().add(16)),
            _mm512_loadu_ps(src.as_ptr().add(32)),
jxl_simd/src/x86_64/avx512.rs:535 (unsafe_block)
}
// SAFETY: avx512f is available from the safety invariant on the descriptor.
let (a, b, c) = unsafe { load_deinterleaved_3_impl(src) };
(Self(a, d), Self(b, d), Self(c, d))
}
#[inline(always)]
jxl_simd/src/x86_64/avx512.rs:548 (unsafe_block)
// Input: [a0,b0,c0,d0,a1,b1,c1,d1,...] (64 floats)
// Output: a = [a0..a15], b = [b0..b15], c = [c0..c15], d = [d0..d15]
// SAFETY: we just checked that src has enough space.
let (in0, in1, in2, in3) = unsafe {
(
_mm512_loadu_ps(src.as_ptr()),
_mm512_loadu_ps(src.as_ptr().add(16)),
            _mm512_loadu_ps(src.as_ptr().add(32)),
jxl_simd/src/x86_64/avx512.rs:586 (unsafe_block)
}
// SAFETY: avx512f is available from the safety invariant on the descriptor.
let (a, b, c, dv) = unsafe { load_deinterleaved_4_impl(src) };
(Self(a, d), Self(b, d), Self(c, d), Self(dv, d))
}
fn_avx!(this: F32VecAvx512, fn mul_add(mul: F32VecAvx512, add: F32VecAvx512) -> F32VecAvx512 {
jxl_simd/src/x86_64/avx512.rs:669 (unsafe_block)
#[inline]
fn prepare_impl(table: &[f32; 8]) -> __m512 {
// SAFETY: avx512f is available from target_feature
let table_256 = unsafe { _mm256_loadu_ps(table.as_ptr()) };
// Zero-extend to 512-bit; vpermutexvar with indices 0-7 only reads first 256 bits
_mm512_castps256_ps512(table_256)
}
// SAFETY: avx512f is available from the safety invariant on the descriptor
jxl_simd/src/x86_64/avx512.rs:674 (unsafe_block)
_mm512_castps256_ps512(table_256)
}
// SAFETY: avx512f is available from the safety invariant on the descriptor
Bf16Table8Avx512(unsafe { prepare_impl(table) })
}
#[inline(always)]
fn table_lookup_bf16_8(
jxl_simd/src/x86_64/avx512.rs:684 (unsafe_block)
indices: I32VecAvx512,
) -> Self {
// SAFETY: avx512f is available from the safety invariant on the descriptor
F32VecAvx512(unsafe { _mm512_permutexvar_ps(indices.0, table.0) }, d)
}
#[inline(always)]
fn round_store_u8(self, dest: &mut [u8]) {
jxl_simd/src/x86_64/sse42.rs:315 (unsafe_block)
// Input: [a0, b0, a1, b1, a2, b2, a3, b3]
// Output: a = [a0, a1, a2, a3], b = [b0, b1, b2, b3]
// SAFETY: we just checked that src has enough space.
let (in0, in1) = unsafe {
(
_mm_loadu_ps(src.as_ptr()), // [a0, b0, a1, b1]
_mm_loadu_ps(src.as_ptr().add(4)), // [a2, b2, a3, b3]
        )
jxl_simd/src/x86_64/sse42.rs:330 (unsafe_block)
}
// SAFETY: sse4.2 is available from the safety invariant on the descriptor.
let (a, b) = unsafe { load_deinterleaved_2_impl(src) };
(Self(a, d), Self(b, d))
}
#[inline(always)]
jxl_simd/src/x86_64/sse42.rs:344 (unsafe_block)
// Output: a = [a0, a1, a2, a3], b = [b0, b1, b2, b3], c = [c0, c1, c2, c3]
// SAFETY: we just checked that src has enough space.
let (in0, in1, in2) = unsafe {
(
_mm_loadu_ps(src.as_ptr()), // [a0, b0, c0, a1]
_mm_loadu_ps(src.as_ptr().add(4)), // [b1, c1, a2, b2]
            _mm_loadu_ps(src.as_ptr().add(8)), // [c2, a3, b3, c3]
jxl_simd/src/x86_64/sse42.rs:380 (unsafe_block)
}
// SAFETY: sse4.2 is available from the safety invariant on the descriptor.
let (a, b, c) = unsafe { load_deinterleaved_3_impl(src) };
(Self(a, d), Self(b, d), Self(c, d))
}
#[inline(always)]
jxl_simd/src/x86_64/sse42.rs:393 (unsafe_block)
// Input: [a0, b0, c0, d0, a1, b1, c1, d1, a2, b2, c2, d2, a3, b3, c3, d3]
// Output: a = [a0, a1, a2, a3], b = [b0, b1, b2, b3], c = [c0, c1, c2, c3], d = [d0, d1, d2, d3]
// SAFETY: we just checked that src has enough space.
let (in0, in1, in2, in3) = unsafe {
(
_mm_loadu_ps(src.as_ptr()), // [a0, b0, c0, d0]
_mm_loadu_ps(src.as_ptr().add(4)), // [a1, b1, c1, d1]
            _mm_loadu_ps(src.as_ptr().add(8)), // [a2, b2, c2, d2]
jxl_simd/src/x86_64/sse42.rs:419 (unsafe_block)
}
// SAFETY: sse4.2 is available from the safety invariant on the descriptor.
let (a, b, c, dv) = unsafe { load_deinterleaved_4_impl(src) };
(Self(a, d), Self(b, d), Self(c, d), Self(dv, d))
}
fn_sse42!(this: F32VecSse42, fn mul_add(mul: F32VecSse42, add: F32VecSse42) -> F32VecSse42 {
jxl_simd/src/x86_64/sse42.rs:508 (unsafe_block)
// Convert f32 table to BF16 packed in 128 bits (16 bytes for 8 entries)
// BF16 is the high 16 bits of f32
// SAFETY: table has exactly 8 elements and sse4.2 is available from target_feature
let (table_lo, table_hi) = unsafe {
(
_mm_loadu_ps(table.as_ptr()),
_mm_loadu_ps(table.as_ptr().add(4)),
        )
jxl_simd/src/x86_64/sse42.rs:528 (unsafe_block)
_mm_unpacklo_epi64(bf16_lo, bf16_hi)
}
// SAFETY: sse4.2 is available from the safety invariant on the descriptor
Bf16Table8Sse42(unsafe { prepare_impl(table) })
}
#[inline(always)]
fn table_lookup_bf16_8(
jxl_simd/src/x86_64/sse42.rs:561 (unsafe_block)
_mm_castsi128_ps(result)
}
// SAFETY: sse4.2 is available from the safety invariant on the descriptor
F32VecSse42(unsafe { lookup_impl(table.0, indices.0) }, d)
}
#[inline(always)]
fn round_store_u8(self, dest: &mut [u8]) {
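The SIMD excerpts above repeat two ideas worth keeping in mind while reviewing: every deinterleaving load is preceded by a length assert that the unchecked `vld*q_f32` / `_mm*_loadu_ps` calls rely on, and the BF16 table entries are just the high 16 bits of each f32. A safe scalar sketch of both, purely illustrative (the helper names below are not part of the crate):

```rust
/// Scalar reference for a 2-way deinterleave: [a0, b0, a1, b1, ...] becomes
/// ([a0, a1, ...], [b0, b1, ...]); the SIMD versions compute the same split one
/// vector at a time, after asserting that `src` is long enough for the raw loads.
fn deinterleave_2(src: &[f32]) -> (Vec<f32>, Vec<f32>) {
    assert!(src.len() % 2 == 0);
    let a = src.iter().step_by(2).copied().collect();
    let b = src.iter().skip(1).step_by(2).copied().collect();
    (a, b)
}

/// BF16 is the high 16 bits of an IEEE-754 f32, as noted in the table-preparation code.
fn f32_to_bf16_bits(x: f32) -> u16 {
    (x.to_bits() >> 16) as u16
}
```

The remaining unsafe code details, including pre-existing blocks, follow: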
jxl/src/image/internal.rs:43 (unsafe_impl)
// SAFETY: The safety invariant on RawImageBuffer enforces ownership rules on the contained data,
// there is no reason for it not to be Send.
unsafe impl Send for RawImageBuffer {}
// SAFETY: RawImageBuffer does not use any kind of interior mutability, so it is safe to share
// between threads.
unsafe impl Sync for RawImageBuffer {}
jxl/src/image/internal.rs:47 (unsafe_impl)
// SAFETY: RawImageBuffer does not use any kind of interior mutability, so it is safe to share
// between threads.
unsafe impl Sync for RawImageBuffer {}
impl RawImageBuffer {
pub(super) fn check_vals(num_rows: usize, bytes_per_row: usize, bytes_between_rows: usize) {
if num_rows > 0 {
jxl/src/image/internal.rs:87 (unsafe_fn)
///
/// Note: these safety requirements match those of JxlOutputBuffer::new_from_ptr, except we
/// only request validity for reads.
pub(super) unsafe fn new_from_ptr(
buf: *mut MaybeUninit<u8>,
num_rows: usize,
bytes_per_row: usize,
bytes_between_rows: usize,
jxl/src/image/internal.rs:135 (unsafe_fn)
/// - The caller must ensure that ownership rules are respected (for example, because they
/// have exclusive access to the data).
#[inline(always)]
pub(super) unsafe fn row_mut(&mut self, row: usize) -> &mut [MaybeUninit<u8>] {
// SAFETY: The safety requirements for distinct_rows_mut match the ones for row_mut.
unsafe { self.distinct_rows_mut([row])[0] }
}
jxl/src/image/internal.rs:137 (unsafe_block)
#[inline(always)]
pub(super) unsafe fn row_mut(&mut self, row: usize) -> &mut [MaybeUninit<u8>] {
// SAFETY: The safety requirements for distinct_rows_mut match the ones for row_mut.
unsafe { self.distinct_rows_mut([row])[0] }
}
/// Note: this is quadratic in the number of rows.
/// # Safety
jxl/src/image/internal.rs:146 (unsafe_fn)
/// - The caller must ensure that ownership rules are respected (for example, because they
/// have exclusive access to the data).
#[inline(always)]
pub(super) unsafe fn distinct_rows_mut<I: DistinctRowsIndexes>(
&mut self,
rows: I,
) -> I::Output<'_, MaybeUninit<u8>> {
// SAFETY: the safety requirements of `get_rows_mut` are the same as the ones for
jxl/src/image/internal.rs:152 (unsafe_block)
) -> I::Output<'_, MaybeUninit<u8>> {
// SAFETY: the safety requirements of `get_rows_mut` are the same as the ones for
// `distinct_rows_mut`.
unsafe { rows.get_rows_mut(self) }
}
/// # Safety
/// The caller must ensure that ownership and lifetime rules are respected (for example,
jxl/src/image/internal.rs:159 (unsafe_fn)
/// The caller must ensure that ownership and lifetime rules are respected (for example,
/// because they have shared access to the data).
#[inline(always)]
pub(super) unsafe fn row(&self, row: usize) -> &[MaybeUninit<u8>] {
assert!(row < self.num_rows);
let start = row * self.bytes_between_rows;
// SAFETY: `start` is guaranteed to be <= isize::MAX, and `self.buf + start` is guaranteed
// to fit within the same allocated object, as per safety invariants of this struct.
jxl/src/image/internal.rs:166 (unsafe_block)
// to fit within the same allocated object, as per safety invariants of this struct.
// We checked above that `row` and `cols` satisfy the requirements to apply the safety
// invariant.
let start = unsafe { self.buf.add(start) };
// SAFETY: due to the struct safety invariant, we know the entire slice is in a range of
// memory valid for writes. Moreover, the caller promises not to write uninitialized data
// in the returned slice. Finally, the caller guarantees aliasing rules will not be violated.
unsafe { std::slice::from_raw_parts(start, self.bytes_per_row) }
jxl/src/image/internal.rs:170 (unsafe_block)
// SAFETY: due to the struct safety invariant, we know the entire slice is in a range of
// memory valid for writes. Moreover, the caller promises not to write uninitialized data
// in the returned slice. Finally, the caller guarantees aliasing rules will not be violated.
unsafe { std::slice::from_raw_parts(start, self.bytes_per_row) }
}
/// Extracts a sub-rectangle from this buffer. Rectangle coordinates are in bytes.
/// Safety note: the returned RawImageBuffer retains the same kind of access (read or write) as `self`.
jxl/src/image/internal.rs:190 (unsafe_block)
// SAFETY: the safety invariant of `self`, together with the above check, guarantees that
// the calculation does not overflow and that the new pointer stays within the bounds of
// the allocation.
let start_ptr = unsafe {
self.buf
.add(rect.origin.1 * self.bytes_between_rows + rect.origin.0)
};
// SAFETY: Thanks to the check above, all the bytes accessible from `buf` at the
jxl/src/image/internal.rs:198 (unsafe_block)
// appropriate ranges are a subset of the ones accessible from `self.buf` at the
// correct ranges for `self`. Thus, the safety invariant of `self` ensures that
// the safety preconditions of this call are satisfied.
unsafe { Self::new_from_ptr(start_ptr, rect.size.1, rect.size.0, self.bytes_between_rows) }
}
/// Returns zeroed memory if `uninit` is `false`, otherwise it returns uninitialized
/// memory. The returned buffer is aligned to CACHE_LINE_BYTE_SIZE bytes.
jxl/src/image/internal.rs:228 (unsafe_block)
assert_ne!(allocation_len, 0);
let layout = Layout::from_size_align(allocation_len, CACHE_LINE_BYTE_SIZE).unwrap();
// SAFETY: we just checked that allocation_len is not 0.
let memory = unsafe {
if uninit {
alloc(layout)
} else {
alloc_zeroed(layout)
jxl/src/image/internal.rs:240 (unsafe_block)
}
// SAFETY: `memory` points to a contiguous array of size minimum_allocation_size(), and we
// transfer ownership so the validity requirements are satisfied.
Ok(unsafe {
RawImageBuffer::new_from_ptr(
memory as *mut MaybeUninit<u8>,
num_rows,
bytes_per_row,
jxl/src/image/internal.rs:261 (unsafe_fn)
/// The caller must ensure that the data referenced by self -- *all*
/// self.minimum_allocation_size() bytes starting from self.buf, not just the accessible bytes
/// -- can be read.
pub(super) unsafe fn try_clone(&self) -> Result<Self> {
let out = RawImageBuffer::try_allocate(self.byte_size(), true)?;
assert_eq!(self.bytes_per_row, out.bytes_per_row);
assert_eq!(self.bytes_between_rows, out.bytes_between_rows);
assert_eq!(self.num_rows, out.num_rows);
jxl/src/image/internal.rs:274 (unsafe_block)
if data_len != 0 {
// SAFETY: since both `self` and `out` own the allocation, which has size `data_len`, this copy
// is safe.
unsafe {
std::ptr::copy_nonoverlapping(self.buf, out.buf, data_len);
}
}
Ok(out)
jxl/src/image/internal.rs:285 (unsafe_fn)
///
/// # Safety
/// The data referenced by `self` must have been allocated with Self::try_allocate.
pub(super) unsafe fn deallocate(&mut self) {
if !self.buf.is_null() {
let allocation_len = self.minimum_allocation_size();
let layout = Layout::from_size_align(allocation_len, CACHE_LINE_BYTE_SIZE).unwrap();
// SAFETY: the buffer was allocated in `try_allocate` with the same layout.
jxl/src/image/internal.rs:290 (unsafe_block)
let allocation_len = self.minimum_allocation_size();
let layout = Layout::from_size_align(allocation_len, CACHE_LINE_BYTE_SIZE).unwrap();
// SAFETY: the buffer was allocated in `try_allocate` with the same layout.
unsafe {
dealloc(self.buf as *mut u8, layout);
}
}
}
jxl/src/image/internal.rs:305 (unsafe_fn)
/// - No uninit data must be written to the returned slice.
/// - The caller must ensure that ownership rules are respected (for example, because they
/// have exclusive access to the data).
unsafe fn get_rows_mut<'a>(
&self,
image: &'a mut RawImageBuffer,
) -> Self::Output<'a, MaybeUninit<u8>>;
jxl/src/image/internal.rs:313 (unsafe_fn)
/// # Safety
/// - The rows are properly aligned
/// - The rows contain data that is valid for type T (and thus initialized).
unsafe fn transmute_rows<'a, T: 'static>(
rows: Self::Output<'a, MaybeUninit<u8>>,
) -> Self::Output<'a, T>;
}
... and 203 more
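The recurring justification in the raw.rs and internal.rs excerpts is that a row stored as `[MaybeUninit<u8>]` may be re-exposed as `[u8]` once every byte is known to be initialized, because the two types have identical size and layout. A minimal sketch of that argument, for reference while reviewing (hypothetical helper name, not the crate's API):

```rust
use std::mem::MaybeUninit;

/// # Safety
/// Every byte of `row` must have been initialized.
unsafe fn assume_init_row(row: &[MaybeUninit<u8>]) -> &[u8] {
    // MaybeUninit<u8> and u8 have the same size and layout, so the pointer cast is
    // valid; the caller guarantees the bytes are initialized.
    unsafe { std::slice::from_raw_parts(row.as_ptr().cast::<u8>(), row.len()) }
}
```

The review effort therefore concentrates on the initialization invariant itself (who writes the rows, and whether uninitialized data can ever be observed), rather than on the cast.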
- jxl_cli/benches/decode.rs:16: `let paths: Vec<PathBuf> = std::env::var("JXL_FILES").map_or_else(`
- jxl/src/icc/tag.rs:81: `decoded_profile.write_all(&tag)?;`
- jxl/src/icc/tag.rs:85: `decoded_profile.write_all(b"gTRC")?;`
- jxl/src/icc/tag.rs:88: `decoded_profile.write_all(b"bTRC")?;`
- jxl/src/icc/tag.rs:92: `decoded_profile.write_all(b"gXYZ")?;`
- jxl/src/icc/tag.rs:95: `decoded_profile.write_all(b"bXYZ")?;`
- jxl/src/icc/tag.rs:168: `.write_all(&bytes)`
- jxl/src/icc/tag.rs:236: `.write_all(&bytes)`
- jxl/src/icc/tag.rs:241: `.write_all(COMMON_DATA[command as usize - 16])`
- jxl/src/icc/tag.rs:242: `.and_then(|_| decoded_profile.write_all(&[0u8; 4]))`
- jxl_cli/benches/decode.rs:9: `use std::fs;`
- ... and 11 more

- jxl/src/image/internal.rs:10: `ptr::null_mut,`
- jxl/src/image/internal.rs:35: `buf: *mut MaybeUninit<u8>,`
- jxl/src/image/internal.rs:88: `buf: *mut MaybeUninit<u8>,`
- jxl/src/image/internal.rs:242: `memory as *mut MaybeUninit<u8>,`
- jxl/src/image/internal.rs:275: `std::ptr::copy_nonoverlapping(self.buf, out.buf, data_len);`
- jxl/src/image/internal.rs:291: `dealloc(self.buf as *mut u8, layout);`
- jxl/src/image/internal.rs:358: `row.as_mut_ptr().cast::<T>(),` 🆕
- jxl/src/image/output_buffer.rs:29: `buf: *mut MaybeUninit<u8>,`
- jxl/src/image/output_buffer.rs:64: `unsafe { Self::new_from_ptr(buf.as_mut_ptr(), num_rows, bytes_per_row, bytes_per...`
- jxl/src/image/output_buffer.rs:71: `unsafe { std::slice::from_raw_parts_mut(buf.as_mut_ptr().cast(), buf.len()) },`
- ... and 99 more
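Two of the sites just listed (internal.rs:275 and internal.rs:291) sit on the allocate/copy/deallocate path shown in the excerpts above. The invariant to check there is that the `Layout` passed to `dealloc` is recomputed exactly as at allocation time, and that a zero-sized allocation never reaches `alloc`. A condensed sketch, with the cache-line constant's value assumed rather than taken from the crate and error handling omitted:

```rust
use std::alloc::{alloc, alloc_zeroed, dealloc, Layout};

const CACHE_LINE_BYTE_SIZE: usize = 64; // assumption for this sketch

/// # Safety
/// The returned pointer must later be released with `free_rows(ptr, len)`.
unsafe fn alloc_rows(len: usize, uninit: bool) -> *mut u8 {
    assert_ne!(len, 0); // mirrors the assert before the unsafe alloc call
    let layout = Layout::from_size_align(len, CACHE_LINE_BYTE_SIZE).unwrap();
    // SAFETY: the layout has non-zero size, as asserted above. (Null check on
    // allocation failure omitted here.)
    unsafe { if uninit { alloc(layout) } else { alloc_zeroed(layout) } }
}

/// # Safety
/// `ptr` must come from `alloc_rows(len, _)`, so that the layouts match.
unsafe fn free_rows(ptr: *mut u8, len: usize) {
    let layout = Layout::from_size_align(len, CACHE_LINE_BYTE_SIZE).unwrap();
    // SAFETY: same size and alignment as at allocation time.
    unsafe { dealloc(ptr, layout) }
}
```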
- jxl/src/image/internal.rs:170: `unsafe { std::slice::from_raw_parts(start, self.bytes_per_row) }`
- jxl/src/image/internal.rs:346: `unsafe { std::slice::from_raw_parts_mut(start, image.bytes_per_row) }`
- jxl/src/image/internal.rs:357: `std::slice::from_raw_parts_mut(`
- jxl/src/image/output_buffer.rs:71: `unsafe { std::slice::from_raw_parts_mut(buf.as_mut_ptr().cast(), buf.len()) },`
- jxl/src/image/raw.rs:89: `unsafe { std::slice::from_raw_parts_mut(row.as_mut_ptr().cast::<u8>(), row.len()...` 🆕
- jxl/src/image/raw.rs:100: `unsafe { std::slice::from_raw_parts(row.as_ptr().cast::<u8>(), row.len()) }` 🆕
- jxl/src/image/raw.rs:152: `unsafe { std::slice::from_raw_parts(row.as_ptr().cast::<u8>(), row.len()) }` 🆕
- jxl/src/image/raw.rs:184: `unsafe { std::slice::from_raw_parts_mut(row.as_mut_ptr().cast::<u8>(), row.len()...` 🆕
- jxl/src/image/typed.rs:131: `std::slice::from_raw_parts(row.as_ptr().cast::<T>(), row.len() / T::DATA_TYPE_ID...` 🆕
- jxl/src/image/typed.rs:144: `std::slice::from_raw_parts_mut(`
- ... and 5 more
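The typed.rs entries in this list go one step further: a byte row is reinterpreted as a row of `T`, with the length divided by the element size. A sketch of the checks that make such a reinterpretation sound (illustrative only; the real code constrains `T` through its own data-type traits):

```rust
/// # Safety
/// The bytes of `row` must form valid values of `T`. Assumes `T` is not zero-sized.
unsafe fn cast_row<T>(row: &[u8]) -> &[T] {
    // The element count, not the byte count, is passed to from_raw_parts.
    assert_eq!(row.len() % std::mem::size_of::<T>(), 0);
    // The row start must be suitably aligned for T.
    assert_eq!(row.as_ptr() as usize % std::mem::align_of::<T>(), 0);
    unsafe {
        std::slice::from_raw_parts(row.as_ptr().cast::<T>(), row.len() / std::mem::size_of::<T>())
    }
}
```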
Changed files:
| File | +/- |
|---|---|
| jxl/src/api/color.rs | +146/-2 |
| jxl/src/api/decoder.rs | +255/-150 |
| jxl/src/api/inner/codestream_parser/non_section.rs | +8/-2 |
| jxl/src/api/inner/codestream_parser/sections.rs | +12/-0 |
| jxl/src/error.rs | +16/-0 |
| jxl/src/features/epf.rs | +64/-48 |
| jxl/src/frame/decode.rs | +1/-2 |
| jxl/src/frame/modular/borrowed_buffers.rs | +3/-2 |
| jxl/src/frame/modular/decode/channel.rs | +3/-0 |
| jxl/src/frame/modular/decode/specialized_trees.rs | +75/-1 |
| jxl/src/frame/modular/mod.rs | +3/-3 |
| jxl/src/frame/modular/transforms/apply.rs | +22/-21 |
| jxl/src/frame/modular/transforms/mod.rs | +2/-3 |
| jxl/src/frame/modular/transforms/squeeze.rs | +9/-10 |
| jxl/src/frame/quant_weights.rs | +131/-1117 |
| jxl/src/frame/render.rs | +111/-15 |
| jxl/src/headers/frame_header.rs | +42/-1 |
| jxl/src/icc/tag.rs | +12/-3 |
| jxl/src/image/internal.rs | +1/-1 |
| jxl/src/image/raw.rs | +4/-4 |
| jxl/src/image/typed.rs | +4/-4 |
| jxl/src/render/internal.rs | +9/-9 |
| jxl/src/render/low_memory_pipeline/mod.rs | +1/-1 |
| jxl/src/render/low_memory_pipeline/save/identity.rs | +1/-1 |
| jxl/src/render/low_memory_pipeline/save/mod.rs | +8/-2 |
| jxl/src/render/mod.rs | +3/-2 |
| jxl/src/render/simd_utils.rs | +238/-0 |
| jxl/src/render/simple_pipeline/mod.rs | +2/-2 |
| jxl/src/render/stages/cms.rs | +548/-0 |
| jxl/src/render/stages/epf/common.rs | +29/-1 |
| jxl/src/render/stages/epf/epf0.rs | +3/-5 |
| jxl/src/render/stages/epf/epf1.rs | +3/-5 |
| jxl/src/render/stages/epf/epf2.rs | +3/-5 |
| jxl/src/render/stages/epf/test.rs | +4/-4 |
| jxl/src/render/stages/mod.rs | +2/-0 |
| jxl/src/render/stages/patches.rs | +1/-1 |
| jxl/src/render/stages/upsample.rs | +1/-1 |
| jxl/src/util/atomic_refcell/internal.rs | +2/-2 |
| jxl/src/util/cacheline.rs | +2/-2 |
| jxl/src/util/smallvec.rs | +2/-2 |
| jxl_cli/src/dec/mod.rs | +337/-2 |
| jxl_cli/src/lib.rs | +145/-0 |
| jxl_cli/src/main.rs | +46/-30 |
| jxl_simd/src/aarch64/neon.rs | +91/-0 |
| jxl_simd/src/lib.rs | +328/-1 |
| jxl_simd/src/scalar.rs | +27/-0 |
| jxl_simd/src/x86_64/avx.rs | +163/-2 |
| jxl_simd/src/x86_64/avx512.rs | +176/-3 |
| jxl_simd/src/x86_64/sse42.rs | +189/-2 |
Suggested cargo-vet audit entry (template; adjust before committing):

[[audits.jxl]]
who = "Your Name <your.email@example.com>"
criteria = ["safe-to-deploy", "does-not-implement-crypto"] # Adjust based on review
delta = "v0.2.2 -> 7c74207e5cff878951f9fd3648d293a19291a607"
notes = "Reviewed via re-audit tool"

Generated by re-audit