jxl-rs audit report: v0.2.2 → main (7c74207)

🔍 Audit Report: jxl v0.2.2

Comparing: v0.2.2 → 7c74207e5cff878951f9fd3648d293a19291a607

Diff stats: 49 files changed, 3288 insertions(+), 1474 deletions(-)

Summary

Metric                        Total   Delta
Unsafe blocks (production)    223     +36 🆕
Unsafe blocks (tests)         11      +2
Files with unsafe code        18      -
Security-relevant patterns    185     +53
Dependency changes            0       -
Files changed                 49      +3288/-1474

📋 Delta Review Assessment

Assuming v0.2.2 was already audited. Showing only new changes to review.

Findings
🔴 36 new unsafe blocks to review - significant effort
ℹ️ 2 new unsafe blocks in tests (lower priority)
🔴 +6 new transmute usages - careful review needed
⚠️ +47 new raw pointer operations
ℹ️ 187 existing unsafe blocks (already audited in v0.2.2)

Suggested cargo-vet Criteria

  • ub-risk-2 or ub-risk-3: Significant unsafe code present
  • safe-to-run: Filesystem/network access present; scope the review accordingly
  • does-not-implement-crypto: No crypto implementations found
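Note: the ub-risk-N levels are not built-in cargo-vet criteria; they come from Google's shared audit set (github.com/google/supply-chain) and are typically made available by importing that audits file through the [imports] table of supply-chain/config.toml.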

🆕 NEW Unsafe Code (Review Priority)

These unsafe blocks were added in this diff and require careful review. The snippets are truncated diff hunks; // ... marks context elided from the excerpt.

        // ...
        let row = &mut unsafe { self.data.row_mut(row + offset.1) }[offset.0..end];
        // SAFETY: MaybeUninit<u8> and u8 have the same size and layout, and our safety invariant
        // guarantees the data is initialized.
        unsafe { std::slice::from_raw_parts_mut(row.as_mut_ptr().cast::<u8>(), row.len()) }
    }

    #[inline(always)]
    pub fn row(&self, row: usize) -> &[u8] {
        // ...
        let row = &unsafe { self.data.row(row + offset.1) }[offset.0..end];
        // SAFETY: MaybeUninit<u8> and u8 have the same size and layout, and our safety invariant
        // guarantees the data is initialized.
        unsafe { std::slice::from_raw_parts(row.as_ptr().cast::<u8>(), row.len()) }
    }

    pub fn byte_size(&self) -> (usize, usize) {
        let size = self.data.byte_size();
        // ...
    }

    // ... (accessor signature elided)
        let row = unsafe { self.data.row(row) };
        // SAFETY: MaybeUninit<u8> and u8 have the same size and layout, and our safety invariant
        // guarantees the data is initialized.
        unsafe { std::slice::from_raw_parts(row.as_ptr().cast::<u8>(), row.len()) }
    }

    pub fn rect(&self, rect: Rect) -> RawImageRect<'a> {
        Self {
            // ...
        }
    }

    // ... (mutable accessor, signature elided)
        let row = unsafe { self.data.row_mut(row) };
        // SAFETY: MaybeUninit<u8> and u8 have the same size and layout, and our safety invariant
        // guarantees the data is initialized.
        unsafe { std::slice::from_raw_parts_mut(row.as_mut_ptr().cast::<u8>(), row.len()) }
    }
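Reviewer note: all four accessors above repeat one cast pattern. Isolated as a minimal sketch (hypothetical free function, not the crate's API), the soundness argument is easier to check:

use std::mem::MaybeUninit;

// Sound only if every byte of `row` is initialized -- the struct invariant
// the SAFETY comments appeal to. MaybeUninit<u8> and u8 have identical size,
// alignment, and layout, so the pointer cast itself is always fine;
// initialization is the load-bearing condition.
fn assume_init_bytes(row: &[MaybeUninit<u8>]) -> &[u8] {
    // SAFETY: relies on the caller-upheld initialization invariant.
    unsafe { std::slice::from_raw_parts(row.as_ptr().cast::<u8>(), row.len()) }
}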

    pub fn rect_mut(&'_ mut self, rect: Rect) -> RawImageRectMut<'_> {
        Self {
            // ...
        }
    }

// --- jxl_simd/src/aarch64/neon.rs ---
        assert!(src.len() >= 2 * Self::LEN);
        // SAFETY: we just checked that `src` has enough space, and neon is available
        // from the safety invariant on `d`.
        let float32x4x2_t(a, b) = unsafe { vld2q_f32(src.as_ptr()) };
        (Self(a, d), Self(b, d))
    }

    #[inline(always)]
        assert!(src.len() >= 3 * Self::LEN);
        // SAFETY: we just checked that `src` has enough space, and neon is available
        // from the safety invariant on `d`.
        let float32x4x3_t(a, b, c) = unsafe { vld3q_f32(src.as_ptr()) };
        (Self(a, d), Self(b, d), Self(c, d))
    }

    #[inline(always)]
        assert!(src.len() >= 4 * Self::LEN);
        // SAFETY: we just checked that `src` has enough space, and neon is available
        // from the safety invariant on `d`.
        let float32x4x4_t(a, b, c, e) = unsafe { vld4q_f32(src.as_ptr()) };
        (Self(a, d), Self(b, d), Self(c, d), Self(e, d))
    }

    #[inline(always)]
            // BF16 is the high 16 bits of f32
            // SAFETY: neon is available from target_feature
            let (table_lo, table_hi) =
                unsafe { (vld1q_f32(table.as_ptr()), vld1q_f32(table.as_ptr().add(4))) };

            // Reinterpret as u32 to extract high 16 bits
            let table_lo_u32 = vreinterpretq_u32_f32(table_lo);
            let table_hi_u32 = vreinterpretq_u32_f32(table_hi);
            // ... (high 16 bits of each lane packed into bf16_table_u16, elided)
            vreinterpretq_u8_u16(bf16_table_u16)
        }
        // SAFETY: neon is available from the safety invariant on the descriptor
        Bf16Table8Neon(unsafe { prepare_impl(table) })
    }

    #[inline(always)]
    fn table_lookup_bf16_8(d: NeonDescriptor, table: Bf16Table8Neon, indices: I32VecNeon) -> Self {
            // ...
            vreinterpretq_f32_u8(result)
        }
        // SAFETY: neon is available from the safety invariant on the descriptor
        F32VecNeon(unsafe { lookup_impl(table.0, indices.0) }, d)
    }
}
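Reviewer note: the "BF16 is the high 16 bits of f32" comments describe a truncating conversion. A scalar model of what the NEON (and SSE4.2, below) table preparation extracts per lane:

// BF16 keeps f32's sign and exponent plus 7 mantissa bits; taking the top
// 16 bits simply drops the low mantissa bits (no rounding).
fn f32_to_bf16_bits(x: f32) -> u16 {
    (x.to_bits() >> 16) as u16
}

fn bf16_bits_to_f32(b: u16) -> f32 {
    f32::from_bits((b as u32) << 16)
}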

impl Add<F32VecNeon> for F32VecNeon {
    // ...
}

// --- jxl_simd/src/x86_64/avx.rs ---
            // Input: [a0, b0, a1, b1, a2, b2, a3, b3, a4, b4, a5, b5, a6, b6, a7, b7]
            // Output: a = [a0, a1, a2, a3, a4, a5, a6, a7], b = [b0, b1, b2, b3, b4, b5, b6, b7]
            // SAFETY: we just checked that src has enough space.
            let (in0, in1) = unsafe {
                (
                    _mm256_loadu_ps(src.as_ptr()),        // [a0,b0,a1,b1, a2,b2,a3,b3]
                    _mm256_loadu_ps(src.as_ptr().add(8)), // [a4,b4,a5,b5, a6,b6,a7,b7]
                )
            };
            // ...
        }

        // SAFETY: avx2 is available from the safety invariant on the descriptor.
        let (a, b) = unsafe { load_deinterleaved_2_impl(src) };
        (Self(a, d), Self(b, d))
    }

    #[inline(always)]
            // Output: a = [a0..a7], b = [b0..b7], c = [c0..c7]

            // SAFETY: we just checked that src has enough space.
            let (in0, in1, in2) = unsafe {
                (
                    _mm256_loadu_ps(src.as_ptr()),
                    _mm256_loadu_ps(src.as_ptr().add(8)),
                    _mm256_loadu_ps(src.as_ptr().add(16)),
                )
            };
            // ...
        }

        // SAFETY: avx2 is available from the safety invariant on the descriptor.
        let (a, b, c) = unsafe { load_deinterleaved_3_impl(src) };
        (Self(a, d), Self(b, d), Self(c, d))
    }

    #[inline(always)]
            // Input: [a0,b0,c0,d0, a1,b1,c1,d1, a2,b2,c2,d2, a3,b3,c3,d3, ...]
            // Output: a = [a0..a7], b = [b0..b7], c = [c0..c7], d = [d0..d7]
            // SAFETY: we just checked that src has enough space.
            let (in0, in1, in2, in3) = unsafe {
                (
                    _mm256_loadu_ps(src.as_ptr()),         // [a0,b0,c0,d0, a1,b1,c1,d1]
                    _mm256_loadu_ps(src.as_ptr().add(8)),  // [a2,b2,c2,d2, a3,b3,c3,d3]
                    _mm256_loadu_ps(src.as_ptr().add(16)), // [a4,b4,c4,d4, a5,b5,c5,d5]
                    _mm256_loadu_ps(src.as_ptr().add(24)), // [a6,b6,c6,d6, a7,b7,c7,d7]
                )
            };
            // ...
        }

        // SAFETY: avx2 is available from the safety invariant on the descriptor.
        let (a, b, c, dv) = unsafe { load_deinterleaved_4_impl(src) };
        (Self(a, d), Self(b, d), Self(c, d), Self(dv, d))
    }
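Reviewer note: a scalar model (hypothetical helper, not crate API) of what every load_deinterleaved_{2,3,4}_impl above and below computes; K is the interleave factor and n the lane count (8 for AVX2, 16 for AVX-512, 4 for SSE4.2/NEON). The assert! mirrors the bounds check each unsafe load relies on:

fn deinterleave<const K: usize>(src: &[f32], n: usize) -> Vec<Vec<f32>> {
    // The unsafe vector loads read K * n floats; this check makes them in-bounds.
    assert!(src.len() >= K * n);
    (0..K)
        .map(|k| src.iter().skip(k).step_by(K).take(n).copied().collect())
        .collect()
}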

    fn_avx!(this: F32VecAvx, fn mul_add(mul: F32VecAvx, add: F32VecAvx) -> F32VecAvx {
        // ...

    fn prepare_table_bf16_8(_d: AvxDescriptor, table: &[f32; 8]) -> Bf16Table8Avx {
        // For AVX2, vpermps is exact and fast, so we just load the table as-is
        // SAFETY: avx2 is available from the safety invariant on the descriptor
        Bf16Table8Avx(unsafe { _mm256_loadu_ps(table.as_ptr()) })
    }

    #[inline(always)]
    fn table_lookup_bf16_8(d: AvxDescriptor, table: Bf16Table8Avx, indices: I32VecAvx) -> Self {
        // SAFETY: avx2 is available from the safety invariant on the descriptor
        F32VecAvx(unsafe { _mm256_permutevar8x32_ps(table.0, indices.0) }, d)
    }
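Reviewer note: a scalar model of the vpermps lookup. _mm256_permutevar8x32_ps uses only the low 3 bits of each index, so the lookup cannot read out of bounds regardless of the index values; the AVX-512 and SSE4.2 variants below need the analogous argument (indices constrained to 0..8) spelled out in their SAFETY comments:

fn table_lookup_8_scalar(table: &[f32; 8], indices: &[i32; 8]) -> [f32; 8] {
    // Lane i of vpermps is table[indices[i] & 7]; masking models the
    // hardware's use of only the low 3 index bits.
    std::array::from_fn(|i| table[(indices[i] & 7) as usize])
}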

    #[inline(always)]
    fn round_store_u8(self, dest: &mut [u8]) {
        // ...
    }

// --- jxl_simd/src/x86_64/avx512.rs ---
            // Input: [a0,b0,a1,b1,...,a15,b15]
            // Output: a = [a0..a15], b = [b0..b15]
            // SAFETY: we just checked that src has enough space.
            let (in0, in1) = unsafe {
                (
                    _mm512_loadu_ps(src.as_ptr()),
                    _mm512_loadu_ps(src.as_ptr().add(16)),
                )
            };
            // ...
        }

        // SAFETY: avx512f is available from the safety invariant on the descriptor.
        let (a, b) = unsafe { load_deinterleaved_2_impl(src) };
        (Self(a, d), Self(b, d))
    }

    #[inline(always)]
            // Output: a = [a0..a15], b = [b0..b15], c = [c0..c15]

            // SAFETY: we just checked that src has enough space.
            let (in0, in1, in2) = unsafe {
                (
                    _mm512_loadu_ps(src.as_ptr()),
                    _mm512_loadu_ps(src.as_ptr().add(16)),
                    _mm512_loadu_ps(src.as_ptr().add(32)),
                )
            };
            // ...
        }

        // SAFETY: avx512f is available from the safety invariant on the descriptor.
        let (a, b, c) = unsafe { load_deinterleaved_3_impl(src) };
        (Self(a, d), Self(b, d), Self(c, d))
    }

    #[inline(always)]
            // Input: [a0,b0,c0,d0,a1,b1,c1,d1,...] (64 floats)
            // Output: a = [a0..a15], b = [b0..b15], c = [c0..c15], d = [d0..d15]
            // SAFETY: we just checked that src has enough space.
            let (in0, in1, in2, in3) = unsafe {
                (
                    _mm512_loadu_ps(src.as_ptr()),
                    _mm512_loadu_ps(src.as_ptr().add(16)),
                    _mm512_loadu_ps(src.as_ptr().add(32)),
                    _mm512_loadu_ps(src.as_ptr().add(48)),
                )
            };
            // ...
        }

        // SAFETY: avx512f is available from the safety invariant on the descriptor.
        let (a, b, c, dv) = unsafe { load_deinterleaved_4_impl(src) };
        (Self(a, d), Self(b, d), Self(c, d), Self(dv, d))
    }

    fn_avx!(this: F32VecAvx512, fn mul_add(mul: F32VecAvx512, add: F32VecAvx512) -> F32VecAvx512 {
        // ...

        #[inline]
        fn prepare_impl(table: &[f32; 8]) -> __m512 {
            // SAFETY: avx512f is available from target_feature
            let table_256 = unsafe { _mm256_loadu_ps(table.as_ptr()) };
            // Zero-extend to 512-bit; vpermutexvar with indices 0-7 only reads first 256 bits
            _mm512_castps256_ps512(table_256)
        }
        // SAFETY: avx512f is available from the safety invariant on the descriptor
        Bf16Table8Avx512(unsafe { prepare_impl(table) })
    }

    #[inline(always)]
    fn table_lookup_bf16_8(
        // ...
        indices: I32VecAvx512,
    ) -> Self {
        // SAFETY: avx512f is available from the safety invariant on the descriptor
        F32VecAvx512(unsafe { _mm512_permutexvar_ps(indices.0, table.0) }, d)
    }

    #[inline(always)]
    fn round_store_u8(self, dest: &mut [u8]) {
        // ...
    }

// --- jxl_simd/src/x86_64/sse42.rs ---
            // Input: [a0, b0, a1, b1, a2, b2, a3, b3]
            // Output: a = [a0, a1, a2, a3], b = [b0, b1, b2, b3]
            // SAFETY: we just checked that src has enough space.
            let (in0, in1) = unsafe {
                (
                    _mm_loadu_ps(src.as_ptr()),        // [a0, b0, a1, b1]
                    _mm_loadu_ps(src.as_ptr().add(4)), // [a2, b2, a3, b3]
                )
            };
            // ...
        }

        // SAFETY: sse4.2 is available from the safety invariant on the descriptor.
        let (a, b) = unsafe { load_deinterleaved_2_impl(src) };
        (Self(a, d), Self(b, d))
    }

    #[inline(always)]
            // Output: a = [a0, a1, a2, a3], b = [b0, b1, b2, b3], c = [c0, c1, c2, c3]

            // SAFETY: we just checked that src has enough space.
            let (in0, in1, in2) = unsafe {
                (
                    _mm_loadu_ps(src.as_ptr()),        // [a0, b0, c0, a1]
                    _mm_loadu_ps(src.as_ptr().add(4)), // [b1, c1, a2, b2]
                    _mm_loadu_ps(src.as_ptr().add(8)), // [c2, a3, b3, c3]
                )
            };
            // ...
        }

        // SAFETY: sse4.2 is available from the safety invariant on the descriptor.
        let (a, b, c) = unsafe { load_deinterleaved_3_impl(src) };
        (Self(a, d), Self(b, d), Self(c, d))
    }

    #[inline(always)]
            // Input: [a0, b0, c0, d0, a1, b1, c1, d1, a2, b2, c2, d2, a3, b3, c3, d3]
            // Output: a = [a0, a1, a2, a3], b = [b0, b1, b2, b3], c = [c0, c1, c2, c3], d = [d0, d1, d2, d3]
            // SAFETY: we just checked that src has enough space.
            let (in0, in1, in2, in3) = unsafe {
                (
                    _mm_loadu_ps(src.as_ptr()),         // [a0, b0, c0, d0]
                    _mm_loadu_ps(src.as_ptr().add(4)),  // [a1, b1, c1, d1]
                    _mm_loadu_ps(src.as_ptr().add(8)),  // [a2, b2, c2, d2]
                    _mm_loadu_ps(src.as_ptr().add(12)),  // [a3, b3, c3, d3]
                )
            };
            // ...
        }

        // SAFETY: sse4.2 is available from the safety invariant on the descriptor.
        let (a, b, c, dv) = unsafe { load_deinterleaved_4_impl(src) };
        (Self(a, d), Self(b, d), Self(c, d), Self(dv, d))
    }

    fn_sse42!(this: F32VecSse42, fn mul_add(mul: F32VecSse42, add: F32VecSse42) -> F32VecSse42 {
        // ...

            // Convert f32 table to BF16 packed in 128 bits (16 bytes for 8 entries)
            // BF16 is the high 16 bits of f32
            // SAFETY: table has exactly 8 elements and sse4.2 is available from target_feature
            let (table_lo, table_hi) = unsafe {
                (
                    _mm_loadu_ps(table.as_ptr()),
                    _mm_loadu_ps(table.as_ptr().add(4)),
                )
            };
            // ... (bf16_lo and bf16_hi computed from table_lo/table_hi, elided)
            _mm_unpacklo_epi64(bf16_lo, bf16_hi)
        }
        // SAFETY: sse4.2 is available from the safety invariant on the descriptor
        Bf16Table8Sse42(unsafe { prepare_impl(table) })
    }

    #[inline(always)]
    fn table_lookup_bf16_8(
        // ...
    ) -> Self {
            // ...
            _mm_castsi128_ps(result)
        }
        // SAFETY: sse4.2 is available from the safety invariant on the descriptor
        F32VecSse42(unsafe { lookup_impl(table.0, indices.0) }, d)
    }

    #[inline(always)]
    fn round_store_u8(self, dest: &mut [u8]) {
        // ...
    }

All Unsafe Code Locations (Production)

As above, these are truncated diff excerpts; // ... marks elided context.
// SAFETY: The safety invariant on RawImageBuffer enforces ownership rules on the contained data,
// there is no reason for it not to be Send.
unsafe impl Send for RawImageBuffer {}

// SAFETY: RawImageBuffer does not use any kind of interior mutability, so it is safe to share
// between threads.
unsafe impl Sync for RawImageBuffer {}
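Reviewer note: these impls are needed in the first place because RawImageBuffer holds a raw pointer, and raw pointers are !Send and !Sync by default. Minimal model (names hypothetical) of what is being asserted:

struct Buf {
    buf: *mut u8, // a raw pointer field makes the struct !Send + !Sync
}

// SAFETY (model): Buf uniquely owns the allocation behind `buf` and has no
// interior mutability, mirroring the argument quoted above. Auditing such
// impls means checking exactly those two claims against every field.
unsafe impl Send for Buf {}
unsafe impl Sync for Buf {}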

impl RawImageBuffer {
    pub(super) fn check_vals(num_rows: usize, bytes_per_row: usize, bytes_between_rows: usize) {
        if num_rows > 0 {
            // ...
        }
    }

    ///
    /// Note: these safety requirements match those of JxlOutputBuffer::new_from_ptr, except we
    /// only request validity for reads.
    pub(super) unsafe fn new_from_ptr(
        buf: *mut MaybeUninit<u8>,
        num_rows: usize,
        bytes_per_row: usize,
        bytes_between_rows: usize,
        // ...

    /// # Safety
    /// - The caller must ensure that ownership rules are respected (for example, because they
    ///   have exclusive access to the data).
    #[inline(always)]
    pub(super) unsafe fn row_mut(&mut self, row: usize) -> &mut [MaybeUninit<u8>] {
        // SAFETY: The safety requirements for distinct_rows_mut match the ones for row_mut.
        unsafe { self.distinct_rows_mut([row])[0] }
    }

    /// Note: this is quadratic in the number of rows.
    /// # Safety
    /// - The caller must ensure that ownership rules are respected (for example, because they
    ///   have exclusive access to the data).
    #[inline(always)]
    pub(super) unsafe fn distinct_rows_mut<I: DistinctRowsIndexes>(
        &mut self,
        rows: I,
    ) -> I::Output<'_, MaybeUninit<u8>> {
        // SAFETY: the safety requirements of `get_rows_mut` are the same as the ones for
        // `distinct_rows_mut`.
        unsafe { rows.get_rows_mut(self) }
    }

    /// # Safety
    /// The caller must ensure that ownership and lifetime rules are respected (for example,
    /// because they have shared access to the data).
    #[inline(always)]
    pub(super) unsafe fn row(&self, row: usize) -> &[MaybeUninit<u8>] {
        assert!(row < self.num_rows);
        let start = row * self.bytes_between_rows;
        // SAFETY: `start` is guaranteed to be <= isize::MAX, and `self.buf + start` is guaranteed
        // to fit within the same allocated object, as per safety invariants of this struct.
        // We checked above that `row` and `cols` satisfy the requirements to apply the safety
        // invariant.
        let start = unsafe { self.buf.add(start) };
        // SAFETY: due to the struct safety invariant, we know the entire slice is in a range of
        // memory valid for writes. Moreover, the caller promises not to write uninitialized data
        // in the returned slice. Finally, the caller guarantees aliasing rules will not be violated.
        unsafe { std::slice::from_raw_parts(start, self.bytes_per_row) }
    }
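Reviewer note: the bounds argument in row() reduces to simple arithmetic. Assuming minimum_allocation_size() is (num_rows - 1) * bytes_between_rows + bytes_per_row (an assumption; its definition is not shown in this diff), the asserted row < num_rows is exactly what keeps the access in bounds:

// Hypothetical helper making the SAFETY reasoning explicit: the byte range a
// row access touches, which must end at or before the allocation size.
fn row_byte_range(row: usize, bytes_per_row: usize, bytes_between_rows: usize)
    -> std::ops::Range<usize> {
    let start = row * bytes_between_rows;
    // For row <= num_rows - 1, start + bytes_per_row is at most
    // (num_rows - 1) * bytes_between_rows + bytes_per_row.
    start..start + bytes_per_row
}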

    /// Extracts a sub-rectangle from this buffer. Rectangle coordinates are in bytes.
    /// Safety note: the returned RawImageBuffer retains the same kind of access (read or write) as `self`.
        // SAFETY: the safety invariant of `self`, together with the above check, guarantees that
        // the calculation does not overflow and that the new pointer stays within the bounds of
        // the allocation.
        let start_ptr = unsafe {
            self.buf
                .add(rect.origin.1 * self.bytes_between_rows + rect.origin.0)
        };
        // SAFETY: Thanks to the check above, all the bytes accessible from `buf` at the
        // appropriate ranges are a subset of the ones accessible from `self.buf` at the
        // correct ranges for `self`. Thus, the safety invariant of `self` ensures that
        // the safety preconditions of this call are satisfied.
        unsafe { Self::new_from_ptr(start_ptr, rect.size.1, rect.size.0, self.bytes_between_rows) }
    }

    /// Returns zeroed memory if `uninit` is `false`, otherwise it returns uninitialized
    /// memory. The returned buffer is aligned to CACHE_LINE_BYTE_SIZE bytes.
        assert_ne!(allocation_len, 0);
        let layout = Layout::from_size_align(allocation_len, CACHE_LINE_BYTE_SIZE).unwrap();
        // SAFETY: we just checked that allocation_len is not 0.
        let memory = unsafe {
            if uninit {
                alloc(layout)
            } else {
                alloc_zeroed(layout)
            }
        };
        // SAFETY: `memory` points to a contiguous array of size minimum_allocation_size(), and we
        // transfer ownership so the validity requirements are satisfied.
        Ok(unsafe {
            RawImageBuffer::new_from_ptr(
                memory as *mut MaybeUninit<u8>,
                num_rows,
                bytes_per_row,
                // ...

    /// # Safety
    /// The caller must ensure that the data referenced by self -- *all*
    /// self.minimum_allocation_size() bytes starting from self.buf, not just the accessible bytes
    /// -- can be read.
    pub(super) unsafe fn try_clone(&self) -> Result<Self> {
        let out = RawImageBuffer::try_allocate(self.byte_size(), true)?;
        assert_eq!(self.bytes_per_row, out.bytes_per_row);
        assert_eq!(self.bytes_between_rows, out.bytes_between_rows);
        assert_eq!(self.num_rows, out.num_rows);
        if data_len != 0 {
            // SAFETY: since both `self` and `out` own the allocation, which has size `data_len`, this copy
            // is safe.
            unsafe {
                std::ptr::copy_nonoverlapping(self.buf, out.buf, data_len);
            }
        }
        Ok(out)
    }
    ///
    /// # Safety
    /// The data referenced by `self` must have been allocated with Self::try_allocate.
    pub(super) unsafe fn deallocate(&mut self) {
        if !self.buf.is_null() {
            let allocation_len = self.minimum_allocation_size();
            let layout = Layout::from_size_align(allocation_len, CACHE_LINE_BYTE_SIZE).unwrap();
            // SAFETY: the buffer was allocated in `try_allocate` with the same layout.
            unsafe {
                dealloc(self.buf as *mut u8, layout);
            }
        }
    }
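Reviewer note: try_allocate and deallocate form a pair whose layouts must agree. A self-contained sketch of that contract (CACHE_LINE_BYTE_SIZE assumed to be 64 here):

use std::alloc::{alloc_zeroed, dealloc, Layout};

const CACHE_LINE_BYTE_SIZE: usize = 64; // assumed value for the sketch

fn alloc_roundtrip(allocation_len: usize) {
    // alloc with a zero-sized layout is UB, hence the crate's assert_ne!.
    assert_ne!(allocation_len, 0);
    let layout = Layout::from_size_align(allocation_len, CACHE_LINE_BYTE_SIZE).unwrap();
    // SAFETY: layout has non-zero size.
    let memory = unsafe { alloc_zeroed(layout) };
    if memory.is_null() {
        return; // allocation failure; nothing to free
    }
    // SAFETY: `memory` was allocated above with exactly this layout and is
    // freed exactly once -- the same pairing deallocate must uphold.
    unsafe { dealloc(memory, layout) };
}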
    /// - No uninit data must be written to the returned slice.
    /// - The caller must ensure that ownership rules are respected (for example, because they
    ///   have exclusive access to the data).
    unsafe fn get_rows_mut<'a>(
        &self,
        image: &'a mut RawImageBuffer,
    ) -> Self::Output<'a, MaybeUninit<u8>>;
    /// # Safety
    /// - The rows are properly aligned
    /// - The rows contain data that is valid for type T (and thus initialized).
    unsafe fn transmute_rows<'a, T: 'static>(
        rows: Self::Output<'a, MaybeUninit<u8>>,
    ) -> Self::Output<'a, T>;
}

... and 203 more

Security-Relevant Patterns

ENV (1 occurrence)

FS (21 occurrences)

RAW_PTR (109 occurrences, 47 new)

TRANSMUTE (15 occurrences, 6 new)

Changed Files

File +/-
jxl/src/api/color.rs +146/-2
jxl/src/api/decoder.rs +255/-150
jxl/src/api/inner/codestream_parser/non_section.rs +8/-2
jxl/src/api/inner/codestream_parser/sections.rs +12/-0
jxl/src/error.rs +16/-0
jxl/src/features/epf.rs +64/-48
jxl/src/frame/decode.rs +1/-2
jxl/src/frame/modular/borrowed_buffers.rs +3/-2
jxl/src/frame/modular/decode/channel.rs +3/-0
jxl/src/frame/modular/decode/specialized_trees.rs +75/-1
jxl/src/frame/modular/mod.rs +3/-3
jxl/src/frame/modular/transforms/apply.rs +22/-21
jxl/src/frame/modular/transforms/mod.rs +2/-3
jxl/src/frame/modular/transforms/squeeze.rs +9/-10
jxl/src/frame/quant_weights.rs +131/-1117
jxl/src/frame/render.rs +111/-15
jxl/src/headers/frame_header.rs +42/-1
jxl/src/icc/tag.rs +12/-3
jxl/src/image/internal.rs +1/-1
jxl/src/image/raw.rs +4/-4
jxl/src/image/typed.rs +4/-4
jxl/src/render/internal.rs +9/-9
jxl/src/render/low_memory_pipeline/mod.rs +1/-1
jxl/src/render/low_memory_pipeline/save/identity.rs +1/-1
jxl/src/render/low_memory_pipeline/save/mod.rs +8/-2
jxl/src/render/mod.rs +3/-2
jxl/src/render/simd_utils.rs +238/-0
jxl/src/render/simple_pipeline/mod.rs +2/-2
jxl/src/render/stages/cms.rs +548/-0
jxl/src/render/stages/epf/common.rs +29/-1
jxl/src/render/stages/epf/epf0.rs +3/-5
jxl/src/render/stages/epf/epf1.rs +3/-5
jxl/src/render/stages/epf/epf2.rs +3/-5
jxl/src/render/stages/epf/test.rs +4/-4
jxl/src/render/stages/mod.rs +2/-0
jxl/src/render/stages/patches.rs +1/-1
jxl/src/render/stages/upsample.rs +1/-1
jxl/src/util/atomic_refcell/internal.rs +2/-2
jxl/src/util/cacheline.rs +2/-2
jxl/src/util/smallvec.rs +2/-2
jxl_cli/src/dec/mod.rs +337/-2
jxl_cli/src/lib.rs +145/-0
jxl_cli/src/main.rs +46/-30
jxl_simd/src/aarch64/neon.rs +91/-0
jxl_simd/src/lib.rs +328/-1
jxl_simd/src/scalar.rs +27/-0
jxl_simd/src/x86_64/avx.rs +163/-2
jxl_simd/src/x86_64/avx512.rs +176/-3
jxl_simd/src/x86_64/sse42.rs +189/-2

Audit Template

[[audits.jxl]]
who = "Your Name <your.email@example.com>"
criteria = ["safe-to-deploy", "does-not-implement-crypto"]  # Adjust based on review
delta = "0.2.2 -> <new version>"  # cargo-vet uses bare version numbers; fill in the release that contains 7c74207
notes = "Reviewed via re-audit tool"
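Entries like this normally live in supply-chain/audits.toml; cargo vet certify can record one interactively, and cargo vet diff jxl <old> <new> fetches the corresponding source delta for inspection.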

Generated by re-audit
