diff --git a/benches/unfilter.rs b/benches/unfilter.rs
index 4ff5daa8..6cc9d5f5 100644
--- a/benches/unfilter.rs
+++ b/benches/unfilter.rs
@@ -9,17 +9,12 @@
 use criterion::{criterion_group, criterion_main, Criterion, Throughput};
 use png::benchable_apis::unfilter;
-use png::FilterType;
+use png::Filter;
 use rand::Rng;
 
 fn unfilter_all(c: &mut Criterion) {
     let bpps = [1, 2, 3, 4, 6, 8];
-    let filters = [
-        FilterType::Sub,
-        FilterType::Up,
-        FilterType::Avg,
-        FilterType::Paeth,
-    ];
+    let filters = [Filter::Sub, Filter::Up, Filter::Avg, Filter::Paeth];
     for &filter in filters.iter() {
         for &bpp in bpps.iter() {
             bench_unfilter(c, filter, bpp);
@@ -30,7 +25,7 @@ fn unfilter_all(c: &mut Criterion) {
 criterion_group!(benches, unfilter_all);
 criterion_main!(benches);
 
-fn bench_unfilter(c: &mut Criterion, filter: FilterType, bpp: u8) {
+fn bench_unfilter(c: &mut Criterion, filter: Filter, bpp: u8) {
     let mut group = c.benchmark_group("unfilter");
 
     fn get_random_bytes<R: Rng>(rng: &mut R, n: usize) -> Vec<u8> {
diff --git a/examples/corpus-bench.rs b/examples/corpus-bench.rs
index 07307fb3..c088d71c 100644
--- a/examples/corpus-bench.rs
+++ b/examples/corpus-bench.rs
@@ -43,20 +43,16 @@ fn run_encode(
     encoder.set_depth(bit_depth);
     encoder.set_compression(match args.speed {
         Speed::Fast => png::Compression::Fast,
-        Speed::Default => png::Compression::Default,
-        Speed::Best => png::Compression::Best,
+        Speed::Default => png::Compression::Balanced,
+        Speed::Best => png::Compression::High,
     });
     encoder.set_filter(match args.filter {
-        Filter::None => png::FilterType::NoFilter,
-        Filter::Sub => png::FilterType::Sub,
-        Filter::Up => png::FilterType::Up,
-        Filter::Average => png::FilterType::Avg,
-        Filter::Paeth => png::FilterType::Paeth,
-        Filter::Adaptive => png::FilterType::Paeth,
-    });
-    encoder.set_adaptive_filter(match args.filter {
-        Filter::Adaptive => png::AdaptiveFilterType::Adaptive,
-        _ => png::AdaptiveFilterType::NonAdaptive,
+        Filter::None => png::Filter::NoFilter,
+        Filter::Sub => png::Filter::Sub,
+        Filter::Up => png::Filter::Up,
+        Filter::Average => png::Filter::Avg,
+        Filter::Paeth => png::Filter::Paeth,
+        Filter::Adaptive => png::Filter::Adaptive,
     });
     let mut encoder = encoder.write_header().unwrap();
     encoder.write_image_data(image).unwrap();
diff --git a/fuzz/fuzz_targets/roundtrip.rs b/fuzz/fuzz_targets/roundtrip.rs
index e428444d..904612ee 100644
--- a/fuzz/fuzz_targets/roundtrip.rs
+++ b/fuzz/fuzz_targets/roundtrip.rs
@@ -1,7 +1,7 @@
 #![no_main]
 use libfuzzer_sys::fuzz_target;
 
-use png::{FilterType, ColorType, BitDepth};
+use png::{Filter, ColorType, BitDepth};
 
 fuzz_target!(|data: (u8, u8, u8, u8, u8, Vec<u8>, Vec<u8>)| {
     if let Some((raw, encoded)) = encode_png(data.0, data.1, data.2, data.3, data.4, &data.5, &data.6) {
@@ -16,7 +16,7 @@ fn encode_png<'a>(width: u8, filter: u8, compression: u8, color_type: u8, raw_bi
     // Convert untyped bytes to the correct types and validate them:
     let width = width as u32;
     if width == 0 { return None };
-    let filter = FilterType::from_u8(filter)?;
+    let filter = filter_from_u8(filter);
     let bit_depth = BitDepth::from_u8(raw_bit_depth)?;
     let max_palette_length = 3 * u32::pow(2, raw_bit_depth as u32) as usize;
     let mut palette = raw_palette;
@@ -29,11 +29,9 @@ fn encode_png<'a>(width: u8, filter: u8, compression: u8, color_type: u8, raw_bi
     }
     // compression
    let compression = match compression {
-        0 => png::Compression::Default,
-        1 => png::Compression::Fast,
-        2 => png::Compression::Best,
-        3 => png::Compression::Huffman,
-        4 => png::Compression::Rle,
+        0 => png::DeflateCompression::NoCompression,
+        level @ 1..=9 => png::DeflateCompression::Flate2(level),
+        10 => png::DeflateCompression::FdeflateUltraFast,
         _ => return None,
     };
 
@@ -52,7 +50,7 @@ fn encode_png<'a>(width: u8, filter: u8, compression: u8, color_type: u8, raw_bi
     encoder.set_depth(bit_depth);
     encoder.set_color(color_type);
     encoder.set_filter(filter);
-    encoder.set_compression(compression);
+    encoder.set_deflate_compression(compression);
     if let ColorType::Indexed = color_type {
         encoder.set_palette(palette)
     }
@@ -75,6 +73,18 @@ fn decode_png(data: &[u8]) -> (png::OutputInfo, Vec<u8>) {
     (info, img_data)
 }
 
+/// Filter::from() doesn't cover the Filter::Adaptive variant, so we roll our own
+fn filter_from_u8(input: u8) -> Filter {
+    match input {
+        0 => Filter::NoFilter,
+        1 => Filter::Sub,
+        2 => Filter::Up,
+        3 => Filter::Avg,
+        4 => Filter::Paeth,
+        _ => Filter::Adaptive,
+    }
+}
+
 // copied from the `png` codebase because it's pub(crate)
 fn raw_row_length_from_width(depth: BitDepth, color: ColorType, width: u32) -> usize {
     let samples = width as usize * color.samples();
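The round trip that this fuzz target exercises can also be reproduced stand-alone with the new API. A minimal sketch, assuming a fixed 4x4 RGBA buffer in place of the fuzzer-provided data; every call used here appears elsewhere in this diff:

```rust
use png::{BitDepth, ColorType, DeflateCompression, Filter};

fn main() {
    // 4x4 RGBA, 8 bits per channel => 64 bytes of raw image data.
    let raw = vec![128u8; 4 * 4 * 4];

    // Encode with an explicit filter and DEFLATE mode.
    let mut encoded = Vec::new();
    {
        let mut encoder = png::Encoder::new(&mut encoded, 4, 4);
        encoder.set_color(ColorType::Rgba);
        encoder.set_depth(BitDepth::Eight);
        encoder.set_filter(Filter::Paeth);
        encoder.set_deflate_compression(DeflateCompression::FdeflateUltraFast);
        let mut writer = encoder.write_header().unwrap();
        writer.write_image_data(&raw).unwrap();
    }

    // Decode again and verify that the pixels survive the round trip.
    let decoder = png::Decoder::new(&*encoded);
    let mut reader = decoder.read_info().unwrap();
    let mut decoded = vec![0; reader.output_buffer_size()];
    reader.next_frame(&mut decoded).unwrap();
    assert_eq!(decoded, raw);
}
```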
diff --git a/src/benchable_apis.rs b/src/benchable_apis.rs
index 17b0b0d6..10599037 100644
--- a/src/benchable_apis.rs
+++ b/src/benchable_apis.rs
@@ -2,12 +2,13 @@
 //! This module is gated behind the "benchmarks" feature.
 
 use crate::common::BytesPerPixel;
-use crate::filter::FilterType;
+use crate::filter::{Filter, RowFilter};
 use crate::{BitDepth, ColorType, Info};
 
 /// Re-exporting `unfilter` to make it easier to benchmark, despite some items being only
 /// `pub(crate)`: `fn unfilter`, `enum BytesPerPixel`.
-pub fn unfilter(filter: FilterType, tbpp: u8, previous: &[u8], current: &mut [u8]) {
+pub fn unfilter(filter: Filter, tbpp: u8, previous: &[u8], current: &mut [u8]) {
+    let filter = RowFilter::from_method(filter).unwrap(); // RowFilter type is private
     let tbpp = BytesPerPixel::from_usize(tbpp as usize);
     crate::filter::unfilter(filter, tbpp, previous, current)
 }
diff --git a/src/common.rs b/src/common.rs
index 50fd2725..081cbc47 100644
--- a/src/common.rs
+++ b/src/common.rs
@@ -1,5 +1,7 @@
 //! Common types shared between the encoder and decoder
 use crate::text_metadata::{ITXtChunk, TEXtChunk, ZTXtChunk};
+#[allow(unused_imports)] // used by doc comments only
+use crate::Filter;
 use crate::{chunk, encoder};
 use io::Write;
 use std::{borrow::Cow, convert::TryFrom, fmt, io};
@@ -313,33 +315,103 @@ impl AnimationControl {
     }
 }
 
 /// The type and strength of applied compression.
+///
+/// This is a simple, high-level interface that will automatically choose
+/// the appropriate DEFLATE compression mode and PNG filter.
+///
+/// If you need more control over the encoding parameters,
+/// you can set the [DeflateCompression] and [Filter] manually.
 #[derive(Debug, Clone, Copy)]
+#[non_exhaustive]
 pub enum Compression {
-    /// Default level
-    Default,
-    /// Fast minimal compression
-    Fast,
-    /// Higher compression level
+    /// No compression whatsoever. Fastest, but results in large files.
+    NoCompression,
+    /// Extremely fast but light compression.
+    Fastest,
+    /// Extremely fast compression with a decent compression ratio.
     ///
-    /// Best in this context isn't actually the highest possible level
-    /// the encoder can do, but is meant to emulate the `Best` setting in the `Flate2`
-    /// library.
-    Best,
-    #[deprecated(
-        since = "0.17.6",
-        note = "use one of the other compression levels instead, such as 'fast'"
-    )]
-    Huffman,
-    #[deprecated(
-        since = "0.17.6",
-        note = "use one of the other compression levels instead, such as 'fast'"
-    )]
-    Rle,
+    /// Significantly outperforms libpng and other popular encoders
+    /// by using a [specialized DEFLATE implementation tuned for PNG](https://crates.io/crates/fdeflate),
+    /// while still providing better compression ratio than the fastest modes of other encoders.
+    Fast,
+    /// Balances encoding speed and compression ratio.
+    Balanced,
+    /// Spend more time to produce a slightly smaller file than with `Balanced`.
+    High,
 }
 
 impl Default for Compression {
     fn default() -> Self {
-        Self::Default
+        Self::Balanced
     }
 }
 
+/// Advanced compression settings with more customization options than [Compression].
+///
+/// Note that this setting only affects DEFLATE compression.
+/// Another setting that influences the trade-off between encoding speed
+/// and compression ratio is the [Filter].
+///
+/// ### Stability guarantees
+///
+/// The implementation details of DEFLATE compression may evolve over time,
+/// even without a semver-breaking change to the version of the `png` crate.
+///
+/// If a certain compression setting is superseded by other options,
+/// it may be marked deprecated and remapped to a different option.
+/// You will see a deprecation notice when compiling code relying on such options.
+#[non_exhaustive]
+#[derive(Debug, Clone, Copy)]
+pub enum DeflateCompression {
+    /// Do not compress the data at all.
+    ///
+    /// Useful for incompressible images,
+    /// or when speed is paramount and you don't care about size at all.
+    ///
+    /// This mode also disables filters, forcing [Filter::NoFilter].
+    NoCompression,
+
+    /// Excellent for creating lightly compressed PNG images very quickly.
+    ///
+    /// Uses the [fdeflate](https://crates.io/crates/fdeflate) crate under the hood
+    /// to achieve speeds far exceeding what libpng is capable of
+    /// while still providing a decent compression ratio.
+    ///
+    /// Images encoded in this mode can also be decoded by the `png` crate slightly faster than usual.
+    /// Other decoders (e.g. libpng) do not get a decoding speed boost from this mode.
+    FdeflateUltraFast,
+
+    /// Uses the [flate2](https://crates.io/crates/flate2) crate with the specified [compression level](flate2::Compression::new).
+    ///
+    /// Flate2 has several backends that make different trade-offs.
+    /// See the flate2 documentation on the available backends for more information.
+    Flate2(u8),
+    // Other variants can be added in the future
+}
+
+impl Default for DeflateCompression {
+    fn default() -> Self {
+        Self::from_simple(Compression::Balanced)
+    }
+}
+
+impl DeflateCompression {
+    pub(crate) fn from_simple(value: Compression) -> Self {
+        match value {
+            Compression::NoCompression => Self::NoCompression,
+            Compression::Fastest => Self::FdeflateUltraFast,
+            Compression::Fast => Self::FdeflateUltraFast,
+            Compression::Balanced => Self::Flate2(flate2::Compression::default().level() as u8),
+            Compression::High => Self::Flate2(flate2::Compression::best().level() as u8),
+        }
+    }
+
+    pub(crate) fn closest_flate2_level(&self) -> flate2::Compression {
+        match self {
+            DeflateCompression::NoCompression => flate2::Compression::none(),
+            DeflateCompression::FdeflateUltraFast => flate2::Compression::new(1),
+            DeflateCompression::Flate2(level) => flate2::Compression::new(u32::from(*level)),
+        }
+    }
+}
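To make the mapping concrete, here is an illustrative restatement in code of what the simple setting expands to, combining `from_simple` above with `Filter::from_simple` from `src/filter.rs` later in this diff. The helper name `simple_to_parts` is made up; the concrete levels 6 and 9 are what flate2 reports for its `default()` and `best()` presets, and the fallback arm is only needed because `Compression` is `#[non_exhaustive]`:

```rust
use png::{Compression, DeflateCompression, Filter};

/// Illustrative restatement of the Compression -> (DeflateCompression, Filter) pairing.
fn simple_to_parts(c: Compression) -> (DeflateCompression, Filter) {
    match c {
        Compression::NoCompression => (DeflateCompression::NoCompression, Filter::NoFilter),
        Compression::Fastest => (DeflateCompression::FdeflateUltraFast, Filter::Up),
        Compression::Fast => (DeflateCompression::FdeflateUltraFast, Filter::Adaptive),
        Compression::Balanced => (DeflateCompression::Flate2(6), Filter::Adaptive),
        Compression::High => (DeflateCompression::Flate2(9), Filter::Adaptive),
        // `Compression` is #[non_exhaustive]; treat unknown future presets like `Balanced`.
        _ => (DeflateCompression::Flate2(6), Filter::Adaptive),
    }
}

fn main() {
    let (deflate, filter) = simple_to_parts(Compression::Fast);
    println!("{:?} + {:?}", deflate, filter);
}
```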
diff --git a/src/decoder/unfiltering_buffer.rs b/src/decoder/unfiltering_buffer.rs
index 417f8f0d..27ca1afe 100644
--- a/src/decoder/unfiltering_buffer.rs
+++ b/src/decoder/unfiltering_buffer.rs
@@ -1,6 +1,6 @@
 use super::stream::{DecodingError, FormatErrorInner};
 use crate::common::BytesPerPixel;
-use crate::filter::{unfilter, FilterType};
+use crate::filter::{unfilter, RowFilter};
 
 // Buffer for temporarily holding decompressed, not-yet-`unfilter`-ed rows.
 pub(crate) struct UnfilteringBuffer {
@@ -96,7 +96,7 @@ impl UnfilteringBuffer {
         debug_assert!(prev.is_empty() || prev.len() == (rowlen - 1));
 
         // Get the filter type.
-        let filter = FilterType::from_u8(row[0]).ok_or(DecodingError::Format(
+        let filter = RowFilter::from_u8(row[0]).ok_or(DecodingError::Format(
            FormatErrorInner::UnknownFilterMethod(row[0]).into(),
         ))?;
         let row = &mut row[1..rowlen];
diff --git a/src/encoder.rs b/src/encoder.rs
index dc3bfc5b..ac23a68e 100644
--- a/src/encoder.rs
+++ b/src/encoder.rs
@@ -11,11 +11,12 @@ use crate::common::{
     AnimationControl, BitDepth, BlendOp, BytesPerPixel, ColorType, Compression, DisposeOp,
     FrameControl, Info, ParameterError, ParameterErrorKind, PixelDimensions, ScaledFloat, Unit,
 };
-use crate::filter::{filter, AdaptiveFilterType, FilterType};
+use crate::filter::{filter, Filter};
 use crate::text_metadata::{
     encode_iso_8859_1, EncodableTextChunk, ITXtChunk, TEXtChunk, TextEncodingError, ZTXtChunk,
 };
 use crate::traits::WriteBytesExt;
+use crate::DeflateCompression;
 
 pub type Result<T = ()> = result::Result<T, EncodingError>;
 
@@ -151,14 +152,13 @@ pub struct Encoder<'a, W: Write> {
     options: Options,
 }
 
-/// Decoding options, internal type, forwarded to the Writer.
+/// Encoding options, internal type, forwarded to the Writer.
 #[derive(Default)]
 struct Options {
-    filter: FilterType,
-    adaptive_filter: AdaptiveFilterType,
+    filter: Filter,
     sep_def_img: bool,
     validate_sequence: bool,
-    compression: Compression,
+    compression: DeflateCompression,
 }
 
 impl<'a, W: Write> Encoder<'a, W> {
@@ -309,31 +309,28 @@ impl<'a, W: Write> Encoder<'a, W> {
         self.info.bit_depth = depth;
     }
 
-    /// Set compression parameters.
-    ///
-    /// Accepts a `Compression` or any type that can transform into a `Compression`. Notably `deflate::Compression` and
-    /// `deflate::CompressionOptions` which "just work".
+    /// Set compression parameters; see [Compression] for the available options.
     pub fn set_compression(&mut self, compression: Compression) {
-        self.options.compression = compression;
+        self.set_deflate_compression(DeflateCompression::from_simple(compression));
+        self.set_filter(Filter::from_simple(compression));
     }
 
-    /// Set the used filter type.
+    /// Provides in-depth customization of DEFLATE compression options.
     ///
-    /// The default filter is [`FilterType::Sub`] which provides a basic prediction algorithm for
-    /// sample values based on the previous. For a potentially better compression ratio, at the
-    /// cost of more complex processing, try out [`FilterType::Paeth`].
-    pub fn set_filter(&mut self, filter: FilterType) {
-        self.options.filter = filter;
+    /// For a simpler selection of compression options see [Self::set_compression].
+    pub fn set_deflate_compression(&mut self, compression: DeflateCompression) {
+        self.options.compression = compression;
     }
 
-    /// Set the adaptive filter type.
+    /// Set the used filter type.
+    ///
+    /// The default filter is [`Filter::Adaptive`] which automatically selects the best filter
+    /// for each row of the image.
     ///
-    /// Adaptive filtering attempts to select the best filter for each line
-    /// based on heuristics which minimize the file size for compression rather
-    /// than use a single filter for the entire image. The default method is
-    /// [`AdaptiveFilterType::NonAdaptive`].
-    pub fn set_adaptive_filter(&mut self, adaptive_filter: AdaptiveFilterType) {
-        self.options.adaptive_filter = adaptive_filter;
+    /// You should only change this if you are after very fast compression,
+    /// and either don't care about compression ratio or know exactly what works best for your images.
+    pub fn set_filter(&mut self, filter: Filter) {
+        self.options.filter = filter;
     }
 
     /// Set the fraction of time every frame is going to be displayed, in seconds.
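The split between the two new setters can be shown with a short usage sketch. This is illustrative only; the level `7`, the filter choice, and the image size are arbitrary, and the writer is never finalized:

```rust
use png::{BitDepth, ColorType, Compression, DeflateCompression, Filter};

fn main() {
    let mut encoder = png::Encoder::new(std::io::sink(), 256, 256);
    encoder.set_color(ColorType::Rgb);
    encoder.set_depth(BitDepth::Eight);

    // One-knob configuration: picks both the DEFLATE mode and the PNG filter.
    encoder.set_compression(Compression::Fast);

    // Or configure the two layers independently:
    encoder.set_deflate_compression(DeflateCompression::Flate2(7));
    encoder.set_filter(Filter::Paeth);
}
```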
@@ -784,22 +781,23 @@ impl<W: Write> Writer<W> {
         let bpp = self.info.bpp_in_prediction();
         let filter_method = self.options.filter;
-        let adaptive_method = self.options.adaptive_filter;
 
         let zlib_encoded = match self.options.compression {
-            Compression::Fast => {
+            DeflateCompression::NoCompression => {
+                let mut compressor =
+                    fdeflate::StoredOnlyCompressor::new(std::io::Cursor::new(Vec::new()))?;
+                for line in data.chunks(in_len) {
+                    compressor.write_data(&[0])?;
+                    compressor.write_data(line)?;
+                }
+                compressor.finish()?.into_inner()
+            }
+            DeflateCompression::FdeflateUltraFast => {
                 let mut compressor = fdeflate::Compressor::new(std::io::Cursor::new(Vec::new()))?;
 
                 let mut current = vec![0; in_len + 1];
                 for line in data.chunks(in_len) {
-                    let filter_type = filter(
-                        filter_method,
-                        adaptive_method,
-                        bpp,
-                        prev,
-                        line,
-                        &mut current[1..],
-                    );
+                    let filter_type = filter(filter_method, bpp, prev, line, &mut current[1..]);
 
                     current[0] = filter_type as u8;
                     compressor.write_data(&current)?;
@@ -813,10 +811,7 @@
                     // Write uncompressed data since the result from fast compression would take
                     // more space than that.
                     //
-                    // We always use FilterType::NoFilter here regardless of the filter method
-                    // requested by the user. Doing filtering again would only add performance
-                    // cost for both encoding and subsequent decoding, without improving the
-                    // compression ratio.
+                    // This is essentially a fallback to NoCompression.
                    let mut compressor =
                         fdeflate::StoredOnlyCompressor::new(std::io::Cursor::new(Vec::new()))?;
                     for line in data.chunks(in_len) {
@@ -828,19 +823,13 @@
                     compressed
                 }
             }
-            _ => {
+            DeflateCompression::Flate2(level) => {
                 let mut current = vec![0; in_len];
 
-                let mut zlib = ZlibEncoder::new(Vec::new(), self.options.compression.to_options());
+                let mut zlib =
+                    ZlibEncoder::new(Vec::new(), flate2::Compression::new(u32::from(level)));
                 for line in data.chunks(in_len) {
-                    let filter_type = filter(
-                        filter_method,
-                        adaptive_method,
-                        bpp,
-                        prev,
-                        line,
-                        &mut current,
-                    );
+                    let filter_type = filter(filter_method, bpp, prev, line, &mut current);
 
                     zlib.write_all(&[filter_type as u8])?;
                     zlib.write_all(&current)?;
@@ -910,23 +899,15 @@
         Ok(())
     }
 
-    /// Set the used filter type for the following frames.
+    /// Set the used filter type.
     ///
-    /// The default filter is [`FilterType::Sub`] which provides a basic prediction algorithm for
-    /// sample values based on the previous. For a potentially better compression ratio, at the
-    /// cost of more complex processing, try out [`FilterType::Paeth`].
-    pub fn set_filter(&mut self, filter: FilterType) {
-        self.options.filter = filter;
-    }
-
-    /// Set the adaptive filter type for the following frames.
+    /// The default filter is [`Filter::Adaptive`] which automatically selects the best filter
+    /// for each row of the image.
     ///
-    /// Adaptive filtering attempts to select the best filter for each line
-    /// based on heuristics which minimize the file size for compression rather
-    /// than use a single filter for the entire image. The default method is
-    /// [`AdaptiveFilterType::NonAdaptive`].
-    pub fn set_adaptive_filter(&mut self, adaptive_filter: AdaptiveFilterType) {
-        self.options.adaptive_filter = adaptive_filter;
+    /// You should only change this if you are after very fast compression,
+    /// and either don't care about compression ratio or know exactly what works best for your images.
+    pub fn set_filter(&mut self, filter: Filter) {
+        self.options.filter = filter;
     }
 
     /// Set the fraction of time the following frames are going to be displayed,
     /// in seconds,
@@ -1404,10 +1385,9 @@ pub struct StreamWriter<'a, W: Write> {
     height: u32,
 
     bpp: BytesPerPixel,
-    filter: FilterType,
-    adaptive_filter: AdaptiveFilterType,
+    filter: Filter,
     fctl: Option<FrameControl>,
-    compression: Compression,
+    compression: DeflateCompression,
 }
 
 impl<'a, W: Write> StreamWriter<'a, W> {
@@ -1423,14 +1403,13 @@ impl<'a, W: Write> StreamWriter<'a, W> {
         let in_len = writer.info.raw_row_length() - 1;
         let filter = writer.options.filter;
         let compression = writer.options.compression;
-        let adaptive_filter = writer.options.adaptive_filter;
 
         let prev_buf = vec![0; in_len];
         let curr_buf = vec![0; in_len];
 
         let mut chunk_writer = ChunkWriter::new(writer, buf_len);
         let (line_len, to_write) = chunk_writer.next_frame_info();
         chunk_writer.write_header()?;
-        let zlib = ZlibEncoder::new(chunk_writer, compression.to_options());
+        let zlib = ZlibEncoder::new(chunk_writer, compression.closest_flate2_level());
 
         Ok(StreamWriter {
             writer: Wrapper::Zlib(zlib),
@@ -1441,7 +1420,6 @@ impl<'a, W: Write> StreamWriter<'a, W> {
             filter,
             width,
             height,
-            adaptive_filter,
             line_len,
             to_write,
             fctl,
@@ -1449,29 +1427,17 @@ impl<'a, W: Write> StreamWriter<'a, W> {
         })
     }
 
-    /// Set the used filter type for the next frame.
+    /// Set the used filter type.
     ///
-    /// The default filter is [`FilterType::Sub`] which provides a basic prediction algorithm for
-    /// sample values based on the previous.
+    /// The default filter is [`Filter::Adaptive`] which automatically selects the best filter
+    /// for each row of the image.
     ///
-    /// For optimal compression ratio you should enable adaptive filtering
-    /// instead of setting a single filter for the entire image, see
-    /// [set_adaptive_filter](Self::set_adaptive_filter).
-    pub fn set_filter(&mut self, filter: FilterType) {
+    /// You should only change this if you are after very fast compression,
+    /// and either don't care about compression ratio or know exactly what works best for your images.
+    pub fn set_filter(&mut self, filter: Filter) {
         self.filter = filter;
     }
 
-    /// Set the adaptive filter type for the next frame.
-    ///
-    /// Adaptive filtering attempts to select the best filter for each line
-    /// based on heuristics which minimize the file size for compression rather
-    /// than use a single filter for the entire image.
-    ///
-    /// The default method is [`AdaptiveFilterType::NonAdaptive`].
-    pub fn set_adaptive_filter(&mut self, adaptive_filter: AdaptiveFilterType) {
-        self.adaptive_filter = adaptive_filter;
-    }
-
     /// Set the fraction of time the following frames are going to be displayed,
     /// in seconds
     ///
@@ -1673,7 +1639,7 @@ impl<'a, W: Write> StreamWriter<'a, W> {
         // now it can be taken because the next statements cannot cause any errors
         match self.writer.take() {
             Wrapper::Chunk(wrt) => {
-                let encoder = ZlibEncoder::new(wrt, self.compression.to_options());
+                let encoder = ZlibEncoder::new(wrt, self.compression.closest_flate2_level());
                 self.writer = Wrapper::Zlib(encoder);
             }
             _ => unreachable!(),
@@ -1721,7 +1687,6 @@ impl<'a, W: Write> Write for StreamWriter<'a, W> {
             let mut filtered = vec![0; self.curr_buf.len()];
             let filter_type = filter(
                 self.filter,
-                self.adaptive_filter,
                 self.bpp,
                 &self.prev_buf,
                 &self.curr_buf,
@@ -1769,25 +1734,6 @@ impl<W: Write> Drop for StreamWriter<'_, W> {
     }
 }
 
-/// Mod to encapsulate the converters depending on the `deflate` crate.
-///
-/// Since this only contains trait impls, there is no need to make this public, they are simply
-/// available when the mod is compiled as well.
-impl Compression {
-    fn to_options(self) -> flate2::Compression {
-        #[allow(deprecated)]
-        match self {
-            Compression::Default => flate2::Compression::default(),
-            Compression::Fast => flate2::Compression::fast(),
-            Compression::Best => flate2::Compression::best(),
-            #[allow(deprecated)]
-            Compression::Huffman => flate2::Compression::none(),
-            #[allow(deprecated)]
-            Compression::Rle => flate2::Compression::none(),
-        }
-    }
-}
-
 #[cfg(test)]
 mod tests {
     use super::*;
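Since `StreamWriter` now exposes the same single `set_filter` knob, streaming encoding looks roughly like the sketch below. The 2x2 RGBA image and the `Up` filter are arbitrary choices for illustration; `finish()` flushes the stream explicitly, although the `Drop` impl above also attempts to finalize it:

```rust
use std::io::Write as _;
use png::{BitDepth, ColorType, Filter};

fn main() {
    let mut out = Vec::new();
    let mut encoder = png::Encoder::new(&mut out, 2, 2);
    encoder.set_color(ColorType::Rgba);
    encoder.set_depth(BitDepth::Eight);

    let mut writer = encoder.write_header().unwrap();
    let mut stream = writer.stream_writer().unwrap();
    stream.set_filter(Filter::Up); // single knob; no separate set_adaptive_filter call anymore
    stream.write_all(&[0u8; 2 * 2 * 4]).unwrap();
    stream.finish().unwrap();
}
```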
@@ -1825,30 +1771,34 @@
         let mut reader = decoder.read_info().unwrap();
         let mut buf = vec![0; reader.output_buffer_size()];
         let info = reader.next_frame(&mut buf).unwrap();
-        // Encode decoded image
-        let mut out = Vec::new();
-        {
-            let mut wrapper = RandomChunkWriter {
-                rng: thread_rng(),
-                w: &mut out,
-            };
-
-            let mut encoder = Encoder::new(&mut wrapper, info.width, info.height);
-            encoder.set_color(info.color_type);
-            encoder.set_depth(info.bit_depth);
-            if let Some(palette) = &reader.info().palette {
-                encoder.set_palette(palette.clone());
+        use DeflateCompression::*;
+        for compression in [NoCompression, FdeflateUltraFast, Flate2(4)] {
+            // Encode decoded image
+            let mut out = Vec::new();
+            {
+                let mut wrapper = RandomChunkWriter {
+                    rng: thread_rng(),
+                    w: &mut out,
+                };
+
+                let mut encoder = Encoder::new(&mut wrapper, info.width, info.height);
+                encoder.set_color(info.color_type);
+                encoder.set_depth(info.bit_depth);
+                encoder.set_deflate_compression(compression);
+                if let Some(palette) = &reader.info().palette {
+                    encoder.set_palette(palette.clone());
+                }
+                let mut encoder = encoder.write_header().unwrap();
+                encoder.write_image_data(&buf).unwrap();
             }
-            let mut encoder = encoder.write_header().unwrap();
-            encoder.write_image_data(&buf).unwrap();
+            // Decode encoded decoded image
+            let decoder = Decoder::new(&*out);
+            let mut reader = decoder.read_info().unwrap();
+            let mut buf2 = vec![0; reader.output_buffer_size()];
+            reader.next_frame(&mut buf2).unwrap();
+            // check if the encoded image is ok:
+            assert_eq!(buf, buf2);
         }
-        // Decode encoded decoded image
-        let decoder = Decoder::new(&*out);
-        let mut reader = decoder.read_info().unwrap();
-        let mut buf2 = vec![0; reader.output_buffer_size()];
-        reader.next_frame(&mut buf2).unwrap();
-        // check if the encoded image is ok:
-        assert_eq!(buf, buf2);
     }
 }
@@ -1879,37 +1829,41 @@
         let mut reader = decoder.read_info().unwrap();
         let mut buf = vec![0; reader.output_buffer_size()];
         let info = reader.next_frame(&mut buf).unwrap();
-        // Encode decoded image
-        let mut out = Vec::new();
-        {
-            let mut wrapper = RandomChunkWriter {
-                rng: thread_rng(),
-                w: &mut out,
-            };
-
-            let mut encoder = Encoder::new(&mut wrapper, info.width, info.height);
-            encoder.set_color(info.color_type);
-            encoder.set_depth(info.bit_depth);
-            if let Some(palette) = &reader.info().palette {
-                encoder.set_palette(palette.clone());
+        use DeflateCompression::*;
+        for compression in [NoCompression, FdeflateUltraFast, Flate2(4)] {
+            // Encode decoded image
+            let mut out = Vec::new();
+            {
+                let mut wrapper = RandomChunkWriter {
+                    rng: thread_rng(),
+                    w: &mut out,
+                };
+
+                let mut encoder = Encoder::new(&mut wrapper, info.width, info.height);
+                encoder.set_color(info.color_type);
+                encoder.set_depth(info.bit_depth);
+                encoder.set_deflate_compression(compression);
+                if let Some(palette) = &reader.info().palette {
+                    encoder.set_palette(palette.clone());
+                }
+                let mut encoder = encoder.write_header().unwrap();
+                let mut stream_writer = encoder.stream_writer().unwrap();
+
+                let mut outer_wrapper = RandomChunkWriter {
+                    rng: thread_rng(),
+                    w: &mut stream_writer,
+                };
+
+                outer_wrapper.write_all(&buf).unwrap();
             }
-            let mut encoder = encoder.write_header().unwrap();
-            let mut stream_writer = encoder.stream_writer().unwrap();
-
-            let mut outer_wrapper = RandomChunkWriter {
-                rng: thread_rng(),
-                w: &mut stream_writer,
-            };
-
-            outer_wrapper.write_all(&buf).unwrap();
+            // Decode encoded decoded image
+            let decoder = Decoder::new(&*out);
+            let mut reader = decoder.read_info().unwrap();
+            let mut buf2 = vec![0; reader.output_buffer_size()];
+            reader.next_frame(&mut buf2).unwrap();
+            // check if the encoded image is ok:
+            assert_eq!(buf, buf2);
         }
-        // Decode encoded decoded image
-        let decoder = Decoder::new(&*out);
-        let mut reader = decoder.read_info().unwrap();
-        let mut buf2 = vec![0; reader.output_buffer_size()];
-        reader.next_frame(&mut buf2).unwrap();
-        // check if the encoded image is ok:
-        assert_eq!(buf, buf2);
     }
 }
@@ -2136,7 +2090,7 @@ fn all_filters_roundtrip() -> io::Result<()> {
         let pixel: Vec<_> = (0..48).collect();
 
-        let roundtrip = |filter: FilterType| -> io::Result<()> {
+        let roundtrip = |filter: Filter| -> io::Result<()> {
             let mut buffer = vec![];
             let mut encoder = Encoder::new(&mut buffer, 4, 4);
             encoder.set_depth(BitDepth::Eight);
@@ -2156,11 +2110,11 @@
             Ok(())
         };
 
-        roundtrip(FilterType::NoFilter)?;
-        roundtrip(FilterType::Sub)?;
-        roundtrip(FilterType::Up)?;
-        roundtrip(FilterType::Avg)?;
-        roundtrip(FilterType::Paeth)?;
+        roundtrip(Filter::NoFilter)?;
+        roundtrip(Filter::Sub)?;
+        roundtrip(Filter::Up)?;
+        roundtrip(Filter::Avg)?;
+        roundtrip(Filter::Paeth)?;
 
         Ok(())
     }
@@ -2174,7 +2128,7 @@
         let mut encoder = Encoder::new(&mut buffer, 4, 4);
         encoder.set_depth(BitDepth::Eight);
         encoder.set_color(ColorType::Rgb);
-        encoder.set_filter(FilterType::Avg);
+        encoder.set_filter(Filter::Avg);
         if let Some(gamma) = gamma {
             encoder.set_source_gamma(gamma);
         }
@@ -2398,7 +2352,7 @@
         let mut encoder = Encoder::new(&mut cursor, 8, 8);
         encoder.set_color(ColorType::Rgba);
-        encoder.set_filter(FilterType::Paeth);
+        encoder.set_filter(Filter::Paeth);
 
         let mut writer = encoder.write_header()?;
         let mut stream = writer.stream_writer()?;
diff --git a/src/filter.rs b/src/filter.rs
index da1e2966..38b619b1 100644
--- a/src/filter.rs
+++ b/src/filter.rs
@@ -1,6 +1,6 @@
 use core::convert::TryInto;
 
-use crate::common::BytesPerPixel;
+use crate::{common::BytesPerPixel, Compression};
 
 /// SIMD helpers for `fn unfilter`
 ///
@@ -60,7 +60,7 @@ mod simd {
         out.into()
     }
 
-    /// Memory of previous pixels (as needed to unfilter `FilterType::Paeth`).
+    /// Memory of previous pixels (as needed to unfilter `Filter::Paeth`).
     /// See also https://www.w3.org/TR/png/#filter-byte-positions
     #[derive(Default)]
     struct PaethState<T, const N: usize>
     where
         T: SimdElement + Default,
         LaneCount<N>: SupportedLaneCount,
     {
         /// Previous pixel in the previous row.
         b: Simd<T, N>,
 
         /// Previous pixel in the current row.
         a: Simd<T, N>,
     }
 
-    /// Mutates `x` as needed to unfilter `FilterType::Paeth`.
+    /// Mutates `x` as needed to unfilter `Filter::Paeth`.
     ///
     /// `b` is the current pixel in the previous row. `x` is the current pixel in the current row.
     /// See also https://www.w3.org/TR/png/#filter-byte-positions
@@ -124,7 +124,7 @@
         dest[0..3].copy_from_slice(&src.to_array()[0..3])
     }
 
-    /// Undoes `FilterType::Paeth` for `BytesPerPixel::Three`.
+    /// Undoes `Filter::Paeth` for `BytesPerPixel::Three`.
     pub fn unfilter_paeth3(mut prev_row: &[u8], mut curr_row: &mut [u8]) {
         debug_assert_eq!(prev_row.len(), curr_row.len());
         debug_assert_eq!(prev_row.len() % 3, 0);
@@ -155,7 +155,7 @@
         store3(x, curr_row);
     }
 
-    /// Undoes `FilterType::Paeth` for `BytesPerPixel::Four` and `BytesPerPixel::Eight`.
+    /// Undoes `Filter::Paeth` for `BytesPerPixel::Four` and `BytesPerPixel::Eight`.
     ///
     /// This function calculates the Paeth predictor entirely in `Simd<u8, N>`
     /// without converting to an intermediate `Simd<i16, N>`. Doing so avoids
@@ -187,7 +187,7 @@
         dest[0..6].copy_from_slice(&src.to_array()[0..6])
     }
 
-    /// Undoes `FilterType::Paeth` for `BytesPerPixel::Six`.
+    /// Undoes `Filter::Paeth` for `BytesPerPixel::Six`.
     pub fn unfilter_paeth6(mut prev_row: &[u8], mut curr_row: &mut [u8]) {
         debug_assert_eq!(prev_row.len(), curr_row.len());
         debug_assert_eq!(prev_row.len() % 6, 0);
@@ -226,9 +226,53 @@ mod simd {
 /// this does not operate on pixels but on raw bytes of a scanline.
 ///
 /// Details on how each filter works can be found in the [PNG Book](http://www.libpng.org/pub/png/book/chapter09.html).
+///
+/// The default filter is `Adaptive`, which uses heuristics to select the best filter for every row.
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+#[non_exhaustive]
+pub enum Filter {
+    NoFilter,
+    Sub,
+    Up,
+    Avg,
+    Paeth,
+    Adaptive,
+}
+
+impl Default for Filter {
+    fn default() -> Self {
+        Filter::Adaptive
+    }
+}
+
+impl From<RowFilter> for Filter {
+    fn from(value: RowFilter) -> Self {
+        match value {
+            RowFilter::NoFilter => Filter::NoFilter,
+            RowFilter::Sub => Filter::Sub,
+            RowFilter::Up => Filter::Up,
+            RowFilter::Avg => Filter::Avg,
+            RowFilter::Paeth => Filter::Paeth,
+        }
+    }
+}
+
+impl Filter {
+    pub(crate) fn from_simple(compression: Compression) -> Self {
+        match compression {
+            Compression::NoCompression => Filter::NoFilter, // with no DEFLATE compression, filtering would only waste time
+            Compression::Fastest => Filter::Up, // pairs well with FdeflateUltraFast, producing much smaller files while being very fast
+            Compression::Fast => Filter::Adaptive,
+            Compression::Balanced => Filter::Adaptive,
+            Compression::High => Filter::Adaptive,
+        }
+    }
+}
+
+/// Unlike the public [Filter], does not include the "Adaptive" option.
 #[derive(Debug, Clone, Copy, PartialEq, Eq)]
 #[repr(u8)]
-pub enum FilterType {
+pub(crate) enum RowFilter {
     NoFilter = 0,
     Sub = 1,
     Up = 2,
@@ -236,44 +280,33 @@
     Paeth = 4,
 }
 
-impl Default for FilterType {
+impl Default for RowFilter {
     fn default() -> Self {
-        FilterType::Sub
+        RowFilter::Up
     }
 }
 
-impl FilterType {
-    /// u8 -> Self. Temporary solution until Rust provides a canonical one.
-    pub fn from_u8(n: u8) -> Option<FilterType> {
+impl RowFilter {
+    pub fn from_u8(n: u8) -> Option<Self> {
         match n {
-            0 => Some(FilterType::NoFilter),
-            1 => Some(FilterType::Sub),
-            2 => Some(FilterType::Up),
-            3 => Some(FilterType::Avg),
-            4 => Some(FilterType::Paeth),
+            0 => Some(Self::NoFilter),
+            1 => Some(Self::Sub),
+            2 => Some(Self::Up),
+            3 => Some(Self::Avg),
+            4 => Some(Self::Paeth),
             _ => None,
         }
     }
-}
 
-/// Adaptive filtering tries every possible filter for each row and uses a heuristic to select the best one.
-/// This improves compression ratio, but makes encoding slightly slower.
-///
-/// It is recommended to use `Adaptive` whenever you care about compression ratio.
-/// Filtering is quite cheap compared to other parts of encoding, but can contribute
-/// to the compression ratio significantly.
-///
-/// `NonAdaptive` filtering is the default.
-#[derive(Debug, Clone, Copy, PartialEq, Eq)]
-#[repr(u8)]
-pub enum AdaptiveFilterType {
-    Adaptive,
-    NonAdaptive,
-}
-
-impl Default for AdaptiveFilterType {
-    fn default() -> Self {
-        AdaptiveFilterType::NonAdaptive
+    pub fn from_method(strat: Filter) -> Option<Self> {
+        match strat {
+            Filter::NoFilter => Some(Self::NoFilter),
+            Filter::Sub => Some(Self::Sub),
+            Filter::Up => Some(Self::Up),
+            Filter::Avg => Some(Self::Avg),
+            Filter::Paeth => Some(Self::Paeth),
+            Filter::Adaptive => None,
+        }
     }
 }
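With the old API, mapping user-facing options onto the encoder needed both a `FilterType` and an `AdaptiveFilterType`; with the new enum a single value covers both cases, as `examples/corpus-bench.rs` above shows. A minimal hypothetical sketch of the same idea (the flag names and the `parse_filter` helper are made up):

```rust
/// Map a hypothetical CLI flag to the new single Filter enum.
fn parse_filter(name: &str) -> png::Filter {
    match name {
        "none" => png::Filter::NoFilter,
        "sub" => png::Filter::Sub,
        "up" => png::Filter::Up,
        "avg" => png::Filter::Avg,
        "paeth" => png::Filter::Paeth,
        // Per-row adaptive selection is the safe default for anything else.
        _ => png::Filter::Adaptive,
    }
}

fn main() {
    assert_eq!(parse_filter("paeth"), png::Filter::Paeth);
}
```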
@@ -370,12 +403,12 @@ fn filter_paeth_fpnge(a: u8, b: u8, c: u8) -> u8 {
 }
 
 pub(crate) fn unfilter(
-    mut filter: FilterType,
+    mut filter: RowFilter,
     tbpp: BytesPerPixel,
     previous: &[u8],
     current: &mut [u8],
 ) {
-    use self::FilterType::*;
+    use self::RowFilter::*;
 
     // If the previous row is empty, then treat it as if it were filled with zeros.
     if previous.is_empty() {
@@ -866,14 +899,14 @@
 }
 
 fn filter_internal(
-    method: FilterType,
+    method: RowFilter,
     bpp: usize,
     len: usize,
     previous: &[u8],
     current: &[u8],
     output: &mut [u8],
-) -> FilterType {
-    use self::FilterType::*;
+) -> RowFilter {
+    use self::RowFilter::*;
 
     // This value was chosen experimentally based on what achieved the best performance. The
     // Rust compiler does auto-vectorization, and 32-bytes per loop iteration seems to enable
@@ -1005,24 +1038,20 @@
 }
 
 pub(crate) fn filter(
-    method: FilterType,
-    adaptive: AdaptiveFilterType,
+    method: Filter,
     bpp: BytesPerPixel,
     previous: &[u8],
     current: &[u8],
     output: &mut [u8],
-) -> FilterType {
-    use FilterType::*;
+) -> RowFilter {
+    use RowFilter::*;
     let bpp = bpp.into_usize();
     let len = current.len();
 
-    match adaptive {
-        AdaptiveFilterType::NonAdaptive => {
-            filter_internal(method, bpp, len, previous, current, output)
-        }
-        AdaptiveFilterType::Adaptive => {
+    match method {
+        Filter::Adaptive => {
             let mut min_sum: u64 = u64::MAX;
-            let mut filter_choice = FilterType::NoFilter;
+            let mut filter_choice = RowFilter::NoFilter;
             for &filter in [Sub, Up, Avg, Paeth].iter() {
                 filter_internal(filter, bpp, len, previous, current, output);
                 let sum = sum_buffer(output);
@@ -1037,6 +1066,10 @@
             }
             filter_choice
         }
+        _ => {
+            let filter = RowFilter::from_method(method).unwrap();
+            filter_internal(filter, bpp, len, previous, current, output)
+        }
     }
 }
 
@@ -1076,11 +1109,10 @@
         let previous: Vec<_> = iter::repeat(1).take(LEN.into()).collect();
         let current: Vec<_> = (0..LEN).collect();
         let expected = current.clone();
-        let adaptive = AdaptiveFilterType::NonAdaptive;
 
-        let roundtrip = |kind, bpp: BytesPerPixel| {
+        let roundtrip = |kind: RowFilter, bpp: BytesPerPixel| {
             let mut output = vec![0; LEN.into()];
-            filter(kind, adaptive, bpp, &previous, &current, &mut output);
+            filter(kind.into(), bpp, &previous, &current, &mut output);
             unfilter(kind, bpp, &previous, &mut output);
             assert_eq!(
                 output, expected,
                 "Filtering {:?} with {:?} does not roundtrip",
                 kind, bpp
             );
         };
 
         let filters = [
-            FilterType::NoFilter,
-            FilterType::Sub,
-            FilterType::Up,
-            FilterType::Avg,
-            FilterType::Paeth,
+            RowFilter::NoFilter,
+            RowFilter::Sub,
+            RowFilter::Up,
+            RowFilter::Avg,
+            RowFilter::Paeth,
         ];
 
         let bpps = [
@@ -1139,11 +1171,10 @@
         let previous: Vec<_> = (0..LEN).collect();
         let current: Vec<_> = (0..LEN).collect();
         let expected = current.clone();
-        let adaptive = AdaptiveFilterType::NonAdaptive;
 
-        let roundtrip = |kind, bpp: BytesPerPixel| {
+        let roundtrip = |kind: RowFilter, bpp: BytesPerPixel| {
             let mut output = vec![0; LEN.into()];
-            filter(kind, adaptive, bpp, &previous, &current, &mut output);
+            filter(kind.into(), bpp, &previous, &current, &mut output);
             unfilter(kind, bpp, &previous, &mut output);
             assert_eq!(
                 output, expected,
                 "Filtering {:?} with {:?} does not roundtrip",
                 kind, bpp
             );
         };
 
         let filters = [
-            FilterType::NoFilter,
-            FilterType::Sub,
-            FilterType::Up,
-            FilterType::Avg,
-            FilterType::Paeth,
+            RowFilter::NoFilter,
+            RowFilter::Sub,
+            RowFilter::Up,
+            RowFilter::Avg,
+            RowFilter::Paeth,
         ];
 
         let bpps = [
diff --git a/src/lib.rs b/src/lib.rs
index 54a789c2..6e6c3e9b 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -77,7 +77,7 @@
 pub use crate::common::*;
 pub use crate::decoder::stream::{DecodeOptions, Decoded, DecodingError, StreamingDecoder};
 pub use crate::decoder::{Decoder, InterlaceInfo, InterlacedRow, Limits, OutputInfo, Reader};
 pub use crate::encoder::{Encoder, EncodingError, StreamWriter, Writer};
-pub use crate::filter::{AdaptiveFilterType, FilterType};
+pub use crate::filter::Filter;
 
 #[cfg(test)]
 pub(crate) mod test_utils;
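Taken together, the public API changes in this diff amount to the following migration for 0.17-era code. A sketch only; the old calls are shown in comments and the chosen values are arbitrary examples:

```rust
use png::{Compression, DeflateCompression, Filter};

fn main() {
    let mut encoder = png::Encoder::new(std::io::sink(), 16, 16);

    // was: encoder.set_compression(png::Compression::Best);
    encoder.set_compression(Compression::High);

    // was: encoder.set_filter(png::FilterType::Paeth);
    //      encoder.set_adaptive_filter(png::AdaptiveFilterType::Adaptive);
    encoder.set_filter(Filter::Adaptive);

    // new: the DEFLATE layer can now be tuned directly, independent of the filter choice.
    encoder.set_deflate_compression(DeflateCompression::Flate2(9));
}
```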