Switch to image-webp (#2148)

This commit is contained in:
Jonathan Behrens 2024-02-18 15:09:12 -08:00 committed by GitHub
commit 3b1fbcf2ae
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
29 changed files with 121 additions and 6334 deletions

View file

@ -38,14 +38,14 @@ jobs:
strategy:
fail-fast: false
matrix:
rust: ["1.63.0", nightly, beta]
rust: ["1.67.1", nightly, beta]
steps:
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@nightly
if: ${{ matrix.rust == '1.63.0' }}
if: ${{ matrix.rust == '1.67.1' }}
- name: Generate Cargo.lock with minimal-version dependencies
if: ${{ matrix.rust == '1.63.0' }}
if: ${{ matrix.rust == '1.67.1' }}
run: cargo -Zminimal-versions generate-lockfile
- uses: dtolnay/rust-toolchain@v1
@ -58,7 +58,7 @@ jobs:
- name: build
run: cargo build -v
- name: test
if: ${{ matrix.rust != '1.63.0' }}
if: ${{ matrix.rust != '1.67.1' }}
run: cargo test -v && cargo doc -v
test_other_archs:

View file

@ -5,7 +5,7 @@ edition = "2021"
resolver = "2"
# note: when changed, also update test runner in `.github/workflows/rust.yml`
rust-version = "1.63.0"
rust-version = "1.67.1"
license = "MIT OR Apache-2.0"
description = "Imaging library. Provides basic image processing and encoders/decoders for common image formats."
@ -45,6 +45,7 @@ dav1d = { version = "0.10.2", optional = true }
dcv-color-primitives = { version = "0.6.1", optional = true }
exr = { version = "1.5.0", optional = true }
gif = { version = "0.13", optional = true }
image-webp = { version = "0.1.0", optional = true }
mp4parse = { version = "0.17.0", optional = true }
png = { version = "0.17.6", optional = true }
qoi = { version = "0.4", optional = true }
@ -84,7 +85,7 @@ pnm = []
qoi = ["dep:qoi"]
tga = []
tiff = ["dep:tiff"]
webp = []
webp = ["dep:image-webp"]
# Other features
rayon = ["dep:rayon"] # Enables multi-threading

View file

@ -1,12 +1,14 @@
extern crate afl;
extern crate image;
use std::io::Cursor;
use image::{DynamicImage, ImageDecoder};
use image::error::{ImageError, ImageResult, LimitError, LimitErrorKind};
#[inline(always)]
fn webp_decode(data: &[u8]) -> ImageResult<DynamicImage> {
let decoder = image::codecs::webp::WebPDecoder::new(data)?;
let decoder = image::codecs::webp::WebPDecoder::new(Cursor::new(data))?;
let (width, height) = decoder.dimensions();
if width.saturating_mul(height) > 4_000_000 {

View file

@ -1,5 +1,7 @@
extern crate image;
use std::io::Cursor;
use image::{DynamicImage, ImageDecoder};
use image::error::{ImageError, ImageResult, LimitError, LimitErrorKind};
@ -7,7 +9,7 @@ mod utils;
#[inline(always)]
fn webp_decode(data: &[u8]) -> ImageResult<DynamicImage> {
let decoder = image::codecs::webp::WebPDecoder::new(data)?;
let decoder = image::codecs::webp::WebPDecoder::new(Cursor::new(data))?;
let (width, height) = decoder.dimensions();
if width.saturating_mul(height) > 4_000_000 {

View file

@ -1,322 +1,57 @@
use byteorder::{LittleEndian, ReadBytesExt};
use std::convert::TryFrom;
use std::io::{self, Cursor, Error, Read};
use std::{error, fmt};
use std::io::{Read, Seek};
use crate::error::{DecodingError, ImageError, ImageResult, ParameterError, ParameterErrorKind};
use crate::buffer::ConvertBuffer;
use crate::error::{DecodingError, ImageError, ImageResult};
use crate::image::{ImageDecoder, ImageFormat};
use crate::{color, AnimationDecoder, Frames, Rgba};
use super::lossless::{LosslessDecoder, LosslessFrame};
use super::vp8::{Frame as VP8Frame, Vp8Decoder};
use super::extended::{read_extended_header, ExtendedImage};
/// All errors that can occur when attempting to parse a WEBP container
#[derive(Debug, Clone, Copy)]
#[allow(clippy::enum_variant_names)]
pub(crate) enum DecoderError {
/// RIFF's "RIFF" signature not found or invalid
RiffSignatureInvalid([u8; 4]),
/// WebP's "WEBP" signature not found or invalid
WebpSignatureInvalid([u8; 4]),
/// Chunk Header was incorrect or invalid in its usage
ChunkHeaderInvalid([u8; 4]),
}
impl fmt::Display for DecoderError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
struct SignatureWriter([u8; 4]);
impl fmt::Display for SignatureWriter {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"[{:#04X?}, {:#04X?}, {:#04X?}, {:#04X?}]",
self.0[0], self.0[1], self.0[2], self.0[3]
)
}
}
match self {
DecoderError::RiffSignatureInvalid(riff) => f.write_fmt(format_args!(
"Invalid RIFF signature: {}",
SignatureWriter(*riff)
)),
DecoderError::WebpSignatureInvalid(webp) => f.write_fmt(format_args!(
"Invalid WebP signature: {}",
SignatureWriter(*webp)
)),
DecoderError::ChunkHeaderInvalid(header) => f.write_fmt(format_args!(
"Invalid Chunk header: {}",
SignatureWriter(*header)
)),
}
}
}
impl From<DecoderError> for ImageError {
fn from(e: DecoderError) -> ImageError {
ImageError::Decoding(DecodingError::new(ImageFormat::WebP.into(), e))
}
}
impl error::Error for DecoderError {}
/// All possible RIFF chunks in a WebP image file
#[allow(clippy::upper_case_acronyms)]
#[derive(Debug, Clone, Copy, PartialEq)]
pub(crate) enum WebPRiffChunk {
RIFF,
WEBP,
VP8,
VP8L,
VP8X,
ANIM,
ANMF,
ALPH,
ICCP,
EXIF,
XMP,
}
impl WebPRiffChunk {
pub(crate) fn from_fourcc(chunk_fourcc: [u8; 4]) -> ImageResult<Self> {
match &chunk_fourcc {
b"RIFF" => Ok(Self::RIFF),
b"WEBP" => Ok(Self::WEBP),
b"VP8 " => Ok(Self::VP8),
b"VP8L" => Ok(Self::VP8L),
b"VP8X" => Ok(Self::VP8X),
b"ANIM" => Ok(Self::ANIM),
b"ANMF" => Ok(Self::ANMF),
b"ALPH" => Ok(Self::ALPH),
b"ICCP" => Ok(Self::ICCP),
b"EXIF" => Ok(Self::EXIF),
b"XMP " => Ok(Self::XMP),
_ => Err(DecoderError::ChunkHeaderInvalid(chunk_fourcc).into()),
}
}
pub(crate) fn to_fourcc(self) -> [u8; 4] {
match self {
Self::RIFF => *b"RIFF",
Self::WEBP => *b"WEBP",
Self::VP8 => *b"VP8 ",
Self::VP8L => *b"VP8L",
Self::VP8X => *b"VP8X",
Self::ANIM => *b"ANIM",
Self::ANMF => *b"ANMF",
Self::ALPH => *b"ALPH",
Self::ICCP => *b"ICCP",
Self::EXIF => *b"EXIF",
Self::XMP => *b"XMP ",
}
}
}
enum WebPImage {
Lossy(VP8Frame),
Lossless(LosslessFrame),
Extended(ExtendedImage),
}
use crate::{AnimationDecoder, ColorType, Delay, Frame, Frames, RgbImage, Rgba, RgbaImage};
/// WebP Image format decoder. Currently only supports lossy RGB images or lossless RGBA images.
pub struct WebPDecoder<R> {
r: R,
image: WebPImage,
inner: image_webp::WebPDecoder<R>,
}
impl<R: Read> WebPDecoder<R> {
impl<R: Read + Seek> WebPDecoder<R> {
/// Create a new WebPDecoder from the Reader ```r```.
/// This function takes ownership of the Reader.
pub fn new(r: R) -> ImageResult<WebPDecoder<R>> {
let image = WebPImage::Lossy(Default::default());
let mut decoder = WebPDecoder { r, image };
decoder.read_data()?;
Ok(decoder)
}
//reads the 12 bytes of the WebP file header
fn read_riff_header(&mut self) -> ImageResult<u32> {
let mut riff = [0; 4];
self.r.read_exact(&mut riff)?;
if &riff != b"RIFF" {
return Err(DecoderError::RiffSignatureInvalid(riff).into());
}
let size = self.r.read_u32::<LittleEndian>()?;
let mut webp = [0; 4];
self.r.read_exact(&mut webp)?;
if &webp != b"WEBP" {
return Err(DecoderError::WebpSignatureInvalid(webp).into());
}
Ok(size)
}
//reads the chunk header, decodes the frame and returns the inner decoder
fn read_frame(&mut self) -> ImageResult<WebPImage> {
let chunk = read_chunk(&mut self.r)?;
match chunk {
Some((cursor, WebPRiffChunk::VP8)) => {
let mut vp8_decoder = Vp8Decoder::new(cursor);
let frame = vp8_decoder.decode_frame()?;
Ok(WebPImage::Lossy(frame.clone()))
}
Some((cursor, WebPRiffChunk::VP8L)) => {
let mut lossless_decoder = LosslessDecoder::new(cursor);
let frame = lossless_decoder.decode_frame()?;
Ok(WebPImage::Lossless(frame.clone()))
}
Some((mut cursor, WebPRiffChunk::VP8X)) => {
let info = read_extended_header(&mut cursor)?;
let image = ExtendedImage::read_extended_chunks(&mut self.r, info)?;
Ok(WebPImage::Extended(image))
}
None => Err(ImageError::IoError(Error::from(
io::ErrorKind::UnexpectedEof,
))),
Some((_, chunk)) => Err(DecoderError::ChunkHeaderInvalid(chunk.to_fourcc()).into()),
}
}
fn read_data(&mut self) -> ImageResult<()> {
let _size = self.read_riff_header()?;
let image = self.read_frame()?;
self.image = image;
Ok(())
pub fn new(r: R) -> ImageResult<Self> {
Ok(Self {
inner: image_webp::WebPDecoder::new(r).map_err(ImageError::from_webp_decode)?,
})
}
/// Returns true if the image as described by the bitstream is animated.
pub fn has_animation(&self) -> bool {
match &self.image {
WebPImage::Lossy(_) => false,
WebPImage::Lossless(_) => false,
WebPImage::Extended(extended) => extended.has_animation(),
}
self.inner.is_animated()
}
/// Sets the background color if the image is an extended and animated webp.
pub fn set_background_color(&mut self, color: Rgba<u8>) -> ImageResult<()> {
match &mut self.image {
WebPImage::Extended(image) => image.set_background_color(color),
_ => Err(ImageError::Parameter(ParameterError::from_kind(
ParameterErrorKind::Generic(
"Background color can only be set on animated webp".to_owned(),
),
))),
}
self.inner
.set_background_color(color.0)
.map_err(ImageError::from_webp_decode)
}
}
pub(crate) fn read_len_cursor<R>(r: &mut R) -> ImageResult<Cursor<Vec<u8>>>
where
R: Read,
{
let unpadded_len = u64::from(r.read_u32::<LittleEndian>()?);
// RIFF chunks containing an uneven number of bytes append
// an extra 0x00 at the end of the chunk
//
// The addition cannot overflow since we have a u64 that was created from a u32
let len = unpadded_len + (unpadded_len % 2);
let mut framedata = Vec::new();
r.by_ref().take(len).read_to_end(&mut framedata)?;
//remove padding byte
if unpadded_len % 2 == 1 {
framedata.pop();
}
Ok(io::Cursor::new(framedata))
}
/// Reads a chunk header FourCC
/// Returns None if and only if we hit end of file reading the four character code of the chunk
/// The inner error is `Err` if and only if the chunk header FourCC is present but unknown
pub(crate) fn read_fourcc<R: Read>(r: &mut R) -> ImageResult<Option<ImageResult<WebPRiffChunk>>> {
let mut chunk_fourcc = [0; 4];
let result = r.read_exact(&mut chunk_fourcc);
match result {
Ok(()) => {}
Err(err) => {
if err.kind() == io::ErrorKind::UnexpectedEof {
return Ok(None);
} else {
return Err(err.into());
}
}
}
let chunk = WebPRiffChunk::from_fourcc(chunk_fourcc);
Ok(Some(chunk))
}
/// Reads a chunk
/// Returns an error if the chunk header is not a valid webp header or some other reading error
/// Returns None if and only if we hit end of file reading the four character code of the chunk
pub(crate) fn read_chunk<R>(r: &mut R) -> ImageResult<Option<(Cursor<Vec<u8>>, WebPRiffChunk)>>
where
R: Read,
{
if let Some(chunk) = read_fourcc(r)? {
let chunk = chunk?;
let cursor = read_len_cursor(r)?;
Ok(Some((cursor, chunk)))
} else {
Ok(None)
}
}
impl<R: Read> ImageDecoder for WebPDecoder<R> {
impl<R: Read + Seek> ImageDecoder for WebPDecoder<R> {
fn dimensions(&self) -> (u32, u32) {
match &self.image {
WebPImage::Lossy(vp8_frame) => {
(u32::from(vp8_frame.width), u32::from(vp8_frame.height))
}
WebPImage::Lossless(lossless_frame) => (
u32::from(lossless_frame.width),
u32::from(lossless_frame.height),
),
WebPImage::Extended(extended) => extended.dimensions(),
self.inner.dimensions()
}
fn color_type(&self) -> ColorType {
if self.inner.has_alpha() {
ColorType::Rgba8
} else {
ColorType::Rgb8
}
}
fn color_type(&self) -> color::ColorType {
match &self.image {
WebPImage::Lossy(_) => color::ColorType::Rgb8,
WebPImage::Lossless(_) => color::ColorType::Rgba8,
WebPImage::Extended(extended) => extended.color_type(),
}
}
fn read_image(self, buf: &mut [u8]) -> ImageResult<()> {
fn read_image(mut self, buf: &mut [u8]) -> ImageResult<()> {
assert_eq!(u64::try_from(buf.len()), Ok(self.total_bytes()));
match &self.image {
WebPImage::Lossy(vp8_frame) => {
vp8_frame.fill_rgb(buf);
}
WebPImage::Lossless(lossless_frame) => {
lossless_frame.fill_rgba(buf);
}
WebPImage::Extended(extended) => {
extended.fill_buf(buf);
}
}
Ok(())
self.inner
.read_image(buf)
.map_err(ImageError::from_webp_decode)
}
fn read_image_boxed(self: Box<Self>, buf: &mut [u8]) -> ImageResult<()> {
@ -324,21 +59,55 @@ impl<R: Read> ImageDecoder for WebPDecoder<R> {
}
fn icc_profile(&mut self) -> ImageResult<Option<Vec<u8>>> {
if let WebPImage::Extended(extended) = &self.image {
Ok(extended.icc_profile())
} else {
Ok(None)
}
self.inner
.icc_profile()
.map_err(ImageError::from_webp_decode)
}
}
impl<'a, R: 'a + Read> AnimationDecoder<'a> for WebPDecoder<R> {
impl<'a, R: 'a + Read + Seek> AnimationDecoder<'a> for WebPDecoder<R> {
fn into_frames(self) -> Frames<'a> {
match self.image {
WebPImage::Lossy(_) | WebPImage::Lossless(_) => {
Frames::new(Box::new(std::iter::empty()))
struct FramesInner<R: Read + Seek> {
decoder: WebPDecoder<R>,
}
impl<R: Read + Seek> Iterator for FramesInner<R> {
type Item = ImageResult<Frame>;
fn next(&mut self) -> Option<Self::Item> {
let (width, height) = self.decoder.inner.dimensions();
let (img, delay) = if self.decoder.inner.has_alpha() {
let mut img = RgbaImage::new(width, height);
match self.decoder.inner.read_frame(&mut img) {
Ok(delay) => (img, delay),
Err(e) => return Some(Err(ImageError::from_webp_decode(e))),
}
} else {
let mut img = RgbImage::new(width, height);
match self.decoder.inner.read_frame(&mut img) {
Ok(delay) => (img.convert(), delay),
Err(e) => return Some(Err(ImageError::from_webp_decode(e))),
}
};
Some(Ok(Frame::from_parts(
img,
0,
0,
Delay::from_numer_denom_ms(delay, 1),
)))
}
WebPImage::Extended(extended_image) => extended_image.into_frames(),
}
Frames::new(Box::new(FramesInner { decoder: self }))
}
}
impl ImageError {
fn from_webp_decode(e: image_webp::DecodingError) -> Self {
match e {
image_webp::DecodingError::IoError(e) => ImageError::IoError(e),
_ => ImageError::Decoding(DecodingError::new(ImageFormat::WebP.into(), e)),
}
}
}

View file

@ -1,24 +1,15 @@
//! Encoding of WebP images.
use std::collections::BinaryHeap;
///
/// Uses the simple encoding API from the [libwebp] library.
///
/// [libwebp]: https://developers.google.com/speed/webp/docs/api#simple_encoding_api
use std::io::{self, Write};
use std::iter::FromIterator;
use std::slice::ChunksExact;
use crate::error::{ParameterError, ParameterErrorKind, UnsupportedError, UnsupportedErrorKind};
use crate::flat::SampleLayout;
use crate::{ColorType, ImageEncoder, ImageError, ImageFormat, ImageResult};
use std::io::Write;
use crate::{
error::{EncodingError, UnsupportedError, UnsupportedErrorKind},
ColorType, ImageEncoder, ImageError, ImageFormat, ImageResult,
};
/// WebP Encoder.
pub struct WebPEncoder<W> {
writer: W,
chunk_buffer: Vec<u8>,
buffer: u64,
nbits: u8,
inner: image_webp::WebPEncoder<W>,
}
impl<W: Write> WebPEncoder<W> {
@ -27,554 +18,10 @@ impl<W: Write> WebPEncoder<W> {
/// Uses "VP8L" lossless encoding.
pub fn new_lossless(w: W) -> Self {
Self {
writer: w,
chunk_buffer: Vec::new(),
buffer: 0,
nbits: 0,
inner: image_webp::WebPEncoder::new(w),
}
}
fn write_bits(&mut self, bits: u64, nbits: u8) -> io::Result<()> {
debug_assert!(nbits <= 64);
self.buffer |= bits << self.nbits;
self.nbits += nbits;
if self.nbits >= 64 {
self.chunk_buffer.write_all(&self.buffer.to_le_bytes())?;
self.nbits -= 64;
self.buffer = bits.checked_shr((nbits - self.nbits) as u32).unwrap_or(0);
}
debug_assert!(self.nbits < 64);
Ok(())
}
fn flush(&mut self) -> io::Result<()> {
if self.nbits % 8 != 0 {
self.write_bits(0, 8 - self.nbits % 8)?;
}
if self.nbits > 0 {
self.chunk_buffer
.write_all(&self.buffer.to_le_bytes()[..self.nbits as usize / 8])
.unwrap();
self.buffer = 0;
self.nbits = 0;
}
Ok(())
}
fn write_single_entry_huffman_tree(&mut self, symbol: u8) -> io::Result<()> {
self.write_bits(1, 2)?;
if symbol <= 1 {
self.write_bits(0, 1)?;
self.write_bits(symbol as u64, 1)?;
} else {
self.write_bits(1, 1)?;
self.write_bits(symbol as u64, 8)?;
}
Ok(())
}
fn build_huffman_tree(
&mut self,
frequencies: &[u32],
lengths: &mut [u8],
codes: &mut [u16],
length_limit: u8,
) -> bool {
assert_eq!(frequencies.len(), lengths.len());
assert_eq!(frequencies.len(), codes.len());
if frequencies.iter().filter(|&&f| f > 0).count() <= 1 {
lengths.fill(0);
codes.fill(0);
return false;
}
#[derive(Eq, PartialEq, Copy, Clone, Debug)]
struct Item(u32, u16);
impl Ord for Item {
fn cmp(&self, other: &Self) -> std::cmp::Ordering {
other.0.cmp(&self.0)
}
}
impl PartialOrd for Item {
fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
Some(self.cmp(other))
}
}
// Build a huffman tree
let mut internal_nodes = Vec::new();
let mut nodes = BinaryHeap::from_iter(
frequencies
.iter()
.enumerate()
.filter(|(_, &frequency)| frequency > 0)
.map(|(i, &frequency)| Item(frequency, i as u16)),
);
while nodes.len() > 1 {
let Item(frequency1, index1) = nodes.pop().unwrap();
let mut root = nodes.peek_mut().unwrap();
internal_nodes.push((index1, root.1));
*root = Item(
frequency1 + root.0,
internal_nodes.len() as u16 + frequencies.len() as u16 - 1,
);
}
// Walk the tree to assign code lengths
lengths.fill(0);
let mut stack = Vec::new();
stack.push((nodes.pop().unwrap().1, 0));
while let Some((node, depth)) = stack.pop() {
let node = node as usize;
if node < frequencies.len() {
lengths[node] = depth as u8;
} else {
let (left, right) = internal_nodes[node - frequencies.len()];
stack.push((left, depth + 1));
stack.push((right, depth + 1));
}
}
// Limit the codes to length length_limit
let mut max_length = 0;
for &length in lengths.iter() {
max_length = max_length.max(length);
}
if max_length > length_limit {
let mut counts = [0u32; 16];
for &length in lengths.iter() {
counts[length.min(length_limit) as usize] += 1;
}
let mut total = 0;
for (i, count) in counts
.iter()
.enumerate()
.skip(1)
.take(length_limit as usize)
{
total += count << (length_limit as usize - i);
}
while total > 1u32 << length_limit {
let mut i = length_limit as usize - 1;
while counts[i] == 0 {
i -= 1;
}
counts[i] -= 1;
counts[length_limit as usize] -= 1;
counts[i + 1] += 2;
total -= 1;
}
// assign new lengths
let mut len = length_limit;
let mut indexes = frequencies.iter().copied().enumerate().collect::<Vec<_>>();
indexes.sort_unstable_by_key(|&(_, frequency)| frequency);
for &(i, frequency) in indexes.iter() {
if frequency > 0 {
while counts[len as usize] == 0 {
len -= 1;
}
lengths[i] = len;
counts[len as usize] -= 1;
}
}
}
// Assign codes
codes.fill(0);
let mut code = 0u32;
for len in 1..=length_limit {
for (i, &length) in lengths.iter().enumerate() {
if length == len {
codes[i] = (code as u16).reverse_bits() >> (16 - len);
code += 1;
}
}
code <<= 1;
}
assert_eq!(code, 2 << length_limit);
true
}
fn write_huffman_tree(
&mut self,
frequencies: &[u32],
lengths: &mut [u8],
codes: &mut [u16],
) -> io::Result<()> {
if !self.build_huffman_tree(frequencies, lengths, codes, 15) {
let symbol = frequencies
.iter()
.position(|&frequency| frequency > 0)
.unwrap_or(0);
return self.write_single_entry_huffman_tree(symbol as u8);
}
let mut code_length_lengths = [0u8; 16];
let mut code_length_codes = [0u16; 16];
let mut code_length_frequencies = [0u32; 16];
for &length in lengths.iter() {
code_length_frequencies[length as usize] += 1;
}
let single_code_length_length = !self.build_huffman_tree(
&code_length_frequencies,
&mut code_length_lengths,
&mut code_length_codes,
7,
);
const CODE_LENGTH_ORDER: [usize; 19] = [
17, 18, 0, 1, 2, 3, 4, 5, 16, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
];
// Write the huffman tree
self.write_bits(0, 1)?; // normal huffman tree
self.write_bits(19 - 4, 4)?; // num_code_lengths - 4
for &i in CODE_LENGTH_ORDER.iter() {
if i > 15 || code_length_frequencies[i] == 0 {
self.write_bits(0, 3)?;
} else if single_code_length_length {
self.write_bits(1, 3)?;
} else {
self.write_bits(code_length_lengths[i] as u64, 3)?;
}
}
match lengths.len() {
256 => {
self.write_bits(1, 1)?; // max_symbol is stored
self.write_bits(3, 3)?; // max_symbol_nbits / 2 - 2
self.write_bits(254, 8)?; // max_symbol - 2
}
280 => self.write_bits(0, 1)?,
_ => unreachable!(),
}
// Write the huffman codes
if !single_code_length_length {
for &len in lengths.iter() {
self.write_bits(
code_length_codes[len as usize] as u64,
code_length_lengths[len as usize],
)?;
}
}
Ok(())
}
fn length_to_symbol(len: u16) -> (u16, u8) {
let len = len - 1;
let highest_bit = 15 - len.leading_zeros() as u16; // TODO: use ilog2 once MSRV >= 1.67
let second_highest_bit = (len >> (highest_bit - 1)) & 1;
let extra_bits = highest_bit - 1;
let symbol = 2 * highest_bit + second_highest_bit;
(symbol, extra_bits as u8)
}
#[inline(always)]
fn count_run(
pixel: &[u8],
it: &mut std::iter::Peekable<ChunksExact<u8>>,
frequencies1: &mut [u32; 280],
) {
let mut run_length = 0;
while run_length < 4096 && it.peek() == Some(&pixel) {
run_length += 1;
it.next();
}
if run_length > 0 {
if run_length <= 4 {
let symbol = 256 + run_length - 1;
frequencies1[symbol] += 1;
} else {
let (symbol, _extra_bits) = Self::length_to_symbol(run_length as u16);
frequencies1[256 + symbol as usize] += 1;
}
}
}
#[inline(always)]
fn write_run(
&mut self,
pixel: &[u8],
it: &mut std::iter::Peekable<ChunksExact<u8>>,
codes1: &[u16; 280],
lengths1: &[u8; 280],
) -> io::Result<()> {
let mut run_length = 0;
while run_length < 4096 && it.peek() == Some(&pixel) {
run_length += 1;
it.next();
}
if run_length > 0 {
if run_length <= 4 {
let symbol = 256 + run_length - 1;
self.write_bits(codes1[symbol] as u64, lengths1[symbol])?;
} else {
let (symbol, extra_bits) = Self::length_to_symbol(run_length as u16);
self.write_bits(
codes1[256 + symbol as usize] as u64,
lengths1[256 + symbol as usize],
)?;
self.write_bits(
(run_length as u64 - 1) & ((1 << extra_bits) - 1),
extra_bits,
)?;
}
}
Ok(())
}
fn encode_lossless(
mut self,
data: &[u8],
width: u32,
height: u32,
color: ColorType,
) -> ImageResult<()> {
if width == 0
|| width > 16384
|| height == 0
|| height > 16384
|| !SampleLayout::row_major_packed(color.channel_count(), width, height)
.fits(data.len())
{
return Err(ImageError::Parameter(ParameterError::from_kind(
ParameterErrorKind::DimensionMismatch,
)));
}
let (is_color, is_alpha) = match color {
ColorType::L8 => (false, false),
ColorType::La8 => (false, true),
ColorType::Rgb8 => (true, false),
ColorType::Rgba8 => (true, true),
_ => {
return Err(ImageError::Unsupported(
UnsupportedError::from_format_and_kind(
ImageFormat::WebP.into(),
UnsupportedErrorKind::Color(color.into()),
),
))
}
};
self.write_bits(0x2f, 8)?; // signature
self.write_bits(width as u64 - 1, 14)?;
self.write_bits(height as u64 - 1, 14)?;
self.write_bits(is_alpha as u64, 1)?; // alpha used
self.write_bits(0x0, 3)?; // version
// subtract green transform
self.write_bits(0b101, 3)?;
// predictor transform
self.write_bits(0b111001, 6)?;
self.write_bits(0x0, 1)?; // no color cache
self.write_single_entry_huffman_tree(2)?;
for _ in 0..4 {
self.write_single_entry_huffman_tree(0)?;
}
// transforms done
self.write_bits(0x0, 1)?;
// color cache
self.write_bits(0x0, 1)?;
// meta-huffman codes
self.write_bits(0x0, 1)?;
// expand to RGBA
let mut pixels = match color {
ColorType::L8 => data.iter().flat_map(|&p| [p, p, p, 255]).collect(),
ColorType::La8 => data
.chunks_exact(2)
.flat_map(|p| [p[0], p[0], p[0], p[1]])
.collect(),
ColorType::Rgb8 => data
.chunks_exact(3)
.flat_map(|p| [p[0], p[1], p[2], 255])
.collect(),
ColorType::Rgba8 => data.to_vec(),
_ => unreachable!(),
};
// compute subtract green transform
for pixel in pixels.chunks_exact_mut(4) {
pixel[0] = pixel[0].wrapping_sub(pixel[1]);
pixel[2] = pixel[2].wrapping_sub(pixel[1]);
}
// compute predictor transform
let row_bytes = width as usize * 4;
for y in (1..height as usize).rev() {
let (prev, current) =
pixels[(y - 1) * row_bytes..][..row_bytes * 2].split_at_mut(row_bytes);
for (c, p) in current.iter_mut().zip(prev) {
*c = c.wrapping_sub(*p);
}
}
for i in (4..row_bytes).rev() {
pixels[i] = pixels[i].wrapping_sub(pixels[i - 4]);
}
pixels[3] = pixels[3].wrapping_sub(255);
// compute frequencies
let mut frequencies0 = [0u32; 256];
let mut frequencies1 = [0u32; 280];
let mut frequencies2 = [0u32; 256];
let mut frequencies3 = [0u32; 256];
let mut it = pixels.chunks_exact(4).peekable();
match color {
ColorType::L8 => {
frequencies0[0] = 1;
frequencies2[0] = 1;
frequencies3[0] = 1;
while let Some(pixel) = it.next() {
frequencies1[pixel[1] as usize] += 1;
Self::count_run(pixel, &mut it, &mut frequencies1);
}
}
ColorType::La8 => {
frequencies0[0] = 1;
frequencies2[0] = 1;
while let Some(pixel) = it.next() {
frequencies1[pixel[1] as usize] += 1;
frequencies3[pixel[3] as usize] += 1;
Self::count_run(pixel, &mut it, &mut frequencies1);
}
}
ColorType::Rgb8 => {
frequencies3[0] = 1;
while let Some(pixel) = it.next() {
frequencies0[pixel[0] as usize] += 1;
frequencies1[pixel[1] as usize] += 1;
frequencies2[pixel[2] as usize] += 1;
Self::count_run(pixel, &mut it, &mut frequencies1);
}
}
ColorType::Rgba8 => {
while let Some(pixel) = it.next() {
frequencies0[pixel[0] as usize] += 1;
frequencies1[pixel[1] as usize] += 1;
frequencies2[pixel[2] as usize] += 1;
frequencies3[pixel[3] as usize] += 1;
Self::count_run(pixel, &mut it, &mut frequencies1);
}
}
_ => unreachable!(),
}
// compute and write huffman codes
let mut lengths0 = [0u8; 256];
let mut lengths1 = [0u8; 280];
let mut lengths2 = [0u8; 256];
let mut lengths3 = [0u8; 256];
let mut codes0 = [0u16; 256];
let mut codes1 = [0u16; 280];
let mut codes2 = [0u16; 256];
let mut codes3 = [0u16; 256];
self.write_huffman_tree(&frequencies1, &mut lengths1, &mut codes1)?;
if is_color {
self.write_huffman_tree(&frequencies0, &mut lengths0, &mut codes0)?;
self.write_huffman_tree(&frequencies2, &mut lengths2, &mut codes2)?;
} else {
self.write_single_entry_huffman_tree(0)?;
self.write_single_entry_huffman_tree(0)?;
}
if is_alpha {
self.write_huffman_tree(&frequencies3, &mut lengths3, &mut codes3)?;
} else {
self.write_single_entry_huffman_tree(0)?;
}
self.write_single_entry_huffman_tree(1)?;
// Write image data
let mut it = pixels.chunks_exact(4).peekable();
match color {
ColorType::L8 => {
while let Some(pixel) = it.next() {
self.write_bits(
codes1[pixel[1] as usize] as u64,
lengths1[pixel[1] as usize],
)?;
self.write_run(pixel, &mut it, &codes1, &lengths1)?;
}
}
ColorType::La8 => {
while let Some(pixel) = it.next() {
let len1 = lengths1[pixel[1] as usize];
let len3 = lengths3[pixel[3] as usize];
let code = codes1[pixel[1] as usize] as u64
| (codes3[pixel[3] as usize] as u64) << len1;
self.write_bits(code, len1 + len3)?;
self.write_run(pixel, &mut it, &codes1, &lengths1)?;
}
}
ColorType::Rgb8 => {
while let Some(pixel) = it.next() {
let len1 = lengths1[pixel[1] as usize];
let len0 = lengths0[pixel[0] as usize];
let len2 = lengths2[pixel[2] as usize];
let code = codes1[pixel[1] as usize] as u64
| (codes0[pixel[0] as usize] as u64) << len1
| (codes2[pixel[2] as usize] as u64) << (len1 + len0);
self.write_bits(code, len1 + len0 + len2)?;
self.write_run(pixel, &mut it, &codes1, &lengths1)?;
}
}
ColorType::Rgba8 => {
while let Some(pixel) = it.next() {
let len1 = lengths1[pixel[1] as usize];
let len0 = lengths0[pixel[0] as usize];
let len2 = lengths2[pixel[2] as usize];
let len3 = lengths3[pixel[3] as usize];
let code = codes1[pixel[1] as usize] as u64
| (codes0[pixel[0] as usize] as u64) << len1
| (codes2[pixel[2] as usize] as u64) << (len1 + len0)
| (codes3[pixel[3] as usize] as u64) << (len1 + len0 + len2);
self.write_bits(code, len1 + len0 + len2 + len3)?;
self.write_run(pixel, &mut it, &codes1, &lengths1)?;
}
}
_ => unreachable!(),
}
// flush writer
self.flush()?;
if self.chunk_buffer.len() % 2 == 1 {
self.chunk_buffer.push(0);
}
// write container
self.writer.write_all(b"RIFF")?;
self.writer
.write_all(&(self.chunk_buffer.len() as u32 + 12).to_le_bytes())?;
self.writer.write_all(b"WEBP")?;
self.writer.write_all(b"VP8L")?;
self.writer
.write_all(&(self.chunk_buffer.len() as u32).to_le_bytes())?;
self.writer.write_all(&self.chunk_buffer)?;
Ok(())
}
/// Encode image data with the indicated color type.
///
/// The encoder requires image data be Rgb8 or Rgba8.
@ -593,7 +40,24 @@ impl<W: Write> WebPEncoder<W> {
data.len(),
);
self.encode_lossless(data, width, height, color)
let color = match color {
ColorType::L8 => image_webp::ColorType::L8,
ColorType::La8 => image_webp::ColorType::La8,
ColorType::Rgb8 => image_webp::ColorType::Rgb8,
ColorType::Rgba8 => image_webp::ColorType::Rgba8,
_ => {
return Err(ImageError::Unsupported(
UnsupportedError::from_format_and_kind(
ImageFormat::WebP.into(),
UnsupportedErrorKind::Color(color.into()),
),
))
}
};
self.inner
.encode(data, width, height, color)
.map_err(ImageError::from_webp_encode)
}
}
@ -610,6 +74,15 @@ impl<W: Write> ImageEncoder for WebPEncoder<W> {
}
}
impl ImageError {
fn from_webp_encode(e: image_webp::EncodingError) -> Self {
match e {
image_webp::EncodingError::IoError(e) => ImageError::IoError(e),
_ => ImageError::Encoding(EncodingError::new(ImageFormat::WebP.into(), e)),
}
}
}
#[cfg(test)]
mod tests {
use crate::{ImageEncoder, RgbaImage};

View file

@ -1,860 +0,0 @@
use std::convert::TryInto;
use std::io::{self, Cursor, Error, Read, Seek};
use std::{error, fmt};
use super::decoder::{
read_chunk, read_fourcc, read_len_cursor, DecoderError::ChunkHeaderInvalid, WebPRiffChunk,
};
use super::lossless::{LosslessDecoder, LosslessFrame};
use super::vp8::{Frame as VP8Frame, Vp8Decoder};
use crate::error::{DecodingError, ParameterError, ParameterErrorKind};
use crate::image::ImageFormat;
use crate::{
ColorType, Delay, Frame, Frames, ImageError, ImageResult, Rgb, RgbImage, Rgba, RgbaImage,
};
use byteorder::{LittleEndian, ReadBytesExt};
//all errors that can occur while parsing extended chunks in a WebP file
#[derive(Debug, Clone, Copy)]
enum DecoderError {
// Some bits were invalid
InfoBitsInvalid { name: &'static str, value: u32 },
// Alpha chunk doesn't match the frame's size
AlphaChunkSizeMismatch,
// Image is too large, either for the platform's pointer size or generally
ImageTooLarge,
// Frame would go out of the canvas
FrameOutsideImage,
}
impl fmt::Display for DecoderError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
DecoderError::InfoBitsInvalid { name, value } => f.write_fmt(format_args!(
"Info bits `{}` invalid, received value: {}",
name, value
)),
DecoderError::AlphaChunkSizeMismatch => {
f.write_str("Alpha chunk doesn't match the size of the frame")
}
DecoderError::ImageTooLarge => f.write_str("Image is too large to be decoded"),
DecoderError::FrameOutsideImage => {
f.write_str("Frame is too large and would go outside the image")
}
}
}
}
impl From<DecoderError> for ImageError {
fn from(e: DecoderError) -> ImageError {
ImageError::Decoding(DecodingError::new(ImageFormat::WebP.into(), e))
}
}
impl error::Error for DecoderError {}
#[derive(Debug, Clone)]
pub(crate) struct WebPExtendedInfo {
_icc_profile: bool,
_alpha: bool,
_exif_metadata: bool,
_xmp_metadata: bool,
_animation: bool,
canvas_width: u32,
canvas_height: u32,
icc_profile: Option<Vec<u8>>,
}
#[derive(Debug)]
enum ExtendedImageData {
Animation {
frames: Vec<Vec<u8>>,
first_frame: AnimatedFrame,
anim_info: WebPAnimatedInfo,
},
Static(WebPStatic),
}
#[derive(Debug)]
pub(crate) struct ExtendedImage {
info: WebPExtendedInfo,
image: ExtendedImageData,
}
impl ExtendedImage {
    /// Canvas (overall image) dimensions in pixels: (width, height).
    pub(crate) fn dimensions(&self) -> (u32, u32) {
        (self.info.canvas_width, self.info.canvas_height)
    }

    /// True when the VP8X header declared the animation flag.
    pub(crate) fn has_animation(&self) -> bool {
        self.info._animation
    }

    /// Returns a copy of the embedded ICC profile, if an ICCP chunk was present.
    pub(crate) fn icc_profile(&self) -> Option<Vec<u8>> {
        self.info.icc_profile.clone()
    }

    /// Color type of the decoded pixels (based on the first frame for animations).
    pub(crate) fn color_type(&self) -> ColorType {
        match &self.image {
            ExtendedImageData::Animation { first_frame, .. } => &first_frame.image,
            ExtendedImageData::Static(image) => image,
        }
        .color_type()
    }

    /// Converts the image into an iterator of composited, full-canvas frames.
    ///
    /// Frame 0 reuses the already-decoded first frame; later frames are decoded
    /// lazily from their stored ANMF payloads. A non-animated image yields no
    /// frames at all.
    pub(crate) fn into_frames<'a>(self) -> Frames<'a> {
        struct FrameIterator {
            image: ExtendedImage,
            index: usize,
            // Running canvas that successive frames are composited onto.
            canvas: RgbaImage,
        }

        impl Iterator for FrameIterator {
            type Item = ImageResult<Frame>;

            fn next(&mut self) -> Option<Self::Item> {
                if let ExtendedImageData::Animation {
                    frames,
                    anim_info,
                    first_frame,
                } = &self.image.image
                {
                    let anim_frame_data = frames.get(self.index)?;
                    let anim_frame;
                    let frame;
                    if self.index == 0 {
                        // Use already decoded first frame
                        anim_frame = first_frame;
                    } else {
                        // Decode this frame's ANMF payload on demand; a decode
                        // failure silently ends the iteration (`.ok()?`).
                        frame = read_anim_frame(
                            &mut Cursor::new(anim_frame_data),
                            self.image.info.canvas_width,
                            self.image.info.canvas_height,
                        )
                        .ok()?;
                        anim_frame = &frame;
                    };
                    self.index += 1;
                    ExtendedImage::draw_subimage(
                        &mut self.canvas,
                        anim_frame,
                        anim_info.background_color,
                    )
                } else {
                    None
                }
            }
        }

        let width = self.info.canvas_width;
        let height = self.info.canvas_height;
        // Static images have no ANIM chunk; fall back to transparent black.
        let background_color =
            if let ExtendedImageData::Animation { ref anim_info, .. } = self.image {
                anim_info.background_color
            } else {
                Rgba([0, 0, 0, 0])
            };
        let frame_iter = FrameIterator {
            image: self,
            index: 0,
            canvas: RgbaImage::from_pixel(width, height, background_color),
        };
        Frames::new(Box::new(frame_iter))
    }

    /// Reads the chunks that follow a VP8X header and assembles either a static
    /// image or an animation. Animation data takes precedence when both kinds
    /// of chunks appear.
    pub(crate) fn read_extended_chunks<R: Read>(
        reader: &mut R,
        mut info: WebPExtendedInfo,
    ) -> ImageResult<ExtendedImage> {
        let mut anim_info: Option<WebPAnimatedInfo> = None;
        let mut anim_frames: Vec<Vec<u8>> = Vec::new();
        let mut anim_first_frame: Option<AnimatedFrame> = None;
        let mut static_frame: Option<WebPStatic> = None;
        //go until end of file and while chunk headers are valid
        while let Some((mut cursor, chunk)) = read_extended_chunk(reader)? {
            match chunk {
                WebPRiffChunk::EXIF | WebPRiffChunk::XMP => {
                    //ignore these chunks
                }
                WebPRiffChunk::ANIM => {
                    // Only the first ANIM chunk counts; duplicates are ignored.
                    if anim_info.is_none() {
                        anim_info = Some(Self::read_anim_info(&mut cursor)?);
                    }
                }
                WebPRiffChunk::ANMF => {
                    let mut frame_data = Vec::new();
                    // Store first frame decoded to avoid decoding it for certain function calls
                    if anim_first_frame.is_none() {
                        anim_first_frame = Some(read_anim_frame(
                            &mut cursor,
                            info.canvas_width,
                            info.canvas_height,
                        )?);
                        cursor.rewind().unwrap();
                    }
                    // Keep the raw payload so later frames can be decoded lazily.
                    cursor.read_to_end(&mut frame_data)?;
                    anim_frames.push(frame_data);
                }
                WebPRiffChunk::ALPH => {
                    // Alpha plane for a lossy still image; the matching VP8
                    // chunk follows immediately after.
                    if static_frame.is_none() {
                        let alpha_chunk =
                            read_alpha_chunk(&mut cursor, info.canvas_width, info.canvas_height)?;
                        let vp8_frame = read_lossy_with_chunk(reader)?;
                        let img = WebPStatic::from_alpha_lossy(alpha_chunk, vp8_frame)?;
                        static_frame = Some(img);
                    }
                }
                WebPRiffChunk::ICCP => {
                    let mut icc_profile = Vec::new();
                    cursor.read_to_end(&mut icc_profile)?;
                    info.icc_profile = Some(icc_profile);
                }
                WebPRiffChunk::VP8 => {
                    if static_frame.is_none() {
                        let vp8_frame = read_lossy(cursor)?;
                        let img = WebPStatic::from_lossy(vp8_frame)?;
                        static_frame = Some(img);
                    }
                }
                WebPRiffChunk::VP8L => {
                    if static_frame.is_none() {
                        let mut lossless_decoder = LosslessDecoder::new(cursor);
                        let frame = lossless_decoder.decode_frame()?;
                        let image = WebPStatic::Lossless(frame.clone());
                        static_frame = Some(image);
                    }
                }
                _ => return Err(ChunkHeaderInvalid(chunk.to_fourcc()).into()),
            }
        }
        // An animation requires both an ANIM chunk and at least one ANMF frame.
        let image = if let (Some(anim_info), Some(first_frame)) = (anim_info, anim_first_frame) {
            ExtendedImageData::Animation {
                frames: anim_frames,
                first_frame,
                anim_info,
            }
        } else if let Some(frame) = static_frame {
            ExtendedImageData::Static(frame)
        } else {
            //reached end of file too early before image data was reached
            return Err(ImageError::IoError(Error::from(
                io::ErrorKind::UnexpectedEof,
            )));
        };
        let image = ExtendedImage { image, info };
        Ok(image)
    }

    /// Parses an ANIM chunk body: BGRA background color, then a 16-bit
    /// little-endian loop count.
    fn read_anim_info<R: Read>(reader: &mut R) -> ImageResult<WebPAnimatedInfo> {
        let mut colors: [u8; 4] = [0; 4];
        reader.read_exact(&mut colors)?;
        //background color is [blue, green, red, alpha]
        let background_color = Rgba([colors[2], colors[1], colors[0], colors[3]]);
        let loop_count = reader.read_u16::<LittleEndian>()?;
        let info = WebPAnimatedInfo {
            background_color,
            _loop_count: loop_count,
        };
        Ok(info)
    }

    /// Composites `anim_image` onto `canvas` at its offset and returns the
    /// resulting full-canvas frame. Afterwards, if the frame's dispose flag is
    /// set, the covered region is reset to `background_color`.
    fn draw_subimage(
        canvas: &mut RgbaImage,
        anim_image: &AnimatedFrame,
        background_color: Rgba<u8>,
    ) -> Option<ImageResult<Frame>> {
        let mut buffer = vec![0; anim_image.image.get_buf_size()];
        anim_image.image.fill_buf(&mut buffer);
        let has_alpha = anim_image.image.has_alpha();
        // 3 (RGB) or 4 (RGBA) bytes per source pixel.
        let pixel_len: u32 = anim_image.image.color_type().bytes_per_pixel().into();

        'x: for x in 0..anim_image.width {
            for y in 0..anim_image.height {
                let canvas_index: (u32, u32) = (x + anim_image.offset_x, y + anim_image.offset_y);
                // Negative offsets are not possible due to unsigned ints
                // If we go out of bounds by height, still continue by x
                if canvas_index.1 >= canvas.height() {
                    continue 'x;
                }
                // If we go out of bounds by width, it doesn't make sense to continue at all
                if canvas_index.0 >= canvas.width() {
                    break 'x;
                }
                let index: usize = ((y * anim_image.width + x) * pixel_len).try_into().unwrap();
                canvas[canvas_index] = if anim_image.use_alpha_blending && has_alpha {
                    let buffer: [u8; 4] = buffer[index..][..4].try_into().unwrap();
                    ExtendedImage::do_alpha_blending(buffer, canvas[canvas_index])
                } else {
                    // Plain overwrite; RGB sources get an opaque alpha.
                    Rgba([
                        buffer[index],
                        buffer[index + 1],
                        buffer[index + 2],
                        if has_alpha { buffer[index + 3] } else { 255 },
                    ])
                };
            }
        }

        // Frame duration is stored in milliseconds.
        let delay = Delay::from_numer_denom_ms(anim_image.duration, 1);
        let img = canvas.clone();
        let frame = Frame::from_parts(img, 0, 0, delay);

        if anim_image.dispose {
            // NOTE(review): unlike the blend loop above, there is no bounds
            // check here — this relies on the frame fitting the canvas, which
            // `read_anim_frame` validates.
            for x in 0..anim_image.width {
                for y in 0..anim_image.height {
                    let canvas_index = (x + anim_image.offset_x, y + anim_image.offset_y);
                    canvas[canvas_index] = background_color;
                }
            }
        }

        Some(Ok(frame))
    }

    /// Source-over alpha compositing of the RGBA `buffer` pixel onto the
    /// `canvas` pixel, computed in f64 and truncated back to u8.
    fn do_alpha_blending(buffer: [u8; 4], canvas: Rgba<u8>) -> Rgba<u8> {
        let canvas_alpha = f64::from(canvas[3]);
        let buffer_alpha = f64::from(buffer[3]);
        let blend_alpha_f64 = buffer_alpha + canvas_alpha * (1.0 - buffer_alpha / 255.0);
        //value should be between 0 and 255, this truncates the fractional part
        let blend_alpha: u8 = blend_alpha_f64 as u8;

        let blend_rgb: [u8; 3] = if blend_alpha == 0 {
            // Fully transparent result: color channels are irrelevant.
            [0, 0, 0]
        } else {
            let mut rgb = [0u8; 3];
            for i in 0..3 {
                let canvas_f64 = f64::from(canvas[i]);
                let buffer_f64 = f64::from(buffer[i]);
                let val = (buffer_f64 * buffer_alpha
                    + canvas_f64 * canvas_alpha * (1.0 - buffer_alpha / 255.0))
                    / blend_alpha_f64;
                //value should be between 0 and 255, this truncates the fractional part
                rgb[i] = val as u8;
            }
            rgb
        };

        Rgba([blend_rgb[0], blend_rgb[1], blend_rgb[2], blend_alpha])
    }

    /// Fills `buf` with the decoded pixels of the whole canvas.
    ///
    /// For animations only the first frame is rendered; if it does not cover
    /// the full canvas, it is composited over the background color first.
    pub(crate) fn fill_buf(&self, buf: &mut [u8]) {
        match &self.image {
            // will always have at least one frame
            ExtendedImageData::Animation {
                anim_info,
                first_frame,
                ..
            } => {
                let (canvas_width, canvas_height) = self.dimensions();
                if canvas_width == first_frame.width && canvas_height == first_frame.height {
                    // Frame covers the canvas exactly; copy it straight through.
                    first_frame.image.fill_buf(buf);
                } else {
                    let bg_color = match &self.info._alpha {
                        true => Rgba::from([0, 0, 0, 0]),
                        false => anim_info.background_color,
                    };
                    let mut canvas = RgbaImage::from_pixel(canvas_width, canvas_height, bg_color);
                    let _ = ExtendedImage::draw_subimage(&mut canvas, first_frame, bg_color)
                        .unwrap()
                        .unwrap();
                    // Assumes `buf` is exactly canvas_width * canvas_height * 4 bytes.
                    buf.copy_from_slice(canvas.into_raw().as_slice());
                }
            }
            ExtendedImageData::Static(image) => {
                image.fill_buf(buf);
            }
        }
    }

    /// Overrides the ANIM background color; errors for non-animated images.
    pub(crate) fn set_background_color(&mut self, color: Rgba<u8>) -> ImageResult<()> {
        match &mut self.image {
            ExtendedImageData::Animation { anim_info, .. } => {
                anim_info.background_color = color;
                Ok(())
            }
            _ => Err(ImageError::Parameter(ParameterError::from_kind(
                ParameterErrorKind::Generic(
                    "Background color can only be set on animated webp".to_owned(),
                ),
            ))),
        }
    }
}
/// A single decoded still image in one of the three WebP pixel encodings.
#[derive(Debug)]
enum WebPStatic {
    LossyWithAlpha(RgbaImage),
    LossyWithoutAlpha(RgbImage),
    Lossless(LosslessFrame),
}
impl WebPStatic {
    /// Combines a decoded ALPH chunk with a lossy VP8 frame into an RGBA image.
    ///
    /// The alpha plane is stored filtered; each value is reconstructed by
    /// adding the predictor for the chosen filtering method (mod 256).
    pub(crate) fn from_alpha_lossy(
        alpha: AlphaChunk,
        vp8_frame: VP8Frame,
    ) -> ImageResult<WebPStatic> {
        // The alpha plane must hold exactly one byte per pixel.
        if alpha.data.len() != usize::from(vp8_frame.width) * usize::from(vp8_frame.height) {
            return Err(DecoderError::AlphaChunkSizeMismatch.into());
        }

        // 4 bytes per pixel; guard the multiplication against overflow.
        let size = usize::from(vp8_frame.width).checked_mul(usize::from(vp8_frame.height) * 4);
        let mut image_vec = match size {
            Some(size) => vec![0u8; size],
            None => return Err(DecoderError::ImageTooLarge.into()),
        };

        vp8_frame.fill_rgba(&mut image_vec);

        for y in 0..vp8_frame.height {
            for x in 0..vp8_frame.width {
                // Predictors read previously reconstructed alpha values, so the
                // scan order (row-major, top-left to bottom-right) matters.
                let predictor: u8 = WebPStatic::get_predictor(
                    x.into(),
                    y.into(),
                    vp8_frame.width.into(),
                    alpha.filtering_method,
                    &image_vec,
                );
                let predictor = u16::from(predictor);

                let alpha_index = usize::from(y) * usize::from(vp8_frame.width) + usize::from(x);
                let alpha_val = alpha.data[alpha_index];
                // Undo the filter: reconstructed = (predictor + delta) mod 256.
                let alpha: u8 = ((predictor + u16::from(alpha_val)) % 256)
                    .try_into()
                    .unwrap();

                let alpha_index = alpha_index * 4 + 3;
                image_vec[alpha_index] = alpha;
            }
        }

        let image = RgbaImage::from_vec(vp8_frame.width.into(), vp8_frame.height.into(), image_vec)
            .unwrap();

        Ok(WebPStatic::LossyWithAlpha(image))
    }

    /// Returns the alpha predictor for pixel (x, y), reading previously
    /// reconstructed alpha values out of `image_slice` (RGBA layout, so the
    /// alpha of pixel `i` lives at `i * 4 + 3`).
    fn get_predictor(
        x: usize,
        y: usize,
        width: usize,
        filtering_method: FilteringMethod,
        image_slice: &[u8],
    ) -> u8 {
        match filtering_method {
            FilteringMethod::None => 0,
            FilteringMethod::Horizontal => {
                // Predict from the left neighbour; the very first pixel has
                // none, and the first column falls back to the pixel above.
                if x == 0 && y == 0 {
                    0
                } else if x == 0 {
                    let index = (y - 1) * width + x;
                    image_slice[index * 4 + 3]
                } else {
                    let index = y * width + x - 1;
                    image_slice[index * 4 + 3]
                }
            }
            FilteringMethod::Vertical => {
                // Predict from the neighbour above; the first row falls back to
                // the pixel on the left.
                if x == 0 && y == 0 {
                    0
                } else if y == 0 {
                    let index = y * width + x - 1;
                    image_slice[index * 4 + 3]
                } else {
                    let index = (y - 1) * width + x;
                    image_slice[index * 4 + 3]
                }
            }
            FilteringMethod::Gradient => {
                // Gradient predictor: clamp(left + top - top_left, 0..=255),
                // with edge pixels degenerating to their single neighbour.
                let (left, top, top_left) = match (x, y) {
                    (0, 0) => (0, 0, 0),
                    (0, y) => {
                        let above_index = (y - 1) * width + x;
                        let val = image_slice[above_index * 4 + 3];
                        (val, val, val)
                    }
                    (x, 0) => {
                        let before_index = y * width + x - 1;
                        let val = image_slice[before_index * 4 + 3];
                        (val, val, val)
                    }
                    (x, y) => {
                        let left_index = y * width + x - 1;
                        let left = image_slice[left_index * 4 + 3];
                        let top_index = (y - 1) * width + x;
                        let top = image_slice[top_index * 4 + 3];
                        let top_left_index = (y - 1) * width + x - 1;
                        let top_left = image_slice[top_left_index * 4 + 3];
                        (left, top, top_left)
                    }
                };

                let combination = i16::from(left) + i16::from(top) - i16::from(top_left);
                i16::clamp(combination, 0, 255).try_into().unwrap()
            }
        }
    }

    /// Wraps a plain lossy VP8 frame (no alpha plane) as an RGB image.
    pub(crate) fn from_lossy(vp8_frame: VP8Frame) -> ImageResult<WebPStatic> {
        let mut image = RgbImage::from_pixel(
            vp8_frame.width.into(),
            vp8_frame.height.into(),
            Rgb([0, 0, 0]),
        );
        vp8_frame.fill_rgb(&mut image);
        Ok(WebPStatic::LossyWithoutAlpha(image))
    }

    /// Copies the pixel data into `buf`, which must be exactly
    /// `get_buf_size()` bytes long.
    pub(crate) fn fill_buf(&self, buf: &mut [u8]) {
        match self {
            WebPStatic::LossyWithAlpha(image) => {
                buf.copy_from_slice(image);
            }
            WebPStatic::LossyWithoutAlpha(image) => {
                buf.copy_from_slice(image);
            }
            WebPStatic::Lossless(lossless) => {
                lossless.fill_rgba(buf);
            }
        }
    }

    /// Size in bytes of the decoded pixel buffer.
    pub(crate) fn get_buf_size(&self) -> usize {
        // Fixed: the bindings were previously named backwards (`rgb_image` for
        // the RGBA variant and vice versa); behavior is unchanged.
        match self {
            WebPStatic::LossyWithAlpha(rgba_image) => rgba_image.len(),
            WebPStatic::LossyWithoutAlpha(rgb_image) => rgb_image.len(),
            WebPStatic::Lossless(lossless) => lossless.get_buf_size(),
        }
    }

    /// Rgba8 unless this is a lossy image without an alpha plane.
    pub(crate) fn color_type(&self) -> ColorType {
        if self.has_alpha() {
            ColorType::Rgba8
        } else {
            ColorType::Rgb8
        }
    }

    /// Lossless frames are always expanded to RGBA, so they count as alpha.
    pub(crate) fn has_alpha(&self) -> bool {
        match self {
            Self::LossyWithAlpha(..) | Self::Lossless(..) => true,
            Self::LossyWithoutAlpha(..) => false,
        }
    }
}
/// Parameters parsed from an ANIM chunk.
#[derive(Debug)]
struct WebPAnimatedInfo {
    background_color: Rgba<u8>,
    // Parsed but not currently used by the decoder.
    _loop_count: u16,
}
/// One ANMF frame: placement on the canvas, timing, compositing flags, and the
/// decoded pixels.
#[derive(Debug)]
struct AnimatedFrame {
    // Offsets are stored halved in the file; these are real pixel offsets.
    offset_x: u32,
    offset_y: u32,
    width: u32,
    height: u32,
    // Display duration in milliseconds.
    duration: u32,
    // When false, the frame overwrites the canvas instead of blending.
    use_alpha_blending: bool,
    // When set, the frame's area is reset to the background color afterwards.
    dispose: bool,
    image: WebPStatic,
}
/// Reads a chunk, but silently ignores unknown chunks at the end of a file
///
/// The first unrecognized-chunk error is remembered and only surfaced if a
/// recognized chunk follows it; trailing unknown chunks are dropped.
fn read_extended_chunk<R>(r: &mut R) -> ImageResult<Option<(Cursor<Vec<u8>>, WebPRiffChunk)>>
where
    R: Read,
{
    let mut deferred = Ok(());
    while let Some(fourcc) = read_fourcc(r)? {
        let payload = read_len_cursor(r)?;
        match fourcc {
            // `and` keeps the earliest deferred error, if any.
            Ok(known) => return deferred.and(Ok(Some((payload, known)))),
            Err(e) => deferred = deferred.and(Err(e)),
        }
    }
    Ok(None)
}
/// Parses the body of a VP8X chunk: one flag byte, three reserved bytes, then
/// the canvas dimensions (each stored as a 24-bit value minus one).
pub(crate) fn read_extended_header<R: Read>(reader: &mut R) -> ImageResult<WebPExtendedInfo> {
    let chunk_flags = reader.read_u8()?;

    // Flag byte layout (MSB first): 2 reserved bits, ICC, alpha, EXIF, XMP,
    // animation, 1 reserved bit.
    let reserved_first = chunk_flags & 0b11000000;
    let icc_profile = chunk_flags & 0b00100000 != 0;
    let alpha = chunk_flags & 0b00010000 != 0;
    let exif_metadata = chunk_flags & 0b00001000 != 0;
    let xmp_metadata = chunk_flags & 0b00000100 != 0;
    let animation = chunk_flags & 0b00000010 != 0;
    let reserved_second = chunk_flags & 0b00000001;

    let reserved_third = read_3_bytes(reader)?;

    // Every reserved bit must be zero; report the first nonzero group.
    if reserved_first != 0 || reserved_second != 0 || reserved_third != 0 {
        let value = if reserved_first != 0 {
            u32::from(reserved_first)
        } else if reserved_second != 0 {
            u32::from(reserved_second)
        } else {
            reserved_third
        };
        return Err(DecoderError::InfoBitsInvalid {
            name: "reserved",
            value,
        }
        .into());
    }

    let canvas_width = read_3_bytes(reader)? + 1;
    let canvas_height = read_3_bytes(reader)? + 1;

    //product of canvas dimensions cannot be larger than u32 max
    if canvas_width.checked_mul(canvas_height).is_none() {
        return Err(DecoderError::ImageTooLarge.into());
    }

    Ok(WebPExtendedInfo {
        _icc_profile: icc_profile,
        _alpha: alpha,
        _exif_metadata: exif_metadata,
        _xmp_metadata: xmp_metadata,
        _animation: animation,
        canvas_width,
        canvas_height,
        icc_profile: None,
    })
}
/// Parses one ANMF chunk: a placement/timing header followed by the frame's
/// own image bitstream (ALPH/VP8/VP8L).
fn read_anim_frame<R: Read>(
    mut reader: R,
    canvas_width: u32,
    canvas_height: u32,
) -> ImageResult<AnimatedFrame> {
    //offsets for the frames are twice the values
    let frame_x = read_3_bytes(&mut reader)? * 2;
    let frame_y = read_3_bytes(&mut reader)? * 2;

    // Dimensions are stored minus one.
    let frame_width = read_3_bytes(&mut reader)? + 1;
    let frame_height = read_3_bytes(&mut reader)? + 1;

    // The frame rectangle must lie fully inside the canvas.
    if frame_x + frame_width > canvas_width || frame_y + frame_height > canvas_height {
        return Err(DecoderError::FrameOutsideImage.into());
    }

    // Duration in milliseconds.
    let duration = read_3_bytes(&mut reader)?;

    let frame_info = reader.read_u8()?;
    // Top 6 bits of the frame-info byte are reserved and must be zero.
    let reserved = frame_info & 0b11111100;
    if reserved != 0 {
        return Err(DecoderError::InfoBitsInvalid {
            name: "reserved",
            value: reserved.into(),
        }
        .into());
    }
    // Bit 1: blending method (0 = alpha-blend); bit 0: dispose to background.
    let use_alpha_blending = frame_info & 0b00000010 == 0;
    let dispose = frame_info & 0b00000001 != 0;

    //read normal bitstream now
    let static_image = read_image(&mut reader, frame_width, frame_height)?;

    let frame = AnimatedFrame {
        offset_x: frame_x,
        offset_y: frame_y,
        width: frame_width,
        height: frame_height,
        duration,
        use_alpha_blending,
        dispose,
        image: static_image,
    };

    Ok(frame)
}
/// Reads a 24-bit little-endian unsigned integer.
fn read_3_bytes<R: Read>(reader: &mut R) -> ImageResult<u32> {
    // Read the three bytes into the low end of a 4-byte buffer and let
    // `from_le_bytes` assemble the value; the high byte stays zero.
    let mut le_bytes = [0u8; 4];
    reader.read_exact(&mut le_bytes[..3])?;
    Ok(u32::from_le_bytes(le_bytes))
}
/// Reads the next chunk, requires it to be a VP8 chunk, and decodes its
/// payload as a lossy frame. Used after an ALPH chunk has been consumed.
fn read_lossy_with_chunk<R: Read>(reader: &mut R) -> ImageResult<VP8Frame> {
    let (cursor, chunk) =
        read_chunk(reader)?.ok_or_else(|| Error::from(io::ErrorKind::UnexpectedEof))?;

    if chunk != WebPRiffChunk::VP8 {
        return Err(ChunkHeaderInvalid(chunk.to_fourcc()).into());
    }

    read_lossy(cursor)
}
/// Decodes the VP8 payload in `cursor` into an owned lossy frame.
fn read_lossy(cursor: Cursor<Vec<u8>>) -> ImageResult<VP8Frame> {
    let mut decoder = Vp8Decoder::new(cursor);
    let frame = decoder.decode_frame()?;
    Ok(frame.clone())
}
/// Decodes the image bitstream of a frame — ALPH-prefixed lossy, plain lossy
/// VP8, or lossless VP8L — into a static image.
fn read_image<R: Read>(reader: &mut R, width: u32, height: u32) -> ImageResult<WebPStatic> {
    match read_chunk(reader)? {
        Some((cursor, WebPRiffChunk::VP8)) => {
            let mut decoder = Vp8Decoder::new(cursor);
            let frame = decoder.decode_frame()?;
            WebPStatic::from_lossy(frame.clone())
        }
        Some((cursor, WebPRiffChunk::VP8L)) => {
            let mut decoder = LosslessDecoder::new(cursor);
            let frame = decoder.decode_frame()?;
            Ok(WebPStatic::Lossless(frame.clone()))
        }
        Some((mut cursor, WebPRiffChunk::ALPH)) => {
            // The alpha plane is followed by the VP8 chunk it belongs to.
            let alpha_chunk = read_alpha_chunk(&mut cursor, width, height)?;
            let vp8_frame = read_lossy_with_chunk(reader)?;
            WebPStatic::from_alpha_lossy(alpha_chunk, vp8_frame)
        }
        Some((_, chunk)) => Err(ChunkHeaderInvalid(chunk.to_fourcc()).into()),
        None => Err(ImageError::IoError(Error::from(
            io::ErrorKind::UnexpectedEof,
        ))),
    }
}
/// Decoded contents of an ALPH chunk: one alpha byte per pixel, still filtered
/// by `filtering_method`.
#[derive(Debug)]
struct AlphaChunk {
    _preprocessing: bool,
    filtering_method: FilteringMethod,
    data: Vec<u8>,
}
/// Prediction filter applied to the alpha plane before storage.
#[derive(Debug, Copy, Clone)]
enum FilteringMethod {
    None,
    Horizontal,
    Vertical,
    Gradient,
}
/// Parses an ALPH chunk into an [`AlphaChunk`].
///
/// The one-byte header packs (MSB to LSB): 2 reserved bits, 2 preprocessing
/// bits, 2 filtering-method bits and 2 compression bits. The remaining payload
/// is either raw alpha bytes or a lossless-coded image whose green channel
/// holds the alpha plane.
fn read_alpha_chunk<R: Read>(reader: &mut R, width: u32, height: u32) -> ImageResult<AlphaChunk> {
    let info_byte = reader.read_u8()?;

    let reserved = info_byte & 0b11000000;
    let preprocessing = (info_byte & 0b00110000) >> 4;
    let filtering = (info_byte & 0b00001100) >> 2;
    let compression = info_byte & 0b00000011;

    if reserved != 0 {
        return Err(DecoderError::InfoBitsInvalid {
            name: "reserved",
            value: reserved.into(),
        }
        .into());
    }

    let preprocessing = match preprocessing {
        0 => false,
        1 => true,
        _ => {
            // Fixed: this error previously misreported its field as "reserved".
            return Err(DecoderError::InfoBitsInvalid {
                name: "preprocessing",
                value: preprocessing.into(),
            }
            .into())
        }
    };

    let filtering_method = match filtering {
        0 => FilteringMethod::None,
        1 => FilteringMethod::Horizontal,
        2 => FilteringMethod::Vertical,
        3 => FilteringMethod::Gradient,
        // Only two bits were extracted above.
        _ => unreachable!(),
    };

    let lossless_compression = match compression {
        0 => false,
        1 => true,
        _ => {
            return Err(DecoderError::InfoBitsInvalid {
                name: "lossless compression",
                value: compression.into(),
            }
            .into())
        }
    };

    let mut framedata = Vec::new();
    reader.read_to_end(&mut framedata)?;

    let data = if lossless_compression {
        let cursor = io::Cursor::new(framedata);

        let mut decoder = LosslessDecoder::new(cursor);
        //this is a potential problem for large images; would require rewriting lossless decoder to use u32 for width and height
        let width: u16 = width
            .try_into()
            .map_err(|_| ImageError::from(DecoderError::ImageTooLarge))?;
        let height: u16 = height
            .try_into()
            .map_err(|_| ImageError::from(DecoderError::ImageTooLarge))?;
        let frame = decoder.decode_frame_implicit_dims(width, height)?;

        // The alpha values live in the green channel of the lossless image.
        let mut data = vec![0u8; usize::from(width) * usize::from(height)];
        frame.fill_green(&mut data);

        data
    } else {
        framedata
    };

    let chunk = AlphaChunk {
        _preprocessing: preprocessing,
        filtering_method,
        data,
    };

    Ok(chunk)
}

View file

@ -1,202 +0,0 @@
use std::convert::TryInto;
use super::lossless::BitReader;
use super::lossless::DecoderError;
use crate::ImageResult;
/// Rudimentary utility for reading Canonical Huffman Codes.
/// Based off https://github.com/webmproject/libwebp/blob/7f8472a610b61ec780ef0a8873cd954ac512a505/src/utils/huffman.c
///
/// Longest code length the decoder accepts; longer lengths are rejected.
const MAX_ALLOWED_CODE_LENGTH: usize = 15;
/// A node in the flattened Huffman tree vector.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum HuffmanTreeNode {
    Branch(usize), //offset in vector to children
    Leaf(u16),     //symbol stored in leaf
    Empty,
}
/// Huffman tree
///
/// Stored as a flat vector: a branch holds the offset from its own index to
/// its first child; the second child follows immediately after the first.
#[derive(Clone, Debug, Default)]
pub(crate) struct HuffmanTree {
    tree: Vec<HuffmanTreeNode>,
    // Capacity (2 * leaves - 1) and the number of slots allocated so far.
    max_nodes: usize,
    num_nodes: usize,
}
impl HuffmanTree {
    /// True when every node slot has been allocated.
    fn is_full(&self) -> bool {
        self.num_nodes == self.max_nodes
    }

    /// Turns a node from empty into a branch and assigns its children
    fn assign_children(&mut self, node_index: usize) -> usize {
        // Children are placed at the current end of the node pool; the branch
        // stores their offset relative to its own index.
        let offset_index = self.num_nodes - node_index;
        self.tree[node_index] = HuffmanTreeNode::Branch(offset_index);
        self.num_nodes += 2;

        offset_index
    }

    /// Init a huffman tree with room for `num_leaves` symbols.
    ///
    /// A full binary tree with n leaves has 2n - 1 nodes; node 0 is the root.
    fn init(num_leaves: usize) -> ImageResult<HuffmanTree> {
        if num_leaves == 0 {
            return Err(DecoderError::HuffmanError.into());
        }

        let max_nodes = 2 * num_leaves - 1;
        let tree = vec![HuffmanTreeNode::Empty; max_nodes];
        let num_nodes = 1;

        let tree = HuffmanTree {
            tree,
            max_nodes,
            num_nodes,
        };

        Ok(tree)
    }

    /// Converts code lengths to canonical Huffman codes.
    ///
    /// Returns one optional code per symbol; symbols with length 0 get `None`.
    fn code_lengths_to_codes(code_lengths: &[u16]) -> ImageResult<Vec<Option<u16>>> {
        // `max` instead of a hand-rolled `reduce`; an empty slice simply
        // produces an empty code table instead of panicking.
        let max_code_length = code_lengths.iter().copied().max().unwrap_or(0);

        if usize::from(max_code_length) > MAX_ALLOWED_CODE_LENGTH {
            return Err(DecoderError::HuffmanError.into());
        }

        // Histogram of code lengths; length 0 means "symbol unused".
        let mut code_length_hist = [0; MAX_ALLOWED_CODE_LENGTH + 1];

        for &length in code_lengths.iter() {
            code_length_hist[usize::from(length)] += 1;
        }

        code_length_hist[0] = 0;

        // First canonical code of each length.
        let mut curr_code = 0;
        let mut next_codes = [None; MAX_ALLOWED_CODE_LENGTH + 1];

        for code_len in 1..=usize::from(max_code_length) {
            curr_code = (curr_code + code_length_hist[code_len - 1]) << 1;
            next_codes[code_len] = Some(curr_code);
        }

        let mut huff_codes = vec![None; code_lengths.len()];

        for (symbol, &length) in code_lengths.iter().enumerate() {
            let length = usize::from(length);
            if length > 0 {
                huff_codes[symbol] = next_codes[length];
                if let Some(value) = next_codes[length].as_mut() {
                    *value += 1;
                }
            }
            // length == 0: entry is already `None` from the initialization.
        }

        Ok(huff_codes)
    }

    /// Adds a symbol to a huffman tree, walking `code` from its most
    /// significant bit and growing branches as needed.
    fn add_symbol(&mut self, symbol: u16, code: u16, code_length: u16) -> ImageResult<()> {
        let mut node_index = 0;
        let code = usize::from(code);

        for length in (0..code_length).rev() {
            if node_index >= self.max_nodes {
                return Err(DecoderError::HuffmanError.into());
            }
            let node = self.tree[node_index];
            let offset = match node {
                HuffmanTreeNode::Empty => {
                    if self.is_full() {
                        return Err(DecoderError::HuffmanError.into());
                    }
                    self.assign_children(node_index)
                }
                // Hitting a leaf mid-code means the code set is inconsistent.
                HuffmanTreeNode::Leaf(_) => return Err(DecoderError::HuffmanError.into()),
                HuffmanTreeNode::Branch(offset) => offset,
            };
            node_index += offset + ((code >> length) & 1);
        }

        match self.tree[node_index] {
            HuffmanTreeNode::Empty => self.tree[node_index] = HuffmanTreeNode::Leaf(symbol),
            HuffmanTreeNode::Leaf(_) => return Err(DecoderError::HuffmanError.into()),
            HuffmanTreeNode::Branch(_offset) => return Err(DecoderError::HuffmanError.into()),
        }

        Ok(())
    }

    /// Builds a tree implicitly, just from code lengths
    pub(crate) fn build_implicit(code_lengths: Vec<u16>) -> ImageResult<HuffmanTree> {
        // Count used symbols; remember the last one for the single-symbol case.
        let mut num_symbols = 0;
        let mut root_symbol = 0;

        for (symbol, length) in code_lengths.iter().enumerate() {
            if *length > 0 {
                num_symbols += 1;
                root_symbol = symbol.try_into().unwrap();
            }
        }

        let mut tree = HuffmanTree::init(num_symbols)?;

        if num_symbols == 1 {
            // Degenerate tree: a single symbol with a zero-length code.
            tree.add_symbol(root_symbol, 0, 0)?;
        } else {
            let codes = HuffmanTree::code_lengths_to_codes(&code_lengths)?;

            for (symbol, &length) in code_lengths.iter().enumerate() {
                if length > 0 && codes[symbol].is_some() {
                    tree.add_symbol(symbol.try_into().unwrap(), codes[symbol].unwrap(), length)?;
                }
            }
        }

        Ok(tree)
    }

    /// Builds a tree explicitly from lengths, codes and symbols
    pub(crate) fn build_explicit(
        code_lengths: Vec<u16>,
        codes: Vec<u16>,
        symbols: Vec<u16>,
    ) -> ImageResult<HuffmanTree> {
        let mut tree = HuffmanTree::init(symbols.len())?;

        for i in 0..symbols.len() {
            tree.add_symbol(symbols[i], codes[i], code_lengths[i])?;
        }

        Ok(tree)
    }

    /// Reads a symbol using the bitstream, following one branch per bit.
    pub(crate) fn read_symbol(&self, bit_reader: &mut BitReader) -> ImageResult<u16> {
        let mut index = 0;
        let mut node = self.tree[index];

        while let HuffmanTreeNode::Branch(children_offset) = node {
            index += children_offset + bit_reader.read_bits::<usize>(1)?;
            node = self.tree[index];
        }

        let symbol = match node {
            HuffmanTreeNode::Branch(_) => unreachable!(),
            // An empty node means the bitstream addressed an unassigned code.
            HuffmanTreeNode::Empty => return Err(DecoderError::HuffmanError.into()),
            HuffmanTreeNode::Leaf(symbol) => symbol,
        };

        Ok(symbol)
    }
}

View file

@ -1,147 +0,0 @@
//! Does loop filtering on webp lossy images
use crate::utils::clamp;
// Clamp a filter intermediate into the signed-byte range.
#[inline]
fn c(val: i32) -> i32 {
    // Equivalent to `clamp(val, -128, 127)` from crate::utils, expressed with
    // the inherent `i32::clamp`.
    val.clamp(-128, 127)
}
//unsigned to signed: maps 0..=255 onto -128..=127 by removing the bias
#[inline]
fn u2s(val: u8) -> i32 {
    let widened = i32::from(val);
    widened - 128
}
//signed to unsigned: clamp to -128..=127, then shift back into 0..=255
#[inline]
fn s2u(val: i32) -> u8 {
    // Inlines the `c` clamp helper; behavior is identical.
    (val.clamp(-128, 127) + 128) as u8
}
// Absolute difference of two bytes without leaving u8.
#[inline]
fn diff(val1: u8, val2: u8) -> u8 {
    val1.abs_diff(val2)
}
//15.2
/// Core of the simple loop filter (spec section 15.2, per the comment above):
/// adjusts the two pixels adjacent to the edge (p0/q0) and returns the rounded
/// adjustment `a` for reuse by the normal filter.
fn common_adjust(use_outer_taps: bool, pixels: &mut [u8], point: usize, stride: usize) -> i32 {
    let p1 = u2s(pixels[point - 2 * stride]);
    let p0 = u2s(pixels[point - stride]);
    let q0 = u2s(pixels[point]);
    let q1 = u2s(pixels[point + stride]);

    //value for the outer 2 pixels
    let outer = if use_outer_taps { c(p1 - q1) } else { 0 };

    let mut a = c(outer + 3 * (q0 - p0));

    // Asymmetric rounding (+3 for b, +4 for a) matches the VP8 reference.
    let b = (c(a + 3)) >> 3;

    a = (c(a + 4)) >> 3;

    pixels[point] = s2u(q0 - a);
    pixels[point - stride] = s2u(p0 + b);

    a
}
/// Edge-strength test for the simple filter:
/// 2*|p0 - q0| + |p1 - q1| / 2 <= filter_limit.
fn simple_threshold(filter_limit: i32, pixels: &[u8], point: usize, stride: usize) -> bool {
    i32::from(diff(pixels[point - stride], pixels[point])) * 2
        + i32::from(diff(pixels[point - 2 * stride], pixels[point + stride])) / 2
        <= filter_limit
}
/// Gate for the normal filter: the edge must pass the simple threshold, and
/// every adjacent pixel pair within 4 taps of the edge must differ by at most
/// `interior_limit`.
fn should_filter(
    interior_limit: u8,
    edge_limit: u8,
    pixels: &[u8],
    point: usize,
    stride: usize,
) -> bool {
    simple_threshold(i32::from(edge_limit), pixels, point, stride)
        && diff(pixels[point - 4 * stride], pixels[point - 3 * stride]) <= interior_limit
        && diff(pixels[point - 3 * stride], pixels[point - 2 * stride]) <= interior_limit
        && diff(pixels[point - 2 * stride], pixels[point - stride]) <= interior_limit
        && diff(pixels[point + 3 * stride], pixels[point + 2 * stride]) <= interior_limit
        && diff(pixels[point + 2 * stride], pixels[point + stride]) <= interior_limit
        && diff(pixels[point + stride], pixels[point]) <= interior_limit
}
/// True when either immediate neighbour pair differs by more than `threshold`;
/// such edges get only the lighter common adjustment in the normal filters.
fn high_edge_variance(threshold: u8, pixels: &[u8], point: usize, stride: usize) -> bool {
    diff(pixels[point - 2 * stride], pixels[point - stride]) > threshold
        || diff(pixels[point + stride], pixels[point]) > threshold
}
//simple filter
//effects 4 pixels on an edge(2 each side)
/// Applies the simple loop filter at `point` if the edge passes the threshold.
pub(crate) fn simple_segment(edge_limit: u8, pixels: &mut [u8], point: usize, stride: usize) {
    if simple_threshold(i32::from(edge_limit), pixels, point, stride) {
        common_adjust(true, pixels, point, stride);
    }
}
//normal filter
//works on the 8 pixels on the edges between subblocks inside a macroblock
pub(crate) fn subblock_filter(
    hev_threshold: u8,
    interior_limit: u8,
    edge_limit: u8,
    pixels: &mut [u8],
    point: usize,
    stride: usize,
) {
    if should_filter(interior_limit, edge_limit, pixels, point, stride) {
        let hv = high_edge_variance(hev_threshold, pixels, point, stride);
        // Half the inner adjustment, rounded.
        let a = (common_adjust(hv, pixels, point, stride) + 1) >> 1;
        if !hv {
            // Low-variance edges additionally adjust the outer pixel pair.
            pixels[point + stride] = s2u(u2s(pixels[point + stride]) - a);
            pixels[point - 2 * stride] = s2u(u2s(pixels[point - 2 * stride]) - a);
        }
    }
}
//normal filter
//works on the 8 pixels on the edges between macroblocks
pub(crate) fn macroblock_filter(
    hev_threshold: u8,
    interior_limit: u8,
    edge_limit: u8,
    pixels: &mut [u8],
    point: usize,
    stride: usize,
) {
    // Signed copies of the 8 pixels straddling the edge (4 on each side).
    let mut spixels = [0i32; 8];
    for i in 0..8 {
        spixels[i] = u2s(pixels[point + i * stride - 4 * stride]);
    }

    if should_filter(interior_limit, edge_limit, pixels, point, stride) {
        if !high_edge_variance(hev_threshold, pixels, point, stride) {
            // Wide adjustment of 3 pixels per side with tapering weights
            // 27/18/9 (each divided by 128 with rounding).
            let w = c(c(spixels[2] - spixels[5]) + 3 * (spixels[4] - spixels[3]));

            let mut a = c((27 * w + 63) >> 7);

            pixels[point] = s2u(spixels[4] - a);
            pixels[point - stride] = s2u(spixels[3] + a);

            a = c((18 * w + 63) >> 7);

            pixels[point + stride] = s2u(spixels[5] - a);
            pixels[point - 2 * stride] = s2u(spixels[2] + a);

            a = c((9 * w + 63) >> 7);

            pixels[point + 2 * stride] = s2u(spixels[6] - a);
            pixels[point - 3 * stride] = s2u(spixels[1] + a);
        } else {
            // High-variance edge: fall back to the simple adjustment.
            common_adjust(true, pixels, point, stride);
        }
    }
}

View file

@ -1,783 +0,0 @@
//! Decoding of lossless WebP images
//!
//! [Lossless spec](https://developers.google.com/speed/webp/docs/webp_lossless_bitstream_specification)
//!
use std::{
convert::TryFrom,
convert::TryInto,
error, fmt,
io::Read,
ops::{AddAssign, Shl},
};
use byteorder::ReadBytesExt;
use crate::{error::DecodingError, ImageError, ImageFormat, ImageResult};
use super::huffman::HuffmanTree;
use super::lossless_transform::{add_pixels, TransformType};
const CODE_LENGTH_CODES: usize = 19;

// Order in which the code-length code lengths appear in the bitstream.
const CODE_LENGTH_CODE_ORDER: [usize; CODE_LENGTH_CODES] = [
    17, 18, 0, 1, 2, 3, 4, 5, 16, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
];

// Maps the first 120 distance codes to (dx, dy) neighbour offsets.
#[rustfmt::skip]
const DISTANCE_MAP: [(i8, i8); 120] = [
    (0, 1), (1, 0), (1, 1), (-1, 1), (0, 2), (2, 0), (1, 2), (-1, 2),
    (2, 1), (-2, 1), (2, 2), (-2, 2), (0, 3), (3, 0), (1, 3), (-1, 3),
    (3, 1), (-3, 1), (2, 3), (-2, 3), (3, 2), (-3, 2), (0, 4), (4, 0),
    (1, 4), (-1, 4), (4, 1), (-4, 1), (3, 3), (-3, 3), (2, 4), (-2, 4),
    (4, 2), (-4, 2), (0, 5), (3, 4), (-3, 4), (4, 3), (-4, 3), (5, 0),
    (1, 5), (-1, 5), (5, 1), (-5, 1), (2, 5), (-2, 5), (5, 2), (-5, 2),
    (4, 4), (-4, 4), (3, 5), (-3, 5), (5, 3), (-5, 3), (0, 6), (6, 0),
    (1, 6), (-1, 6), (6, 1), (-6, 1), (2, 6), (-2, 6), (6, 2), (-6, 2),
    (4, 5), (-4, 5), (5, 4), (-5, 4), (3, 6), (-3, 6), (6, 3), (-6, 3),
    (0, 7), (7, 0), (1, 7), (-1, 7), (5, 5), (-5, 5), (7, 1), (-7, 1),
    (4, 6), (-4, 6), (6, 4), (-6, 4), (2, 7), (-2, 7), (7, 2), (-7, 2),
    (3, 7), (-3, 7), (7, 3), (-7, 3), (5, 6), (-5, 6), (6, 5), (-6, 5),
    (8, 0), (4, 7), (-4, 7), (7, 4), (-7, 4), (8, 1), (8, 2), (6, 6),
    (-6, 6), (8, 3), (5, 7), (-5, 7), (7, 5), (-7, 5), (8, 4), (6, 7),
    (-6, 7), (7, 6), (-7, 6), (8, 5), (7, 7), (-7, 7), (8, 6), (8, 7)
];

// Indices of the five entropy-coded channels within a Huffman code group.
const GREEN: usize = 0;
const RED: usize = 1;
const BLUE: usize = 2;
const ALPHA: usize = 3;
const DIST: usize = 4;

const HUFFMAN_CODES_PER_META_CODE: usize = 5;

// One Huffman tree per channel.
type HuffmanCodeGroup = [HuffmanTree; HUFFMAN_CODES_PER_META_CODE];

// Per-channel alphabet sizes; GREEN carries 24 extra codes beyond its 256.
const ALPHABET_SIZE: [u16; HUFFMAN_CODES_PER_META_CODE] = [256 + 24, 256, 256, 256, 40];
#[inline]
pub(crate) fn subsample_size(size: u16, bits: u8) -> u16 {
    // Ceiling division of `size` by 2^bits, computed in u32 so the
    // intermediate sum cannot overflow, then narrowed back (always fits).
    let block = 1u32 << bits;
    let blocks = (u32::from(size) + block - 1) >> bits;
    u16::try_from(blocks).unwrap()
}
/// Errors specific to the lossless (VP8L) decoder.
#[derive(Debug, Clone, Copy)]
pub(crate) enum DecoderError {
    /// Signature of 0x2f not found
    LosslessSignatureInvalid(u8),
    /// Version Number must be 0
    VersionNumberInvalid(u8),
    /// Color cache bits outside the valid 1-11 range.
    InvalidColorCacheBits(u8),
    /// A Huffman tree could not be built or a symbol could not be read.
    HuffmanError,
    /// Error while reading the bitstream.
    BitStreamError,
    /// A transform was duplicated or could not be applied.
    TransformError,
}
impl fmt::Display for DecoderError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
DecoderError::LosslessSignatureInvalid(sig) => {
f.write_fmt(format_args!("Invalid lossless signature: {}", sig))
}
DecoderError::VersionNumberInvalid(num) => {
f.write_fmt(format_args!("Invalid version number: {}", num))
}
DecoderError::InvalidColorCacheBits(num) => f.write_fmt(format_args!(
"Invalid color cache(must be between 1-11): {}",
num
)),
DecoderError::HuffmanError => f.write_fmt(format_args!("Error building Huffman Tree")),
DecoderError::BitStreamError => {
f.write_fmt(format_args!("Error while reading bitstream"))
}
DecoderError::TransformError => {
f.write_fmt(format_args!("Error while reading or writing transforms"))
}
}
}
}
// Wrap lossless-decoder errors as WebP decoding errors so `?` can convert.
impl From<DecoderError> for ImageError {
    fn from(e: DecoderError) -> ImageError {
        ImageError::Decoding(DecodingError::new(ImageFormat::WebP.into(), e))
    }
}

impl error::Error for DecoderError {}
// One slot per transform kind the bitstream can declare.
const NUM_TRANSFORM_TYPES: usize = 4;

//Decodes lossless WebP images
#[derive(Debug)]
pub(crate) struct LosslessDecoder<R> {
    r: R,
    bit_reader: BitReader,
    frame: LosslessFrame,
    // At most one transform of each kind; `transform_order` records the order
    // they were read in so they can be undone in reverse.
    transforms: [Option<TransformType>; NUM_TRANSFORM_TYPES],
    transform_order: Vec<u8>,
}
impl<R: Read> LosslessDecoder<R> {
/// Create a new decoder
///
/// Nothing is read from `r` until one of the `decode_frame*` methods is
/// called.
pub(crate) fn new(r: R) -> LosslessDecoder<R> {
    LosslessDecoder {
        r,
        bit_reader: BitReader::new(),
        frame: Default::default(),
        transforms: [None, None, None, None],
        transform_order: Vec::new(),
    }
}
/// Reads the frame
///
/// Checks the 0x2f lossless signature, reads the 14-bit dimensions (stored
/// minus one), decodes the ARGB image stream, then applies any recorded
/// transforms in reverse order of appearance.
pub(crate) fn decode_frame(&mut self) -> ImageResult<&LosslessFrame> {
    let signature = self.r.read_u8()?;

    if signature != 0x2f {
        return Err(DecoderError::LosslessSignatureInvalid(signature).into());
    }

    // Slurp the remaining bytes into the bit reader.
    let mut buf = Vec::new();
    self.r.read_to_end(&mut buf)?;
    self.bit_reader.init(buf);

    self.frame.width = self.bit_reader.read_bits::<u16>(14)? + 1;
    self.frame.height = self.bit_reader.read_bits::<u16>(14)? + 1;

    let _alpha_used = self.bit_reader.read_bits::<u8>(1)?;
    // The version number must be 0.
    let version_num = self.bit_reader.read_bits::<u8>(3)?;

    if version_num != 0 {
        return Err(DecoderError::VersionNumberInvalid(version_num).into());
    }

    let mut data = self.decode_image_stream(self.frame.width, self.frame.height, true)?;

    // Undo transforms in the reverse of the order they were read.
    for &trans_index in self.transform_order.iter().rev() {
        let trans = self.transforms[usize::from(trans_index)].as_ref().unwrap();
        trans.apply_transform(&mut data, self.frame.width, self.frame.height)?;
    }

    self.frame.buf = data;
    Ok(&self.frame)
}
//used for alpha data in extended decoding
/// Decodes a lossless stream whose dimensions are known from context (the
/// ALPH chunk of an extended file) rather than stored in the bitstream; there
/// is also no signature or version header in this mode.
pub(crate) fn decode_frame_implicit_dims(
    &mut self,
    width: u16,
    height: u16,
) -> ImageResult<&LosslessFrame> {
    let mut buf = Vec::new();
    self.r.read_to_end(&mut buf)?;
    self.bit_reader.init(buf);

    self.frame.width = width;
    self.frame.height = height;

    let mut data = self.decode_image_stream(self.frame.width, self.frame.height, true)?;

    //transform_order is vector of indices(0-3) into transforms in order decoded
    for &trans_index in self.transform_order.iter().rev() {
        let trans = self.transforms[usize::from(trans_index)].as_ref().unwrap();
        trans.apply_transform(&mut data, self.frame.width, self.frame.height)?;
    }

    self.frame.buf = data;
    Ok(&self.frame)
}
/// Reads Image data from the bitstream
/// Can be in any of the 5 roles described in the Specification
/// ARGB Image role has different behaviour to the other 4
/// xsize and ysize describe the size of the blocks where each block has its own entropy code
fn decode_image_stream(
    &mut self,
    xsize: u16,
    ysize: u16,
    is_argb_img: bool,
) -> ImageResult<Vec<u32>> {
    // Only the top-level ARGB image carries transforms, which may shrink the
    // effective coded width.
    let trans_xsize = if is_argb_img {
        self.read_transforms()?
    } else {
        xsize
    };

    // An optional color cache of 2^bits recently used pixel values.
    let color_cache_bits = self.read_color_cache()?;
    let color_cache = color_cache_bits.map(|bits| {
        let size = 1 << bits;
        let cache = vec![0u32; size];
        ColorCache {
            color_cache_bits: bits,
            color_cache: cache,
        }
    });

    let huffman_info = self.read_huffman_codes(is_argb_img, trans_xsize, ysize, color_cache)?;

    //decode data
    let data = self.decode_image_data(trans_xsize, ysize, huffman_info)?;

    Ok(data)
}
/// Reads transforms and their data from the bitstream.
/// Returns the (possibly reduced) effective width: the color-indexing
/// transform may pack several pixels into each stored pixel.
fn read_transforms(&mut self) -> ImageResult<u16> {
    let mut xsize = self.frame.width;
    // Each transform is announced by a set bit; a clear bit ends the list.
    while self.bit_reader.read_bits::<u8>(1)? == 1 {
        let transform_type_val = self.bit_reader.read_bits::<u8>(2)?;
        if self.transforms[usize::from(transform_type_val)].is_some() {
            //can only have one of each transform, error
            return Err(DecoderError::TransformError.into());
        }
        self.transform_order.push(transform_type_val);
        let transform_type = match transform_type_val {
            0 => {
                //predictor
                let size_bits = self.bit_reader.read_bits::<u8>(3)? + 2;
                let block_xsize = subsample_size(xsize, size_bits);
                let block_ysize = subsample_size(self.frame.height, size_bits);
                // One predictor mode per block, stored as a subsampled image.
                let data = self.decode_image_stream(block_xsize, block_ysize, false)?;
                TransformType::PredictorTransform {
                    size_bits,
                    predictor_data: data,
                }
            }
            1 => {
                //color transform
                let size_bits = self.bit_reader.read_bits::<u8>(3)? + 2;
                let block_xsize = subsample_size(xsize, size_bits);
                let block_ysize = subsample_size(self.frame.height, size_bits);
                let data = self.decode_image_stream(block_xsize, block_ysize, false)?;
                TransformType::ColorTransform {
                    size_bits,
                    transform_data: data,
                }
            }
            2 => {
                //subtract green
                TransformType::SubtractGreen
            }
            3 => {
                // Color indexing (palette). Small palettes let several pixels
                // share one stored byte, shrinking the effective width.
                let color_table_size = self.bit_reader.read_bits::<u16>(8)? + 1;
                let mut color_map = self.decode_image_stream(color_table_size, 1, false)?;
                let bits = if color_table_size <= 2 {
                    3
                } else if color_table_size <= 4 {
                    2
                } else if color_table_size <= 16 {
                    1
                } else {
                    0
                };
                xsize = subsample_size(xsize, bits);
                Self::adjust_color_map(&mut color_map);
                TransformType::ColorIndexingTransform {
                    table_size: color_table_size,
                    table_data: color_map,
                }
            }
            // transform_type_val is read from 2 bits, so 0..=3 is exhaustive.
            _ => unreachable!(),
        };
        self.transforms[usize::from(transform_type_val)] = Some(transform_type);
    }
    Ok(xsize)
}
/// Undoes the delta coding of the color map: each entry is stored as the
/// channel-wise difference from the previous entry, so accumulate from left
/// to right.
fn adjust_color_map(color_map: &mut [u32]) {
    let mut prev = match color_map.first() {
        Some(&first) => first,
        None => return,
    };
    for entry in color_map.iter_mut().skip(1) {
        prev = add_pixels(*entry, prev);
        *entry = prev;
    }
}
/// Reads the huffman codes associated with an image.
/// When `read_meta` is set (top-level ARGB image), an optional entropy image
/// assigns one of several code groups to each block of pixels.
fn read_huffman_codes(
    &mut self,
    read_meta: bool,
    xsize: u16,
    ysize: u16,
    color_cache: Option<ColorCache>,
) -> ImageResult<HuffmanInfo> {
    let mut num_huff_groups = 1;
    let mut huffman_bits = 0;
    let mut huffman_xsize = 1;
    let mut huffman_ysize = 1;
    let mut entropy_image = Vec::new();
    if read_meta && self.bit_reader.read_bits::<u8>(1)? == 1 {
        //meta huffman codes
        huffman_bits = self.bit_reader.read_bits::<u8>(3)? + 2;
        huffman_xsize = subsample_size(xsize, huffman_bits);
        huffman_ysize = subsample_size(ysize, huffman_bits);
        entropy_image = self.decode_image_stream(huffman_xsize, huffman_ysize, false)?;
        // The group index lives in the red+green channels of each entropy
        // pixel; the group count is one more than the largest index seen.
        for pixel in entropy_image.iter_mut() {
            let meta_huff_code = (*pixel >> 8) & 0xffff;
            *pixel = meta_huff_code;
            if meta_huff_code >= num_huff_groups {
                num_huff_groups = meta_huff_code + 1;
            }
        }
    }
    let mut hufftree_groups = Vec::new();
    for _i in 0..num_huff_groups {
        let mut group: HuffmanCodeGroup = Default::default();
        for j in 0..HUFFMAN_CODES_PER_META_CODE {
            let mut alphabet_size = ALPHABET_SIZE[j];
            // The green tree (j == 0) also carries length prefixes and
            // color-cache indices, so its alphabet grows with the cache size.
            if j == 0 {
                if let Some(color_cache) = color_cache.as_ref() {
                    alphabet_size += 1 << color_cache.color_cache_bits;
                }
            }
            let tree = self.read_huffman_code(alphabet_size)?;
            group[j] = tree;
        }
        hufftree_groups.push(group);
    }
    // mask selects the position within a meta block; all-ones when there is
    // only a single group (so the group is re-selected every pixel).
    let huffman_mask = if huffman_bits == 0 {
        !0
    } else {
        (1 << huffman_bits) - 1
    };
    let info = HuffmanInfo {
        xsize: huffman_xsize,
        _ysize: huffman_ysize,
        color_cache,
        image: entropy_image,
        bits: huffman_bits,
        mask: huffman_mask,
        huffman_code_groups: hufftree_groups,
    };
    Ok(info)
}
/// Decodes and returns a single huffman tree: either "simple" (one or two
/// explicitly listed symbols) or "normal" (the code lengths are themselves
/// huffman coded).
fn read_huffman_code(&mut self, alphabet_size: u16) -> ImageResult<HuffmanTree> {
    let simple = self.bit_reader.read_bits::<u8>(1)? == 1;
    if simple {
        let num_symbols = self.bit_reader.read_bits::<u8>(1)? + 1;
        // One symbol -> zero-bit code; two symbols -> one-bit codes.
        let mut code_lengths = vec![u16::from(num_symbols - 1)];
        let mut codes = vec![0];
        let mut symbols = Vec::new();
        // The first symbol is stored in either 1 or 8 bits.
        let is_first_8bits = self.bit_reader.read_bits::<u8>(1)?;
        symbols.push(self.bit_reader.read_bits::<u16>(1 + 7 * is_first_8bits)?);
        if num_symbols == 2 {
            symbols.push(self.bit_reader.read_bits::<u16>(8)?);
            code_lengths.push(1);
            codes.push(1);
        }
        HuffmanTree::build_explicit(code_lengths, codes, symbols)
    } else {
        // Read the 3-bit lengths of the inner "code length" code, stored in a
        // fixed permuted order.
        let mut code_length_code_lengths = vec![0; CODE_LENGTH_CODES];
        let num_code_lengths = 4 + self.bit_reader.read_bits::<usize>(4)?;
        for i in 0..num_code_lengths {
            code_length_code_lengths[CODE_LENGTH_CODE_ORDER[i]] =
                self.bit_reader.read_bits(3)?;
        }
        let new_code_lengths =
            self.read_huffman_code_lengths(code_length_code_lengths, alphabet_size)?;
        HuffmanTree::build_implicit(new_code_lengths)
    }
}
/// Reads the huffman-coded list of code lengths for an alphabet of
/// `num_symbols` symbols, expanding the repeat codes 16/17/18.
fn read_huffman_code_lengths(
    &mut self,
    code_length_code_lengths: Vec<u16>,
    num_symbols: u16,
) -> ImageResult<Vec<u16>> {
    let table = HuffmanTree::build_implicit(code_length_code_lengths)?;
    // Optionally the stream bounds how many coded symbols follow; any
    // remaining symbols default to length 0.
    let mut max_symbol = if self.bit_reader.read_bits::<u8>(1)? == 1 {
        let length_nbits = 2 + 2 * self.bit_reader.read_bits::<u8>(3)?;
        2 + self.bit_reader.read_bits::<u16>(length_nbits)?
    } else {
        num_symbols
    };
    let mut code_lengths = vec![0; usize::from(num_symbols)];
    let mut prev_code_len = 8; //default code length
    let mut symbol = 0;
    while symbol < num_symbols {
        if max_symbol == 0 {
            break;
        }
        max_symbol -= 1;
        let code_len = table.read_symbol(&mut self.bit_reader)?;
        if code_len < 16 {
            // Literal code length.
            code_lengths[usize::from(symbol)] = code_len;
            symbol += 1;
            if code_len != 0 {
                prev_code_len = code_len;
            }
        } else {
            // Repeat codes: 16 repeats the previous non-zero length,
            // 17 and 18 repeat zeros; each has its own extra-bit count and
            // base repeat count.
            let use_prev = code_len == 16;
            let slot = code_len - 16;
            let extra_bits = match slot {
                0 => 2,
                1 => 3,
                2 => 7,
                _ => return Err(DecoderError::BitStreamError.into()),
            };
            let repeat_offset = match slot {
                0 | 1 => 3,
                2 => 11,
                _ => return Err(DecoderError::BitStreamError.into()),
            };
            let mut repeat = self.bit_reader.read_bits::<u16>(extra_bits)? + repeat_offset;
            // A repeat run must not spill past the alphabet.
            if symbol + repeat > num_symbols {
                return Err(DecoderError::BitStreamError.into());
            } else {
                let length = if use_prev { prev_code_len } else { 0 };
                while repeat > 0 {
                    repeat -= 1;
                    code_lengths[usize::from(symbol)] = length;
                    symbol += 1;
                }
            }
        }
    }
    Ok(code_lengths)
}
/// Decodes the image data using the huffman trees and any of the three symbol
/// kinds: ARGB literals, LZ77-style backward references, and color-cache
/// lookups.
fn decode_image_data(
    &mut self,
    width: u16,
    height: u16,
    mut huffman_info: HuffmanInfo,
) -> ImageResult<Vec<u32>> {
    let num_values = usize::from(width) * usize::from(height);
    let mut data = vec![0; num_values];
    let huff_index = huffman_info.get_huff_index(0, 0);
    let mut tree = &huffman_info.huffman_code_groups[huff_index];
    let mut last_cached = 0;
    // Invariant: index == usize::from(y) * usize::from(width) + usize::from(x).
    let mut index = 0;
    let mut x = 0;
    let mut y = 0;
    while index < num_values {
        // Re-select the code group whenever we cross a meta-huffman block
        // boundary (mask is all-ones when there is only one group).
        if (x & huffman_info.mask) == 0 {
            let index = huffman_info.get_huff_index(x, y);
            tree = &huffman_info.huffman_code_groups[index];
        }
        let code = tree[GREEN].read_symbol(&mut self.bit_reader)?;
        //check code
        if code < 256 {
            //literal, so just use huffman codes and read as argb
            let red = tree[RED].read_symbol(&mut self.bit_reader)?;
            let blue = tree[BLUE].read_symbol(&mut self.bit_reader)?;
            let alpha = tree[ALPHA].read_symbol(&mut self.bit_reader)?;
            data[index] = (u32::from(alpha) << 24)
                + (u32::from(red) << 16)
                + (u32::from(code) << 8)
                + u32::from(blue);
            index += 1;
            x += 1;
            if x >= width {
                x = 0;
                y += 1;
            }
        } else if code < 256 + 24 {
            //backward reference, so go back and use that to add image data
            let length_symbol = code - 256;
            let length = Self::get_copy_distance(&mut self.bit_reader, length_symbol)?;
            let dist_symbol = tree[DIST].read_symbol(&mut self.bit_reader)?;
            let dist_code = Self::get_copy_distance(&mut self.bit_reader, dist_symbol)?;
            let dist = Self::plane_code_to_distance(width, dist_code);
            // Reject references reaching before the image start or past its end.
            if index < dist || num_values - index < length {
                return Err(DecoderError::BitStreamError.into());
            }
            for i in 0..length {
                data[index + i] = data[index + i - dist];
            }
            index += length;
            // Recompute (x, y) from the flat index instead of advancing x by
            // `length`: a copy length is only bounded by num_values and may
            // exceed u16::MAX, which made the previous
            // `x += u16::try_from(length).unwrap()` panic on valid streams.
            // (width > 0 here: num_values > index >= 0 implies width != 0.)
            x = (index % usize::from(width)) as u16;
            y = (index / usize::from(width)) as u16;
            if index < num_values {
                let index = huffman_info.get_huff_index(x, y);
                tree = &huffman_info.huffman_code_groups[index];
            }
        } else {
            //color cache, so use previously stored pixels to get this pixel
            let key = code - 256 - 24;
            if let Some(color_cache) = huffman_info.color_cache.as_mut() {
                // Lazily insert every pixel produced since the last lookup
                // before consulting the cache.
                while last_cached < index {
                    color_cache.insert(data[last_cached]);
                    last_cached += 1;
                }
                data[index] = color_cache.lookup(key.into())?;
            } else {
                // A cache symbol without a declared cache is malformed input.
                return Err(DecoderError::BitStreamError.into());
            }
            index += 1;
            x += 1;
            if x >= width {
                x = 0;
                y += 1;
            }
        }
    }
    Ok(data)
}
/// Reads the color-cache declaration from the bitstream.
/// Returns `Some(bits)` (cache of `1 << bits` entries) when a cache is used,
/// `None` otherwise; `bits` outside 1..=11 is a bitstream error.
fn read_color_cache(&mut self) -> ImageResult<Option<u8>> {
    if self.bit_reader.read_bits::<u8>(1)? == 0 {
        return Ok(None);
    }
    let code_bits = self.bit_reader.read_bits::<u8>(4)?;
    match code_bits {
        1..=11 => Ok(Some(code_bits)),
        _ => Err(DecoderError::InvalidColorCacheBits(code_bits).into()),
    }
}
/// Expands a length/distance prefix code into its value, reading any extra
/// bits from the stream (prefix codes below 4 carry no extra bits).
fn get_copy_distance(bit_reader: &mut BitReader, prefix_code: u16) -> ImageResult<usize> {
    match prefix_code {
        0..=3 => Ok(usize::from(prefix_code) + 1),
        _ => {
            let extra_bits: u8 = ((prefix_code - 2) >> 1).try_into().unwrap();
            // Base offset: (2 + low-bit-of-prefix) scaled by the extra bits.
            let offset = (2 + (usize::from(prefix_code) & 1)) << extra_bits;
            Ok(offset + bit_reader.read_bits::<usize>(extra_bits)? + 1)
        }
    }
}
/// Converts a distance code into a linear pixel distance: codes above 120 are
/// direct offsets, the rest index a 2-D neighborhood map (clamped to at
/// least 1).
fn plane_code_to_distance(xsize: u16, plane_code: usize) -> usize {
    if plane_code > 120 {
        return plane_code - 120;
    }
    let (xoffset, yoffset) = DISTANCE_MAP[plane_code - 1];
    let dist = i32::from(xoffset) + i32::from(yoffset) * i32::from(xsize);
    dist.max(1).try_into().unwrap()
}
}
/// Huffman decoding state for one image stream: the (optional) entropy image
/// assigning a code group to each block, plus the code groups themselves.
#[derive(Debug, Clone)]
struct HuffmanInfo {
    xsize: u16,  // width of the entropy image, in blocks
    _ysize: u16, // height of the entropy image (kept for symmetry, unused)
    color_cache: Option<ColorCache>,
    image: Vec<u32>, // per-block code-group indices (empty when unused)
    bits: u8,        // log2 of the meta block size; 0 means a single group
    mask: u16,       // block-boundary mask derived from `bits`
    huffman_code_groups: Vec<HuffmanCodeGroup>,
}
impl HuffmanInfo {
    /// Maps pixel coordinates to the index of the huffman code group that
    /// covers them (always 0 when no entropy image is present).
    fn get_huff_index(&self, x: u16, y: u16) -> usize {
        if self.bits == 0 {
            0
        } else {
            let position = usize::from((y >> self.bits) * self.xsize + (x >> self.bits));
            self.image[position].try_into().unwrap()
        }
    }
}
/// Hash-indexed cache of recently emitted colors; holds
/// `1 << color_cache_bits` entries.
#[derive(Debug, Clone)]
struct ColorCache {
    color_cache_bits: u8,
    color_cache: Vec<u32>,
}
impl ColorCache {
    /// Stores a color at its hash slot (multiplicative hash on the ARGB word,
    /// keeping the top `color_cache_bits` bits).
    fn insert(&mut self, color: u32) {
        let slot = 0x1e35a7bdu32.wrapping_mul(color) >> (32 - self.color_cache_bits);
        self.color_cache[slot as usize] = color;
    }

    /// Fetches a cached color; an out-of-range index is a bitstream error.
    fn lookup(&self, index: usize) -> ImageResult<u32> {
        self.color_cache
            .get(index)
            .copied()
            .ok_or_else(|| DecoderError::BitStreamError.into())
    }
}
/// Least-significant-bit-first bit reader over an in-memory buffer.
#[derive(Debug, Clone)]
pub(crate) struct BitReader {
    buf: Vec<u8>,
    index: usize,  // byte position of the next bit to read
    bit_count: u8, // bit position (0-7) within the current byte
}
impl BitReader {
    /// Creates an empty reader; call [`BitReader::init`] before reading.
    fn new() -> BitReader {
        BitReader {
            buf: Vec::new(),
            index: 0,
            bit_count: 0,
        }
    }

    /// Installs the backing buffer. Note the cursor is not reset here; a
    /// `BitReader` is expected to be initialized only once.
    fn init(&mut self, buf: Vec<u8>) {
        self.buf = buf;
    }

    /// Reads `num` bits, least significant bit first — both within each
    /// byte and in the assembled value. Returns `BitStreamError` when the
    /// buffer is exhausted mid-read.
    pub(crate) fn read_bits<T>(&mut self, num: u8) -> ImageResult<T>
    where
        T: num_traits::Unsigned + Shl<u8, Output = T> + AddAssign<T> + From<bool>,
    {
        let mut value: T = T::zero();
        for i in 0..num {
            if self.buf.len() <= self.index {
                return Err(DecoderError::BitStreamError.into());
            }
            let bit_true = self.buf[self.index] & (1 << self.bit_count) != 0;
            value += T::from(bit_true) << i;
            // Advance the cursor, moving to the next byte after bit 7.
            self.bit_count = if self.bit_count == 7 {
                self.index += 1;
                0
            } else {
                self.bit_count + 1
            };
        }
        Ok(value)
    }
}
/// A fully decoded lossless frame: dimensions plus one packed ARGB word per
/// pixel, in row-major order.
#[derive(Debug, Clone, Default)]
pub(crate) struct LosslessFrame {
    pub(crate) width: u16,
    pub(crate) height: u16,
    pub(crate) buf: Vec<u32>,
}
impl LosslessFrame {
    /// Expands the packed ARGB pixel buffer into `buf` as interleaved RGBA
    /// bytes (4 bytes per pixel).
    pub(crate) fn fill_rgba(&self, buf: &mut [u8]) {
        for (&argb, out) in self.buf.iter().zip(buf.chunks_exact_mut(4)) {
            // Word layout: alpha 24..32, red 16..24, green 8..16, blue 0..8;
            // the `as u8` casts keep exactly the selected byte.
            out[0] = (argb >> 16) as u8; // red
            out[1] = (argb >> 8) as u8; // green
            out[2] = argb as u8; // blue
            out[3] = (argb >> 24) as u8; // alpha
        }
    }

    /// Number of bytes needed to hold this frame as RGBA.
    pub(crate) fn get_buf_size(&self) -> usize {
        usize::from(self.width) * usize::from(self.height) * 4
    }

    /// Copies only the green channel of each pixel into `buf`; used in
    /// extended alpha decoding, where alpha data rides in the green slot.
    pub(crate) fn fill_green(&self, buf: &mut [u8]) {
        for (out, &argb) in buf.iter_mut().zip(self.buf.iter()) {
            *out = (argb >> 8) as u8;
        }
    }
}
#[cfg(test)]
mod test {
    use super::BitReader;

    // Bits are consumed LSB-first within each byte and assembled LSB-first.
    #[test]
    fn bit_read_test() {
        let mut bit_reader = BitReader::new();
        //10011100 01000001 11100001
        let buf = vec![0x9C, 0x41, 0xE1];
        bit_reader.init(buf);
        assert_eq!(bit_reader.read_bits::<u8>(3).unwrap(), 4); //100
        assert_eq!(bit_reader.read_bits::<u8>(2).unwrap(), 3); //11
        assert_eq!(bit_reader.read_bits::<u8>(6).unwrap(), 12); //001100
        assert_eq!(bit_reader.read_bits::<u16>(10).unwrap(), 40); //0000101000
        assert_eq!(bit_reader.read_bits::<u8>(3).unwrap(), 7); //111
    }

    // Reading past the end of the buffer must error rather than panic.
    #[test]
    fn bit_read_error_test() {
        let mut bit_reader = BitReader::new();
        //01101010
        let buf = vec![0x6A];
        bit_reader.init(buf);
        assert_eq!(bit_reader.read_bits::<u8>(3).unwrap(), 2); //010
        assert_eq!(bit_reader.read_bits::<u8>(5).unwrap(), 13); //01101
        assert!(bit_reader.read_bits::<u8>(4).is_err()); //error
    }
}

View file

@ -1,464 +0,0 @@
use std::convert::TryFrom;
use std::convert::TryInto;
use super::lossless::subsample_size;
use super::lossless::DecoderError;
/// The four optional transforms a lossless stream may apply to the ARGB image
/// before entropy coding; decoding undoes them in reverse order.
#[derive(Debug, Clone)]
pub(crate) enum TransformType {
    /// Per-block spatial prediction; `predictor_data` stores one mode per
    /// block of side `1 << size_bits`.
    PredictorTransform {
        size_bits: u8,
        predictor_data: Vec<u32>,
    },
    /// Per-block decorrelation of red/blue from green.
    ColorTransform {
        size_bits: u8,
        transform_data: Vec<u32>,
    },
    /// Green was subtracted from red and blue.
    SubtractGreen,
    /// Palette (color-indexing) transform; pixels are indices into the table.
    ColorIndexingTransform {
        table_size: u16,
        table_data: Vec<u32>,
    },
}
impl TransformType {
    /// Applies (i.e. undoes) a transform on the decoded image data in place.
    /// `width`/`height` are the dimensions of the final image; the
    /// color-indexing arm replaces the buffer since it grows the pixel count.
    pub(crate) fn apply_transform(
        &self,
        image_data: &mut Vec<u32>,
        width: u16,
        height: u16,
    ) -> Result<(), DecoderError> {
        match self {
            TransformType::PredictorTransform {
                size_bits,
                predictor_data,
            } => {
                let block_xsize = usize::from(subsample_size(width, *size_bits));
                let width = usize::from(width);
                let height = usize::from(height);
                if image_data.len() < width * height {
                    return Err(DecoderError::TransformError);
                }

                //handle top and left borders specially
                //this involves ignoring mode and just setting prediction values like this
                // (top-left pixel predicts opaque black; first row predicts
                // from the left; first column predicts from above)
                image_data[0] = add_pixels(image_data[0], 0xff000000);

                for x in 1..width {
                    image_data[x] = add_pixels(image_data[x], get_left(image_data, x, 0, width));
                }

                for y in 1..height {
                    image_data[y * width] =
                        add_pixels(image_data[y * width], get_top(image_data, 0, y, width));
                }

                for y in 1..height {
                    for x in 1..width {
                        let block_index = (y >> size_bits) * block_xsize + (x >> size_bits);

                        let index = y * width + x;

                        // The predictor mode for this block is stored in the
                        // green channel of the predictor image.
                        let green = (predictor_data[block_index] >> 8) & 0xff;

                        match green {
                            0 => image_data[index] = add_pixels(image_data[index], 0xff000000),
                            1 => {
                                image_data[index] =
                                    add_pixels(image_data[index], get_left(image_data, x, y, width))
                            }
                            2 => {
                                image_data[index] =
                                    add_pixels(image_data[index], get_top(image_data, x, y, width))
                            }
                            3 => {
                                image_data[index] = add_pixels(
                                    image_data[index],
                                    get_top_right(image_data, x, y, width),
                                )
                            }
                            4 => {
                                image_data[index] = add_pixels(
                                    image_data[index],
                                    get_top_left(image_data, x, y, width),
                                )
                            }
                            5 => {
                                image_data[index] = add_pixels(image_data[index], {
                                    let first = average2(
                                        get_left(image_data, x, y, width),
                                        get_top_right(image_data, x, y, width),
                                    );
                                    average2(first, get_top(image_data, x, y, width))
                                })
                            }
                            6 => {
                                image_data[index] = add_pixels(
                                    image_data[index],
                                    average2(
                                        get_left(image_data, x, y, width),
                                        get_top_left(image_data, x, y, width),
                                    ),
                                )
                            }
                            7 => {
                                image_data[index] = add_pixels(
                                    image_data[index],
                                    average2(
                                        get_left(image_data, x, y, width),
                                        get_top(image_data, x, y, width),
                                    ),
                                )
                            }
                            8 => {
                                image_data[index] = add_pixels(
                                    image_data[index],
                                    average2(
                                        get_top_left(image_data, x, y, width),
                                        get_top(image_data, x, y, width),
                                    ),
                                )
                            }
                            9 => {
                                image_data[index] = add_pixels(
                                    image_data[index],
                                    average2(
                                        get_top(image_data, x, y, width),
                                        get_top_right(image_data, x, y, width),
                                    ),
                                )
                            }
                            10 => {
                                image_data[index] = add_pixels(image_data[index], {
                                    let first = average2(
                                        get_left(image_data, x, y, width),
                                        get_top_left(image_data, x, y, width),
                                    );
                                    let second = average2(
                                        get_top(image_data, x, y, width),
                                        get_top_right(image_data, x, y, width),
                                    );
                                    average2(first, second)
                                })
                            }
                            11 => {
                                image_data[index] = add_pixels(
                                    image_data[index],
                                    select(
                                        get_left(image_data, x, y, width),
                                        get_top(image_data, x, y, width),
                                        get_top_left(image_data, x, y, width),
                                    ),
                                )
                            }
                            12 => {
                                image_data[index] = add_pixels(
                                    image_data[index],
                                    clamp_add_subtract_full(
                                        get_left(image_data, x, y, width),
                                        get_top(image_data, x, y, width),
                                        get_top_left(image_data, x, y, width),
                                    ),
                                )
                            }
                            13 => {
                                image_data[index] = add_pixels(image_data[index], {
                                    let first = average2(
                                        get_left(image_data, x, y, width),
                                        get_top(image_data, x, y, width),
                                    );
                                    clamp_add_subtract_half(
                                        first,
                                        get_top_left(image_data, x, y, width),
                                    )
                                })
                            }
                            // NOTE(review): modes > 13 are silently ignored
                            // (pixel left as its residual) — confirm whether
                            // they should be a TransformError instead.
                            _ => {}
                        }
                    }
                }
            }
            TransformType::ColorTransform {
                size_bits,
                transform_data,
            } => {
                // NOTE(review): unlike the predictor arm, there is no
                // image_data.len() >= width * height check here — indexing
                // will panic rather than error on short buffers.
                let block_xsize = usize::from(subsample_size(width, *size_bits));
                let width = usize::from(width);
                let height = usize::from(height);

                for y in 0..height {
                    for x in 0..width {
                        let block_index = (y >> size_bits) * block_xsize + (x >> size_bits);

                        let index = y * width + x;

                        let multiplier =
                            ColorTransformElement::from_color_code(transform_data[block_index]);

                        image_data[index] = transform_color(&multiplier, image_data[index]);
                    }
                }
            }
            TransformType::SubtractGreen => {
                let width = usize::from(width);
                for y in 0..usize::from(height) {
                    for x in 0..width {
                        image_data[y * width + x] = add_green(image_data[y * width + x]);
                    }
                }
            }
            TransformType::ColorIndexingTransform {
                table_size,
                table_data,
            } => {
                let mut new_image_data =
                    Vec::with_capacity(usize::from(width) * usize::from(height));

                let table_size = *table_size;
                // Small palettes pack multiple indices per stored byte; the
                // width_bits here mirror the subsampling done while reading
                // the transform.
                let width_bits: u8 = if table_size <= 2 {
                    3
                } else if table_size <= 4 {
                    2
                } else if table_size <= 16 {
                    1
                } else {
                    0
                };

                let bits_per_pixel = 8 >> width_bits;
                let mask = (1 << bits_per_pixel) - 1;

                let mut src = 0;
                let width = usize::from(width);

                let pixels_per_byte = 1 << width_bits;
                let count_mask = pixels_per_byte - 1;
                let mut packed_pixels = 0;

                for _y in 0..usize::from(height) {
                    for x in 0..width {
                        // Refill the packed-index byte (stored in the green
                        // channel) at each group boundary.
                        if (x & count_mask) == 0 {
                            packed_pixels = (image_data[src] >> 8) & 0xff;
                            src += 1;
                        }

                        let pixels: usize = (packed_pixels & mask).try_into().unwrap();
                        // Out-of-range palette indices decode as transparent
                        // black rather than erroring.
                        let new_val = if pixels >= table_size.into() {
                            0x00000000
                        } else {
                            table_data[pixels]
                        };

                        new_image_data.push(new_val);

                        packed_pixels >>= bits_per_pixel;
                    }
                }

                *image_data = new_image_data;
            }
        }

        Ok(())
    }
}
//predictor functions
/// Adds two ARGB pixels channel-wise, each channel wrapping mod 256.
pub(crate) fn add_pixels(a: u32, b: u32) -> u32 {
    let mut out = 0u32;
    for shift in [24u32, 16, 8, 0] {
        let sum = (((a >> shift) & 0xff) + ((b >> shift) & 0xff)) & 0xff;
        out |= sum << shift;
    }
    out
}
/// Pixel immediately left of (x, y) in a row-major buffer; callers ensure
/// x >= 1.
fn get_left(data: &[u32], x: usize, y: usize, width: usize) -> u32 {
    data[y * width + (x - 1)]
}
/// Pixel immediately above (x, y); callers ensure y >= 1.
fn get_top(data: &[u32], x: usize, y: usize, width: usize) -> u32 {
    let row_above = y - 1;
    data[row_above * width + x]
}
/// Pixel above and to the right of (x, y).
/// When x == width - 1 this wraps to the left-most pixel of the *current*
/// row, as described in the specification.
fn get_top_right(data: &[u32], x: usize, y: usize, width: usize) -> u32 {
    let row_above = y - 1;
    data[row_above * width + x + 1]
}
/// Pixel above and to the left of (x, y); callers ensure x >= 1 and y >= 1.
fn get_top_left(data: &[u32], x: usize, y: usize, width: usize) -> u32 {
    let row_above = y - 1;
    data[row_above * width + (x - 1)]
}
/// Channel-wise floor average of two ARGB pixels.
fn average2(a: u32, b: u32) -> u32 {
    let mut avg = 0u32;
    for i in 0..4u32 {
        let shift = i * 8;
        let channel_a = (a >> shift) & 0xff;
        let channel_b = (b >> shift) & 0xff;
        // Sum fits in u32, so the average needs no widening.
        avg |= ((channel_a + channel_b) / 2) << shift;
    }
    avg
}
/// Floor average of two bytes, widened to u16 so the sum cannot overflow.
fn sub_average2(a: u8, b: u8) -> u8 {
    let sum = u16::from(a) + u16::from(b);
    (sum >> 1) as u8
}
/// Extracts byte `byte` (0 = blue … 3 = alpha) from an ARGB pixel; the `as`
/// cast keeps exactly the selected byte.
fn get_byte(val: u32, byte: u8) -> u8 {
    (val >> (byte * 8)) as u8
}
/// Extracts byte `byte` of an ARGB pixel as an i32 (always 0..=255), for
/// signed predictor arithmetic.
fn get_byte_i32(val: u32, byte: u8) -> i32 {
    ((val >> (byte * 8)) & 0xff) as i32
}
/// "Select" predictor (mode 11): forms the gradient left + top - top_left per
/// channel, then returns whichever of `left`/`top` is closer to it in summed
/// per-channel absolute distance (ties go to `top`).
fn select(left: u32, top: u32, top_left: u32) -> u32 {
    let mut predict_left = 0;
    let mut predict_top = 0;
    for byte in 0..4u8 {
        let predict =
            get_byte_i32(left, byte) + get_byte_i32(top, byte) - get_byte_i32(top_left, byte);
        predict_left += (predict - get_byte_i32(left, byte)).abs();
        predict_top += (predict - get_byte_i32(top, byte)).abs();
    }
    if predict_left < predict_top {
        left
    } else {
        top
    }
}
/// Clamps a value to the byte range [0, 255].
fn clamp(a: i32) -> i32 {
    a.clamp(0, 255)
}
/// One channel of the clamp-add-subtract-full predictor: a + b - c, clamped
/// to the byte range.
fn clamp_add_subtract_full_sub(a: i32, b: i32, c: i32) -> i32 {
    (a + b - c).clamp(0, 255)
}
/// One channel of the clamp-add-subtract-half predictor:
/// a + (a - b) / 2 (truncating division), clamped to the byte range.
fn clamp_add_subtract_half_sub(a: i32, b: i32) -> i32 {
    (a + (a - b) / 2).clamp(0, 255)
}
/// Clamp-add-subtract-full predictor on whole ARGB pixels: per channel,
/// a + b - c clamped to [0, 255].
fn clamp_add_subtract_full(a: u32, b: u32, c: u32) -> u32 {
    let mut value = 0u32;
    for i in 0..4u8 {
        let shift = i * 8;
        let sub_a = ((a >> shift) & 0xff) as i32;
        let sub_b = ((b >> shift) & 0xff) as i32;
        let sub_c = ((c >> shift) & 0xff) as i32;
        let clamped = (sub_a + sub_b - sub_c).clamp(0, 255);
        value |= (clamped as u32) << shift;
    }
    value
}
/// Clamp-add-subtract-half predictor on whole ARGB pixels: per channel,
/// a + (a - b) / 2 clamped to [0, 255].
fn clamp_add_subtract_half(a: u32, b: u32) -> u32 {
    let mut value = 0u32;
    for i in 0..4u8 {
        let shift = i * 8;
        let sub_a = ((a >> shift) & 0xff) as i32;
        let sub_b = ((b >> shift) & 0xff) as i32;
        let clamped = (sub_a + (sub_a - sub_b) / 2).clamp(0, 255);
        value |= (clamped as u32) << shift;
    }
    value
}
//color transform
/// One element of the color transform: three signed (two's-complement)
/// multipliers stored as raw bytes.
#[derive(Debug, Clone, Copy)]
struct ColorTransformElement {
    green_to_red: u8,
    green_to_blue: u8,
    red_to_blue: u8,
}
impl ColorTransformElement {
    /// Unpacks a transform element from its pixel encoding: green_to_red in
    /// bits 0..8, green_to_blue in 8..16, red_to_blue in 16..24 (the top byte
    /// is unused).
    fn from_color_code(color_code: u32) -> ColorTransformElement {
        let [green_to_red, green_to_blue, red_to_blue, _] = color_code.to_le_bytes();
        ColorTransformElement {
            green_to_red,
            green_to_blue,
            red_to_blue,
        }
    }
}
/// Applies a color-transform element to the red and blue channels, driven by
/// green. The `as i8` casts deliberately reinterpret bytes as signed
/// two's-complement values, and the final `& 0xff` keeps only the low byte
/// (i.e. arithmetic wraps mod 256).
fn color_transform(red: u8, blue: u8, green: u8, trans: &ColorTransformElement) -> (u8, u8) {
    let mut temp_red = u32::from(red);
    let mut temp_blue = u32::from(blue);

    //as does the conversion from u8 to signed two's complement i8 required
    temp_red += color_transform_delta(trans.green_to_red as i8, green as i8);
    temp_blue += color_transform_delta(trans.green_to_blue as i8, green as i8);
    // Note: the blue delta uses the *already transformed* red value.
    temp_blue += color_transform_delta(trans.red_to_blue as i8, temp_red as i8);

    (
        (temp_red & 0xff).try_into().unwrap(),
        (temp_blue & 0xff).try_into().unwrap(),
    )
}
/// Signed fixed-point product of two transform bytes: (t * c) >> 5, with the
/// product's bit pattern reinterpreted as u32 (sign-extension preserved by
/// the widening cast) so negative deltas wrap when added mod 2^32.
fn color_transform_delta(t: i8, c: i8) -> u32 {
    let product = i32::from(t) * i32::from(c);
    (product as u32) >> 5
}
// Applies one color-transform element to a single ARGB pixel, replacing its
// red and blue channels while alpha and green pass through untouched.
fn transform_color(multiplier: &ColorTransformElement, color_value: u32) -> u32 {
    let alpha = get_byte(color_value, 3);
    let red = get_byte(color_value, 2);
    let green = get_byte(color_value, 1);
    let blue = get_byte(color_value, 0);

    let (new_red, new_blue) = color_transform(red, blue, green, multiplier);

    // Repack as big-endian A, R, G, B.
    u32::from_be_bytes([alpha, new_red, green, new_blue])
}
//subtract green function
/// Undoes the subtract-green transform: green is added back (mod 256) to the
/// red and blue channels; alpha and green themselves are unchanged.
fn add_green(argb: u32) -> u32 {
    let green = (argb >> 8) & 0xff;
    let new_red = (((argb >> 16) & 0xff) + green) & 0xff;
    let new_blue = ((argb & 0xff) + green) & 0xff;
    (argb & 0xff00ff00) | (new_red << 16) | new_blue
}

View file

@ -1,17 +1,7 @@
//! Decoding and Encoding of WebP Images
pub use self::encoder::WebPEncoder;
mod decoder;
mod encoder;
pub use self::decoder::WebPDecoder;
mod decoder;
mod extended;
mod huffman;
mod loop_filter;
mod lossless;
mod lossless_transform;
mod transform;
pub mod vp8;
pub use self::encoder::WebPEncoder;

View file

@ -1,77 +0,0 @@
// Fixed-point constants (16-bit fraction) used by the VP8 inverse DCT.
// NOTE(review): taken from the reference tables; they correspond to
// 20091 ~= (sqrt(2)*cos(pi/8) - 1) * 65536 and
// 35468 ~= sqrt(2)*sin(pi/8) * 65536 — confirm against RFC 6386.
static CONST1: i64 = 20091;
static CONST2: i64 = 35468;
/// In-place 4x4 inverse DCT on a row-major coefficient block: columns first,
/// then rows with `+4 >> 3` rounding on output.
pub(crate) fn idct4x4(block: &mut [i32]) {
    // The intermediate results may overflow the types, so we stretch the type.
    fn fetch(block: &[i32], idx: usize) -> i64 {
        i64::from(block[idx])
    }

    // Pass 1: transform each of the 4 columns (stride 4).
    for i in 0usize..4 {
        let a1 = fetch(block, i) + fetch(block, 8 + i);
        let b1 = fetch(block, i) - fetch(block, 8 + i);

        let t1 = (fetch(block, 4 + i) * CONST2) >> 16;
        let t2 = fetch(block, 12 + i) + ((fetch(block, 12 + i) * CONST1) >> 16);
        let c1 = t1 - t2;

        let t1 = fetch(block, 4 + i) + ((fetch(block, 4 + i) * CONST1) >> 16);
        let t2 = (fetch(block, 12 + i) * CONST2) >> 16;
        let d1 = t1 + t2;

        block[i] = (a1 + d1) as i32;
        block[4 + i] = (b1 + c1) as i32;
        block[4 * 3 + i] = (a1 - d1) as i32;
        block[4 * 2 + i] = (b1 - c1) as i32;
    }

    // Pass 2: transform each row, rounding with +4 before the >> 3.
    for i in 0usize..4 {
        let a1 = fetch(block, 4 * i) + fetch(block, 4 * i + 2);
        let b1 = fetch(block, 4 * i) - fetch(block, 4 * i + 2);

        let t1 = (fetch(block, 4 * i + 1) * CONST2) >> 16;
        let t2 = fetch(block, 4 * i + 3) + ((fetch(block, 4 * i + 3) * CONST1) >> 16);
        let c1 = t1 - t2;

        let t1 = fetch(block, 4 * i + 1) + ((fetch(block, 4 * i + 1) * CONST1) >> 16);
        let t2 = (fetch(block, 4 * i + 3) * CONST2) >> 16;
        let d1 = t1 + t2;

        block[4 * i] = ((a1 + d1 + 4) >> 3) as i32;
        block[4 * i + 3] = ((a1 - d1 + 4) >> 3) as i32;
        block[4 * i + 1] = ((b1 + c1 + 4) >> 3) as i32;
        block[4 * i + 2] = ((b1 - c1 + 4) >> 3) as i32;
    }
}
// 14.3
/// In-place 4x4 inverse Walsh-Hadamard transform (columns, then rows with
/// `+3 >> 3` rounding).
pub(crate) fn iwht4x4(block: &mut [i32]) {
    // Pass 1: columns.
    for i in 0usize..4 {
        let a1 = block[i] + block[12 + i];
        let b1 = block[4 + i] + block[8 + i];
        let c1 = block[4 + i] - block[8 + i];
        let d1 = block[i] - block[12 + i];

        block[i] = a1 + b1;
        block[4 + i] = c1 + d1;
        block[8 + i] = a1 - b1;
        block[12 + i] = d1 - c1;
    }

    // Pass 2: rows, with rounding on output.
    for i in 0usize..4 {
        let a1 = block[4 * i] + block[4 * i + 3];
        let b1 = block[4 * i + 1] + block[4 * i + 2];
        let c1 = block[4 * i + 1] - block[4 * i + 2];
        let d1 = block[4 * i] - block[4 * i + 3];

        let a2 = a1 + b1;
        let b2 = c1 + d1;
        let c2 = a1 - b1;
        let d2 = d1 - c1;

        block[4 * i] = (a2 + 3) >> 3;
        block[4 * i + 1] = (b2 + 3) >> 3;
        block[4 * i + 2] = (c2 + 3) >> 3;
        block[4 * i + 3] = (d2 + 3) >> 3;
    }
}

File diff suppressed because it is too large Load diff

Binary file not shown.

After

Width:  |  Height:  |  Size: 58 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 9.2 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 7.3 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 6.6 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 6.3 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 5 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 1.8 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 195 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 226 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 109 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 91 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 7.3 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 13 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 9.6 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 9.5 KiB